/*	$NetBSD: if_bge.c,v 1.209 2013/03/07 03:53:35 msaitoh Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.209 2013/03/07 03:53:35 msaitoh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <sys/rnd.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */


#include <net/bpf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair. The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703. On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 16,   1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,   2 },
	{ 50,   4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
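
/*
 * Worked example (a reading of the table above, not new measurement):
 * with bge_rx_threshes[6] = { 598, 46 }, the chip delays an RX
 * interrupt until 46 receive BDs have completed or 598 coalescing
 * ticks have elapsed since the first unserviced frame, whichever
 * comes first; with { 16, 1 } every received frame interrupts
 * immediately.
 */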

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static void bge_release_resources(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
static void bge_stop(struct ifnet *, int);
static void bge_watchdog(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, void *, int, int);
static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, void *, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
	       bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_setpowerstate(struct bge_softc *, int);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static void bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(struct ifnet *);

#define BGE_RESET_START 1
#define BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);
static void bge_sysctl_init(struct bge_softc *);
static int bge_sysctl_verify(SYSCTLFN_PROTO);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM. We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1003,
	  "Altima AC1003 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet",
	},
	{ PCI_VENDOR_APPLE,
	  PCI_PRODUCT_APPLE_BCM5701,
	  "APPLE BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5724,
	  "Broadcom BCM5724 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750M,
	  "Broadcom BCM5750M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5785F,
	  "BCM5785F NetLink 10/100 Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5785G,
	  "BCM5785G NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787F,
	  "Broadcom BCM5787F 10/100 Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906M,
	  "Broadcom BCM5906M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57760,
	  "Broadcom BCM57760 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57761,
	  "Broadcom BCM57761 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57762,
	  "Broadcom BCM57762 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57765,
	  "Broadcom BCM57765 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57780,
	  "Broadcom BCM57780 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57781,
	  "Broadcom BCM57781 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57785,
	  "Broadcom BCM57785 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57788,
	  "Broadcom BCM57788 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57790,
	  "Broadcom BCM57790 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57791,
	  "Broadcom BCM57791 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57795,
	  "Broadcom BCM57795 Fast Ethernet",
	},
	{ PCI_VENDOR_SCHNEIDERKOCH,
	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
	  "SysKonnect SK-9Dx1 Gigabit Ethernet",
	},
	{ PCI_VENDOR_3COM,
	  PCI_PRODUCT_3COM_3C996,
	  "3Com 3c996 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE4,
	  "Fujitsu PW008GE4 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE5,
	  "Fujitsu PW008GE5 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
	  "Fujitsu Primepower 250/450 Gigabit Ethernet",
	},
	{ 0,
	  0,
	  NULL },
};

#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)

static const struct bge_revision {
	uint32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

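/*
 * Indirect access to the chip's internal memory: point the PCI
 * memory window at the requested offset via BGE_PCI_MEMWIN_BASEADDR,
 * then transfer the datum through BGE_PCI_MEMWIN_DATA.
 */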
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
}

/*
 * PCI Express only: make sure the device's Maximum Read Request Size
 * is set to 4096 bytes.
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR);
	if ((val & PCI_PCIE_DCSR_MAX_READ_REQ) !=
	    BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
		aprint_verbose_dev(sc->bge_dev,
		    "adjust device control 0x%04x ", val);
		val &= ~PCI_PCIE_DCSR_MAX_READ_REQ;
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
		    + PCI_PCIE_DCSR, val);
		aprint_verbose("-> 0x%04x\n", val);
	}
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

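/*
 * Write a value to a mailbox register. On the BCM5906 the mailboxes
 * live in the low-priority mailbox range, so redirect the offset
 * accordingly.
 */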
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
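	/*
	 * The chip returned a whole 32-bit word: bswap32() restores
	 * NVRAM byte order and the shift of (addr % 4) * 8 bits selects
	 * the requested byte -- e.g. addr = 6 reads the word at offset
	 * 4 and takes bits 23:16.
	 */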

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;

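	/* This NVRAM access method exists only on the BCM5906. */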
	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		error = bge_nvram_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (phy != 1)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(40);
	}

	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending. Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);
}


/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */
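/*
 * The jumbo pool is a single large DMA-safe allocation (BGE_JMEM)
 * carved into BGE_JSLOTS slots of BGE_JLEN bytes. Free slots are
 * kept on bge_jfree_listhead; bge_jalloc() moves an entry to
 * bge_jinuse_listhead, and bge_jfree() returns it when the mbuf
 * external storage is released.
 */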

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
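	/*
	 * Chips without the RX alignment bug can DMA to an unaligned
	 * address, so shift the payload by ETHER_ALIGN bytes to
	 * longword-align the IP header; affected chips must receive
	 * into an aligned buffer instead.
	 */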
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return ENOBUFS;
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "can't alloc txdmamap_pool_entry\n");
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return ENOMEM;
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return 0;
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	uint32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

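		/*
		 * The multicast filter is a 128-bit hash table split
		 * across four 32-bit MAR registers. The low 7 bits of
		 * the little-endian CRC of the address select one of
		 * those 128 bits: bits 6-5 pick the register, bits 4-0
		 * the bit within it.
		 */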
		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

 setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

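/*
 * ASF firmware handshake: the driver posts START/UNLOAD events through
 * chip memory so that any management firmware can quiesce and resume
 * around resets (a summary of the code below, not of Broadcom
 * documentation).
 */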
static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{

	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

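/*
 * Ask running ASF firmware to pause, and wait briefly for it to
 * acknowledge by clearing the CPU event bit.
 */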
static void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

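/*
 * Wait for the on-chip bootcode to finish initializing after a reset.
 */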
static int
bge_poll_fw(struct bge_softc *sc)
{
	uint32_t val;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
			return -1;
		}
	} else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) {
		/*
		 * Poll the value location we just wrote until
		 * we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			return -1;
		}
	}

	return 0;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_PCIX) {
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
	    dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
1895 	 * The BCM5701 B5 has a bug causing data corruption when using
1896 * 64-bit DMA reads, which can be terminated early and then
1897 * completed later as 32-bit accesses, in combination with
1898 * certain bridges.
1899 */
1900 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1901 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1902 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1903
1904 /*
1905 * Tell the firmware the driver is running
1906 */
1907 if (sc->bge_asf_mode & ASF_STACKUP)
1908 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1909
1910 /*
1911 * Disable memory write invalidate. Apparently it is not supported
1912 * properly by these devices.
1913 */
1914 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
1915 PCI_COMMAND_INVALIDATE_ENABLE);
1916
1917 #ifdef __brokenalpha__
1918 /*
1919 	 * Must ensure that we do not cross an 8K (byte) boundary
1920 * for DMA reads. Our highest limit is 1K bytes. This is a
1921 * restriction on some ALPHA platforms with early revision
1922 * 21174 PCI chipsets, such as the AlphaPC 164lx
1923 */
1924 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1925 #endif
1926
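	/*
	 * A sketch of the prescaler encoding, assuming the tg3-style
	 * layout of BGE_MISC_CFG: bits 7:1 hold the core clock rate in
	 * MHz minus one, so the chip's timers run from a 1MHz timebase:
	 *
	 *	prescale = (66 - 1) << 1;	// 0x82, BGE_32BITTIME_66MHZ
	 */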
1927 /* Set the timer prescaler (always 66MHz) */
1928 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1929
1930 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1931 DELAY(40); /* XXX */
1932
1933 /* Put PHY into ready state */
1934 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1935 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1936 DELAY(40);
1937 }
1938
1939 return 0;
1940 }
1941
1942 static int
1943 bge_blockinit(struct bge_softc *sc)
1944 {
1945 volatile struct bge_rcb *rcb;
1946 bus_size_t rcb_addr;
1947 int i;
1948 struct ifnet *ifp = &sc->ethercom.ec_if;
1949 bge_hostaddr taddr;
1950 uint32_t val;
1951
1952 /*
1953 * Initialize the memory window pointer register so that
1954 * we can access the first 32K of internal NIC RAM. This will
1955 * allow us to set up the TX send ring RCBs and the RX return
1956 * ring RCBs, plus other things which live in NIC memory.
1957 */
1958
1959 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1960
1961 /* Step 33: Configure mbuf memory pool */
1962 if (BGE_IS_5700_FAMILY(sc)) {
1963 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1964 BGE_BUFFPOOL_1);
1965
1966 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1967 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1968 else
1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1970
1971 /* Configure DMA resource pool */
1972 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1973 BGE_DMA_DESCRIPTORS);
1974 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1975 }
1976
1977 /* Step 35: Configure mbuf pool watermarks */
1978 #ifdef ORIG_WPAUL_VALUES
1979 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1980 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1981 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1982 #else
1983
1984 	/* Newer Broadcom docs strongly recommend these values: */
1985 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1986 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1987 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
1988 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1989 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1990 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1991 } else if (BGE_IS_5705_PLUS(sc)) {
1992 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1993
1994 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1995 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1996 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1997 } else {
1998 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1999 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2000 }
2001 } else if (!BGE_IS_5705_PLUS(sc)) {
2002 if (ifp->if_mtu > ETHER_MAX_LEN) {
2003 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
2004 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
2005 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2006 } else {
2007 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
2008 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
2009 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
2010 }
2011 } else {
2012 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2013 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2014 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2015 }
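	/*
	 * XXX The final else arm above is unreachable: the second and
	 * third arms already cover the 5705-plus and pre-5705 cases
	 * between them.
	 */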
2016 #endif
2017
2018 /* Step 36: Configure DMA resource watermarks */
2019 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2020 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
2021
2022 /* Step 38: Enable buffer manager */
2023 CSR_WRITE_4(sc, BGE_BMAN_MODE,
2024 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
2025
2026 /* Step 39: Poll for buffer manager start indication */
2027 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2028 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2029 break;
2030 DELAY(10);
2031 }
2032
2033 if (i == BGE_TIMEOUT * 2) {
2034 aprint_error_dev(sc->bge_dev,
2035 "buffer manager failed to start\n");
2036 return ENXIO;
2037 }
2038
2039 /* Step 40: Enable flow-through queues */
2040 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2041 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2042
2043 /* Wait until queue initialization is complete */
2044 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2045 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2046 break;
2047 DELAY(10);
2048 }
2049
2050 if (i == BGE_TIMEOUT * 2) {
2051 aprint_error_dev(sc->bge_dev,
2052 "flow-through queue init failed\n");
2053 return ENXIO;
2054 }
2055
2056 /* Step 41: Initialize the standard RX ring control block */
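	/*
	 * Each ring control block (RCB) packs the ring size and flag bits
	 * into a single word; per if_bgereg.h the macro is essentially:
	 *
	 *	#define BGE_RCB_MAXLEN_FLAGS(len, flags) ((len) << 16 | (flags))
	 */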
2057 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
2058 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
2059 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2060 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2061 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766)
2062 rcb->bge_maxlen_flags =
2063 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
2064 else if (BGE_IS_5705_PLUS(sc))
2065 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2066 else
2067 rcb->bge_maxlen_flags =
2068 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2069 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2070 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2071 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2072 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2073 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2074
2075 /*
2076 	 * Step 42: Initialize the jumbo RX ring control block.
2077 * We set the 'ring disabled' bit in the flags
2078 * field until we're actually ready to start
2079 * using this ring (i.e. once we set the MTU
2080 * high enough to require it).
2081 */
2082 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2083 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2084 BGE_HOSTADDR(rcb->bge_hostaddr,
2085 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2086 rcb->bge_maxlen_flags =
2087 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
2088 BGE_RCB_FLAG_RING_DISABLED);
2089 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2090 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2091 rcb->bge_hostaddr.bge_addr_hi);
2092 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2093 rcb->bge_hostaddr.bge_addr_lo);
2094 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2095 rcb->bge_maxlen_flags);
2096 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2097
2098 /* Set up dummy disabled mini ring RCB */
2099 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2100 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2101 BGE_RCB_FLAG_RING_DISABLED);
2102 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2103 rcb->bge_maxlen_flags);
2104
2105 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2106 offsetof(struct bge_ring_data, bge_info),
2107 sizeof (struct bge_gib),
2108 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2109 }
2110
2111 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2112 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2113 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2114 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2115 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2116 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2117 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2118 }
2119 /*
2120 * Set the BD ring replenish thresholds. The recommended
2121 * values are 1/8th the number of descriptors allocated to
2122 * each ring.
2123 */
2124 i = BGE_STD_RX_RING_CNT / 8;
2125
2126 /*
2127 	 * Use a value of 8 for the following chips to work around HW errata.
2128 * Some of these chips have been added based on empirical
2129 * evidence (they don't work unless this is done).
2130 */
2131 if (BGE_IS_5705_PLUS(sc))
2132 i = 8;
2133
2134 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
2135 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
2136
2137 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2138 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2139 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
2140 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2141 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2142 }
2143
2144 /*
2145 * Disable all unused send rings by setting the 'ring disabled'
2146 * bit in the flags field of all the TX send ring control blocks.
2147 * These are located in NIC memory.
2148 */
2149 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2150 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
2151 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2152 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2153 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2154 rcb_addr += sizeof(struct bge_rcb);
2155 }
2156
2157 /* Configure TX RCB 0 (we use only the first ring) */
2158 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2159 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2160 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2161 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2162 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2163 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2164 if (BGE_IS_5700_FAMILY(sc))
2165 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2166 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2167
2168 /* Disable all unused RX return rings */
2169 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2170 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
2171 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2172 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2173 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2174 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2175 BGE_RCB_FLAG_RING_DISABLED));
2176 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2177 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2178 (i * (sizeof(uint64_t))), 0);
2179 rcb_addr += sizeof(struct bge_rcb);
2180 }
2181
2182 /* Initialize RX ring indexes */
2183 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2184 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2185 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2186
2187 /*
2188 * Set up RX return ring 0
2189 * Note that the NIC address for RX return rings is 0x00000000.
2190 * The return rings live entirely within the host, so the
2191 * nicaddr field in the RCB isn't used.
2192 */
2193 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2194 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2195 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2196 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2197 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2198 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2199 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2200
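	/*
	 * Summing the station address bytes gives each NIC a distinct
	 * seed, so stations sharing a half-duplex segment are unlikely
	 * to pick identical collision backoff intervals.
	 */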
2201 /* Set random backoff seed for TX */
2202 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2203 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2204 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2205 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] +
2206 BGE_TX_BACKOFF_SEED_MASK);
2207
2208 /* Set inter-packet gap */
2209 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
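	/*
	 * 0x2620 matches what the Linux tg3 driver programs here:
	 * IPG CRS 2 (bits 15:12), IPG 6 (bits 11:8) and a slot time of
	 * 0x20 byte times (bits 7:0).
	 */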
2210
2211 /*
2212 * Specify which ring to use for packets that don't match
2213 * any RX rules.
2214 */
2215 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2216
2217 /*
2218 * Configure number of RX lists. One interrupt distribution
2219 * list, sixteen active lists, one bad frames class.
2220 */
2221 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2222
2223 	/* Initialize RX list placement stats mask. */
2224 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2225 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2226
2227 /* Disable host coalescing until we get it set up */
2228 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2229
2230 /* Poll to make sure it's shut down. */
2231 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2232 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2233 break;
2234 DELAY(10);
2235 }
2236
2237 if (i == BGE_TIMEOUT * 2) {
2238 aprint_error_dev(sc->bge_dev,
2239 "host coalescing engine failed to idle\n");
2240 return ENXIO;
2241 }
2242
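	/*
	 * The host coalescing engine defers status block updates (and the
	 * interrupts that follow them) until either the tick limit or the
	 * buffer descriptor count limit is reached, whichever comes first.
	 */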
2243 /* Set up host coalescing defaults */
2244 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2245 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2246 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2247 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2248 if (BGE_IS_5700_FAMILY(sc)) {
2249 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2250 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2251 }
2252 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2253 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2254
2255 /* Set up address of statistics block */
2256 if (BGE_IS_5700_FAMILY(sc)) {
2257 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2258 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2259 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2260 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2261 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2262 }
2263
2264 /* Set up address of status block */
2265 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2266 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2267 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2268 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2269 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2270 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2271
2272 /* Turn on host coalescing state machine */
2273 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2274
2275 /* Turn on RX BD completion state machine and enable attentions */
2276 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2277 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2278
2279 /* Turn on RX list placement state machine */
2280 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2281
2282 /* Turn on RX list selector state machine. */
2283 if (BGE_IS_5700_FAMILY(sc))
2284 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2285
2286 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2287 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2288 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2289 BGE_MACMODE_FRMHDR_DMA_ENB;
2290
2291 if (sc->bge_flags & BGE_PHY_FIBER_TBI)
2292 val |= BGE_PORTMODE_TBI;
2293 else if (sc->bge_flags & BGE_PHY_FIBER_MII)
2294 val |= BGE_PORTMODE_GMII;
2295 else
2296 val |= BGE_PORTMODE_MII;
2297
2298 /* Turn on DMA, clear stats */
2299 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2300
2301 /* Set misc. local control, enable interrupts on attentions */
2302 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
2303
2304 #ifdef notdef
2305 /* Assert GPIO pins for PHY reset */
2306 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2307 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2308 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2309 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2310 #endif
2311
2312 #if defined(not_quite_yet)
2313 	/* The Linux driver enables GPIO pin #1 on 5700s */
2314 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
2315 sc->bge_local_ctrl_reg |=
2316 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
2317 }
2318 #endif
2319 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2320
2321 /* Turn on DMA completion state machine */
2322 if (BGE_IS_5700_FAMILY(sc))
2323 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2324
2325 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2326
2327 /* Enable host coalescing bug fix */
2328 if (BGE_IS_5755_PLUS(sc))
2329 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2330
2331 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2332 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2333
2334 /* Turn on write DMA state machine */
2335 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2336
2337 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2338 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2339 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2340 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2341 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2342 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2343 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2344
2345 if (sc->bge_flags & BGE_PCIE)
2346 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2347 if (sc->bge_flags & BGE_TSO)
2348 val |= BGE_RDMAMODE_TSO4_ENABLE;
2349
2350 /* Turn on read DMA state machine */
2351 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2352 delay(40);
2353
2354 /* Turn on RX data completion state machine */
2355 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2356
2357 /* Turn on RX BD initiator state machine */
2358 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2359
2360 /* Turn on RX data and RX BD initiator state machine */
2361 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2362
2363 /* Turn on Mbuf cluster free state machine */
2364 if (BGE_IS_5700_FAMILY(sc))
2365 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2366
2367 /* Turn on send BD completion state machine */
2368 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2369
2370 /* Turn on send data completion state machine */
2371 val = BGE_SDCMODE_ENABLE;
2372 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2373 val |= BGE_SDCMODE_CDELAY;
2374 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2375
2376 /* Turn on send data initiator state machine */
2377 if (sc->bge_flags & BGE_TSO) {
2378 /* XXX: magic value from Linux driver */
2379 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
2380 } else
2381 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2382
2383 /* Turn on send BD initiator state machine */
2384 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2385
2386 /* Turn on send BD selector state machine */
2387 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2388
2389 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2390 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2391 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2392
2393 /* ack/clear link change events */
2394 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2395 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2396 BGE_MACSTAT_LINK_CHANGED);
2397 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2398
2399 /* Enable PHY auto polling (for MII/GMII only) */
2400 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2401 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2402 } else {
2403 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2404 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
2405 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2406 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2407 BGE_EVTENB_MI_INTERRUPT);
2408 }
2409
2410 /*
2411 * Clear any pending link state attention.
2412 * Otherwise some link state change events may be lost until attention
2413 * is cleared by bge_intr() -> bge_link_upd() sequence.
2414 * It's not necessary on newer BCM chips - perhaps enabling link
2415 * state change attentions implies clearing pending attention.
2416 */
2417 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2418 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2419 BGE_MACSTAT_LINK_CHANGED);
2420
2421 /* Enable link state change attentions. */
2422 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2423
2424 return 0;
2425 }
2426
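/*
 * Map a chip ID to a human-readable revision name: try an exact
 * chip ID match first, then fall back to the ASIC major revision
 * so unrecognized steppings still get a usable name.
 */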
2427 static const struct bge_revision *
2428 bge_lookup_rev(uint32_t chipid)
2429 {
2430 const struct bge_revision *br;
2431
2432 for (br = bge_revisions; br->br_name != NULL; br++) {
2433 if (br->br_chipid == chipid)
2434 return br;
2435 }
2436
2437 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2438 if (br->br_chipid == BGE_ASICREV(chipid))
2439 return br;
2440 }
2441
2442 return NULL;
2443 }
2444
2445 static const struct bge_product *
2446 bge_lookup(const struct pci_attach_args *pa)
2447 {
2448 const struct bge_product *bp;
2449
2450 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2451 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2452 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2453 return bp;
2454 }
2455
2456 return NULL;
2457 }
2458
2459 static int
2460 bge_setpowerstate(struct bge_softc *sc, int powerlevel)
2461 {
2462 #ifdef NOTYET
2463 uint32_t pm_ctl = 0;
2464
2465 /* XXX FIXME: make sure indirect accesses enabled? */
2466 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2467 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2468 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2469
2470 /* clear the PME_assert bit and power state bits, enable PME */
2471 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2472 pm_ctl &= ~PCIM_PSTAT_DMASK;
2473 pm_ctl |= (1 << 8);
2474
2475 if (powerlevel == 0) {
2476 pm_ctl |= PCIM_PSTAT_D0;
2477 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2478 pm_ctl, 2);
2479 DELAY(10000);
2480 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2481 DELAY(10000);
2482
2483 #ifdef NOTYET
2484 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2485 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2486 #endif
2487 DELAY(40); DELAY(40); DELAY(40);
2488 DELAY(10000); /* above not quite adequate on 5700 */
2489 return 0;
2490 }
2491
2492
2493 /*
2494 * Entering ACPI power states D1-D3 is achieved by wiggling
2495 * GMII gpio pins. Example code assumes all hardware vendors
2496 * followed Broadcom's sample pcb layout. Until we verify that
2497 * for all supported OEM cards, states D1-D3 are unsupported.
2498 */
2499 aprint_error_dev(sc->bge_dev,
2500 "power state %d unimplemented; check GPIO pins\n",
2501 powerlevel);
2502 #endif
2503 return EOPNOTSUPP;
2504 }
2505
2506
2507 /*
2508 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2509 * against our list and return its name if we find a match. Note
2510 * that since the Broadcom controller contains VPD support, we
2511 * can get the device name string from the controller itself instead
2512 * of the compiled-in string. This is a little slow, but it guarantees
2513 * we'll always announce the right product name.
2514 */
2515 static int
2516 bge_probe(device_t parent, cfdata_t match, void *aux)
2517 {
2518 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2519
2520 if (bge_lookup(pa) != NULL)
2521 return 1;
2522
2523 return 0;
2524 }
2525
2526 static void
2527 bge_attach(device_t parent, device_t self, void *aux)
2528 {
2529 struct bge_softc *sc = device_private(self);
2530 struct pci_attach_args *pa = aux;
2531 prop_dictionary_t dict;
2532 const struct bge_product *bp;
2533 const struct bge_revision *br;
2534 pci_chipset_tag_t pc;
2535 pci_intr_handle_t ih;
2536 const char *intrstr = NULL;
2537 bus_dma_segment_t seg;
2538 int rseg;
2539 uint32_t hwcfg = 0;
2540 uint32_t command;
2541 struct ifnet *ifp;
2542 uint32_t misccfg;
2543 void * kva;
2544 u_char eaddr[ETHER_ADDR_LEN];
2545 pcireg_t memtype, subid;
2546 bus_addr_t memaddr;
2547 bus_size_t memsize;
2548 uint32_t pm_ctl;
2549 bool no_seeprom;
2550
2551 bp = bge_lookup(pa);
2552 KASSERT(bp != NULL);
2553
2554 sc->sc_pc = pa->pa_pc;
2555 sc->sc_pcitag = pa->pa_tag;
2556 sc->bge_dev = self;
2557
2558 pc = sc->sc_pc;
2559 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
2560
2561 aprint_naive(": Ethernet controller\n");
2562 aprint_normal(": %s\n", bp->bp_name);
2563
2564 /*
2565 * Map control/status registers.
2566 */
2567 DPRINTFN(5, ("Map control/status regs\n"));
2568 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
2569 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2570 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
2571 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
2572
2573 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2574 aprint_error_dev(sc->bge_dev,
2575 "failed to enable memory mapping!\n");
2576 return;
2577 }
2578
2579 DPRINTFN(5, ("pci_mem_find\n"));
2580 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
2581 switch (memtype) {
2582 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2583 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2584 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2585 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2586 &memaddr, &memsize) == 0)
2587 break;
2588 default:
2589 aprint_error_dev(sc->bge_dev, "can't find mem space\n");
2590 return;
2591 }
2592
2593 DPRINTFN(5, ("pci_intr_map\n"));
2594 if (pci_intr_map(pa, &ih)) {
2595 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n");
2596 return;
2597 }
2598
2599 DPRINTFN(5, ("pci_intr_string\n"));
2600 intrstr = pci_intr_string(pc, ih);
2601
2602 DPRINTFN(5, ("pci_intr_establish\n"));
2603 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2604
2605 if (sc->bge_intrhand == NULL) {
2606 aprint_error_dev(sc->bge_dev,
2607 "couldn't establish interrupt%s%s\n",
2608 intrstr ? " at " : "", intrstr ? intrstr : "");
2609 return;
2610 }
2611 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
2612
2613 /*
2614 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2615 * can clobber the chip's PCI config-space power control registers,
2616 * leaving the card in D3 powersave state.
2617 * We do not have memory-mapped registers in this state,
2618 * so force device into D0 state before starting initialization.
2619 */
2620 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
2621 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2622 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2623 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2624 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2625
2626 /*
2627 * Save ASIC rev.
2628 */
2629 sc->bge_chipid =
2630 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2631 >> BGE_PCIMISCCTL_ASICREV_SHIFT;
2632
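	/*
	 * Newer chips report BGE_ASICREV_USE_PRODID_REG in the MISC_CTL
	 * field; the real chip ID must then be read from a config
	 * register whose location depends on the PCI product ID.
	 */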
2633 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2634 switch (PCI_PRODUCT(pa->pa_id)) {
2635 case PCI_PRODUCT_BROADCOM_BCM5717:
2636 case PCI_PRODUCT_BROADCOM_BCM5718:
2637 case PCI_PRODUCT_BROADCOM_BCM5724: /* ??? */
2638 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2639 BGE_PCI_GEN2_PRODID_ASICREV);
2640 break;
2641 case PCI_PRODUCT_BROADCOM_BCM57761:
2642 case PCI_PRODUCT_BROADCOM_BCM57762:
2643 case PCI_PRODUCT_BROADCOM_BCM57765:
2644 case PCI_PRODUCT_BROADCOM_BCM57781:
2645 case PCI_PRODUCT_BROADCOM_BCM57785:
2646 case PCI_PRODUCT_BROADCOM_BCM57791:
2647 case PCI_PRODUCT_BROADCOM_BCM57795:
2648 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2649 BGE_PCI_GEN15_PRODID_ASICREV);
2650 break;
2651 default:
2652 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2653 BGE_PCI_PRODID_ASICREV);
2654 break;
2655 }
2656 }
2657
2658 if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
2659 &sc->bge_pciecap, NULL) != 0)
2660 || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) {
2661 /* PCIe */
2662 sc->bge_flags |= BGE_PCIE;
2663 bge_set_max_readrq(sc);
2664 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
2665 BGE_PCISTATE_PCI_BUSMODE) == 0) {
2666 /* PCI-X */
2667 sc->bge_flags |= BGE_PCIX;
2668 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
2669 &sc->bge_pcixcap, NULL) == 0)
2670 aprint_error_dev(sc->bge_dev,
2671 "unable to find PCIX capability\n");
2672 }
2673
2674 /* chipid */
2675 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2676 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
2677 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2678 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2679 sc->bge_flags |= BGE_5700_FAMILY;
2680
2681 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
2682 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
2683 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
2684 sc->bge_flags |= BGE_5714_FAMILY;
2685
2686 /* Intentionally exclude BGE_ASICREV_BCM5906 */
2687 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2688 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2689 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2690 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2691 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2692 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
2693 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2694 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766 ||
2695 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2696 sc->bge_flags |= BGE_5755_PLUS;
2697
2698 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
2699 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2700 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
2701 BGE_IS_5755_PLUS(sc) ||
2702 BGE_IS_5714_FAMILY(sc))
2703 sc->bge_flags |= BGE_575X_PLUS;
2704
2705 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
2706 BGE_IS_575X_PLUS(sc))
2707 sc->bge_flags |= BGE_5705_PLUS;
2708
2709 /*
2710 * When using the BCM5701 in PCI-X mode, data corruption has
2711 * been observed in the first few bytes of some received packets.
2712 * Aligning the packet buffer in memory eliminates the corruption.
2713 * Unfortunately, this misaligns the packet payloads. On platforms
2714 * which do not support unaligned accesses, we will realign the
2715 * payloads by copying the received packets.
2716 */
2717 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2718 sc->bge_flags & BGE_PCIX)
2719 sc->bge_flags |= BGE_RX_ALIGNBUG;
2720
2721 if (BGE_IS_5700_FAMILY(sc))
2722 sc->bge_flags |= BGE_JUMBO_CAPABLE;
2723
2724 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2725 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2726 PCI_VENDOR(subid) == PCI_VENDOR_DELL)
2727 sc->bge_flags |= BGE_NO_3LED;
2728
2729 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2730 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2731
2732 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2733 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2734 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2735 sc->bge_flags |= BGE_IS_5788;
2736
2737 	/*
2738 	 * Some controllers seem to require special firmware to use
2739 	 * TSO. That firmware is not available to FreeBSD, and Linux
2740 	 * claims that firmware-based TSO is slower than TSO done in
2741 	 * hardware. Moreover, the firmware-based TSO has a known bug:
2742 	 * it cannot handle segmentation if the ethernet header plus
2743 	 * the IP/TCP header exceeds 80 bytes. A workaround exists,
2744 	 * but it appears to cost more than simply not using TSO.
2745 	 * Some hardware revisions also exhibit the TSO bug, so limit
2746 	 * TSO to the controllers that are not affected by these
2747 	 * issues (e.g. 5755 or higher).
2748 	 */
2749 if (BGE_IS_5755_PLUS(sc)) {
2750 		/*
2751 		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2752 		 * explicit device ID check is required.
2753 		 */
2754 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
2755 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
2756 sc->bge_flags |= BGE_TSO;
2757 }
2758
2759 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2760 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2761 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2762 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2763 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2764 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2765 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2766 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2767 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2768 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2769 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2770 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2771 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2772 sc->bge_flags |= BGE_10_100_ONLY;
2773
2774 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2775 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2776 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2777 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2778 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2779 sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
2780
2781 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2782 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2783 sc->bge_flags |= BGE_PHY_CRC_BUG;
2784 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2785 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2786 sc->bge_flags |= BGE_PHY_ADC_BUG;
2787 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2788 sc->bge_flags |= BGE_PHY_5704_A0_BUG;
2789
2790 if (BGE_IS_5705_PLUS(sc) &&
2791 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2792 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2793 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2794 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
2795 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57766 &&
2796 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) {
2797 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2798 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2799 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2800 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2801 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2802 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2803 sc->bge_flags |= BGE_PHY_JITTER_BUG;
2804 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2805 sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
2806 } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
2807 sc->bge_flags |= BGE_PHY_BER_BUG;
2808 }
2809
2810 /*
2811 * SEEPROM check.
2812 * First check if firmware knows we do not have SEEPROM.
2813 */
2814 if (prop_dictionary_get_bool(device_properties(self),
2815 "without-seeprom", &no_seeprom) && no_seeprom)
2816 sc->bge_flags |= BGE_NO_EEPROM;
2817
2818 /* Now check the 'ROM failed' bit on the RX CPU */
2819 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
2820 sc->bge_flags |= BGE_NO_EEPROM;
2821
2822 /* Try to reset the chip. */
2823 DPRINTFN(5, ("bge_reset\n"));
2824 bge_reset(sc);
2825
2826 sc->bge_asf_mode = 0;
2827 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2828 == BGE_MAGIC_NUMBER)) {
2829 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2830 & BGE_HWCFG_ASF) {
2831 sc->bge_asf_mode |= ASF_ENABLE;
2832 sc->bge_asf_mode |= ASF_STACKUP;
2833 if (BGE_IS_575X_PLUS(sc)) {
2834 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2835 }
2836 }
2837 }
2838
2839 /* Try to reset the chip again the nice way. */
2840 bge_stop_fw(sc);
2841 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2842 if (bge_reset(sc))
2843 aprint_error_dev(sc->bge_dev, "chip reset failed\n");
2844
2845 bge_sig_legacy(sc, BGE_RESET_STOP);
2846 bge_sig_post_reset(sc, BGE_RESET_STOP);
2847
2848 if (bge_chipinit(sc)) {
2849 aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
2850 bge_release_resources(sc);
2851 return;
2852 }
2853
2854 /*
2855 * Get station address from the EEPROM.
2856 */
2857 if (bge_get_eaddr(sc, eaddr)) {
2858 aprint_error_dev(sc->bge_dev,
2859 "failed to read station address\n");
2860 bge_release_resources(sc);
2861 return;
2862 }
2863
2864 br = bge_lookup_rev(sc->bge_chipid);
2865
2866 if (br == NULL) {
2867 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
2868 sc->bge_chipid);
2869 } else {
2870 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
2871 br->br_name, sc->bge_chipid);
2872 }
2873 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2874
2875 /* Allocate the general information block and ring buffers. */
2876 if (pci_dma64_available(pa))
2877 sc->bge_dmatag = pa->pa_dmat64;
2878 else
2879 sc->bge_dmatag = pa->pa_dmat;
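	/*
	 * What follows is the usual bus_dma(9) four-step sequence:
	 * allocate DMA-safe memory, map it into kernel virtual address
	 * space, create a DMA map, then load the map so the rings have
	 * both CPU and device addresses.
	 */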
2880 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2881 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2882 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2883 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
2884 return;
2885 }
2886 DPRINTFN(5, ("bus_dmamem_map\n"));
2887 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2888 sizeof(struct bge_ring_data), &kva,
2889 BUS_DMA_NOWAIT)) {
2890 aprint_error_dev(sc->bge_dev,
2891 "can't map DMA buffers (%zu bytes)\n",
2892 sizeof(struct bge_ring_data));
2893 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2894 return;
2895 }
2896 	DPRINTFN(5, ("bus_dmamap_create\n"));
2897 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2898 sizeof(struct bge_ring_data), 0,
2899 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2900 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
2901 bus_dmamem_unmap(sc->bge_dmatag, kva,
2902 sizeof(struct bge_ring_data));
2903 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2904 return;
2905 }
2906 	DPRINTFN(5, ("bus_dmamap_load\n"));
2907 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2908 sizeof(struct bge_ring_data), NULL,
2909 BUS_DMA_NOWAIT)) {
2910 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2911 bus_dmamem_unmap(sc->bge_dmatag, kva,
2912 sizeof(struct bge_ring_data));
2913 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2914 return;
2915 }
2916
2917 DPRINTFN(5, ("bzero\n"));
2918 sc->bge_rdata = (struct bge_ring_data *)kva;
2919
2920 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2921
2922 /* Try to allocate memory for jumbo buffers. */
2923 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2924 if (bge_alloc_jumbo_mem(sc)) {
2925 aprint_error_dev(sc->bge_dev,
2926 "jumbo buffer allocation failed\n");
2927 } else
2928 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2929 }
2930
2931 /* Set default tuneable values. */
2932 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2933 sc->bge_rx_coal_ticks = 150;
2934 sc->bge_rx_max_coal_bds = 64;
2935 #ifdef ORIG_WPAUL_VALUES
2936 sc->bge_tx_coal_ticks = 150;
2937 sc->bge_tx_max_coal_bds = 128;
2938 #else
2939 sc->bge_tx_coal_ticks = 300;
2940 sc->bge_tx_max_coal_bds = 400;
2941 #endif
2942 if (BGE_IS_5705_PLUS(sc)) {
2943 sc->bge_tx_coal_ticks = (12 * 5);
2944 sc->bge_tx_max_coal_bds = (12 * 5);
2945 aprint_verbose_dev(sc->bge_dev,
2946 "setting short Tx thresholds\n");
2947 }
2948
2949 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2950 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2951 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766)
2952 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2953 else if (BGE_IS_5705_PLUS(sc))
2954 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2955 else
2956 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2957
2958 /* Set up ifnet structure */
2959 ifp = &sc->ethercom.ec_if;
2960 ifp->if_softc = sc;
2961 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2962 ifp->if_ioctl = bge_ioctl;
2963 ifp->if_stop = bge_stop;
2964 ifp->if_start = bge_start;
2965 ifp->if_init = bge_init;
2966 ifp->if_watchdog = bge_watchdog;
2967 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2968 IFQ_SET_READY(&ifp->if_snd);
2969 DPRINTFN(5, ("strcpy if_xname\n"));
2970 strcpy(ifp->if_xname, device_xname(sc->bge_dev));
2971
2972 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
2973 sc->ethercom.ec_if.if_capabilities |=
2974 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
2975 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */
2976 sc->ethercom.ec_if.if_capabilities |=
2977 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2978 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
2979 #endif
2980 sc->ethercom.ec_capabilities |=
2981 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2982
2983 if (sc->bge_flags & BGE_TSO)
2984 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
2985
2986 /*
2987 * Do MII setup.
2988 */
2989 DPRINTFN(5, ("mii setup\n"));
2990 sc->bge_mii.mii_ifp = ifp;
2991 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2992 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2993 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2994
2995 /*
2996 * Figure out what sort of media we have by checking the hardware
2997 * config word in the first 32k of NIC internal memory, or fall back to
2998 * the config word in the EEPROM. Note: on some BCM5700 cards,
2999 * this value appears to be unset. If that's the case, we have to rely
3000 * on identifying the NIC by its PCI subsystem ID, as we do below for
3001 * the SysKonnect SK-9D41.
3002 */
3003 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
3004 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3005 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
3006 bge_read_eeprom(sc, (void *)&hwcfg,
3007 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
3008 hwcfg = be32toh(hwcfg);
3009 }
3010 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3011 if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 ||
3012 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3013 if (BGE_IS_5714_FAMILY(sc))
3014 sc->bge_flags |= BGE_PHY_FIBER_MII;
3015 else
3016 sc->bge_flags |= BGE_PHY_FIBER_TBI;
3017 }
3018
3019 /* set phyflags and chipid before mii_attach() */
3020 dict = device_properties(self);
3021 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags);
3022 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
3023
3024 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3025 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3026 bge_ifmedia_sts);
3027 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
3028 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
3029 0, NULL);
3030 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3031 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3032 /* Pretend the user requested this setting */
3033 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3034 } else {
3035 		/*
3036 		 * Do transceiver setup and tell the firmware the
3037 		 * driver is down so we can try to access the PHY
3038 		 * during the probe even while ASF is running. Retry
3039 		 * a couple of times if we get a conflict with the
3040 		 * ASF firmware accessing the PHY.
3041 		 */
3042 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3043 bge_asf_driver_up(sc);
3044
3045 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3046 bge_ifmedia_sts);
3047 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff,
3048 MII_PHY_ANY, MII_OFFSET_ANY,
3049 MIIF_FORCEANEG|MIIF_DOPAUSE);
3050
3051 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
3052 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
3053 ifmedia_add(&sc->bge_mii.mii_media,
3054 IFM_ETHER|IFM_MANUAL, 0, NULL);
3055 ifmedia_set(&sc->bge_mii.mii_media,
3056 IFM_ETHER|IFM_MANUAL);
3057 } else
3058 ifmedia_set(&sc->bge_mii.mii_media,
3059 IFM_ETHER|IFM_AUTO);
3060
3061 /*
3062 * Now tell the firmware we are going up after probing the PHY
3063 */
3064 if (sc->bge_asf_mode & ASF_STACKUP)
3065 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3066 }
3067
3068 /*
3069 * Call MI attach routine.
3070 */
3071 DPRINTFN(5, ("if_attach\n"));
3072 if_attach(ifp);
3073 DPRINTFN(5, ("ether_ifattach\n"));
3074 ether_ifattach(ifp, eaddr);
3075 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
3076 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
3077 RND_TYPE_NET, 0);
3078 #ifdef BGE_EVENT_COUNTERS
3079 /*
3080 * Attach event counters.
3081 */
3082 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
3083 NULL, device_xname(sc->bge_dev), "intr");
3084 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
3085 NULL, device_xname(sc->bge_dev), "tx_xoff");
3086 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
3087 NULL, device_xname(sc->bge_dev), "tx_xon");
3088 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
3089 NULL, device_xname(sc->bge_dev), "rx_xoff");
3090 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
3091 NULL, device_xname(sc->bge_dev), "rx_xon");
3092 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
3093 NULL, device_xname(sc->bge_dev), "rx_macctl");
3094 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
3095 NULL, device_xname(sc->bge_dev), "xoffentered");
3096 #endif /* BGE_EVENT_COUNTERS */
3097 DPRINTFN(5, ("callout_init\n"));
3098 callout_init(&sc->bge_timeout, 0);
3099
3100 if (pmf_device_register(self, NULL, NULL))
3101 pmf_class_network_register(self, ifp);
3102 else
3103 aprint_error_dev(self, "couldn't establish power handler\n");
3104
3105 bge_sysctl_init(sc);
3106
3107 #ifdef BGE_DEBUG
3108 bge_debug_info(sc);
3109 #endif
3110 }
3111
3112 static void
3113 bge_release_resources(struct bge_softc *sc)
3114 {
3115 if (sc->bge_vpd_prodname != NULL)
3116 free(sc->bge_vpd_prodname, M_DEVBUF);
3117
3118 if (sc->bge_vpd_readonly != NULL)
3119 free(sc->bge_vpd_readonly, M_DEVBUF);
3120 }
3121
3122 static int
3123 bge_reset(struct bge_softc *sc)
3124 {
3125 uint32_t cachesize, command, pcistate, marbmode;
3126 #if 0
3127 uint32_t new_pcistate;
3128 #endif
3129 pcireg_t devctl, reg;
3130 int i, val;
3131 void (*write_op)(struct bge_softc *, int, int);
3132
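	/*
	 * Pick a register write method that stays usable while the core
	 * goes through reset: newer (575X-plus) parts other than the
	 * 5714 family and the 5906 use memory-window writes, direct for
	 * PCIe and via the indirect window otherwise; everything else
	 * falls back to indirect register access.
	 */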
3133 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)
3134 && (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
3135 if (sc->bge_flags & BGE_PCIE)
3136 write_op = bge_writemem_direct;
3137 else
3138 write_op = bge_writemem_ind;
3139 } else
3140 write_op = bge_writereg_ind;
3141
3142 /* Save some important PCI state. */
3143 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
3144 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
3145 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE);
3146
3147 /* Step 5a: Enable memory arbiter. */
3148 marbmode = 0;
3149 if (BGE_IS_5714_FAMILY(sc))
3150 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3151 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3152
3153 /* Step 5b-5d: */
3154 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
3155 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3156 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
3157
3158 /* XXX ???: Disable fastboot on controllers that support it. */
3159 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3160 BGE_IS_5755_PLUS(sc))
3161 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3162
3163 /*
3164 * Step 6: Write the magic number to SRAM at offset 0xB50.
3165 * When firmware finishes its initialization it will
3166 * write ~BGE_MAGIC_NUMBER to the same location.
3167 */
3168 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3169
3170 /* Step 7: */
3171 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1);
3172 /*
3173 * XXX: from FreeBSD/Linux; no documentation
3174 */
3175 if (sc->bge_flags & BGE_PCIE) {
3176 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60)
3177 /* PCI Express 1.0 system */
3178 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20);
3179 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3180 /*
3181 * Prevent PCI Express link training
3182 * during global reset.
3183 */
3184 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3185 val |= (1<<29);
3186 }
3187 }
3188
3189 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3190 i = CSR_READ_4(sc, BGE_VCPU_STATUS);
3191 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3192 i | BGE_VCPU_STATUS_DRV_RESET);
3193 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3194 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3195 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3196 }
3197
3198 /*
3199 * Set GPHY Power Down Override to leave GPHY
3200 * powered up in D0 uninitialized.
3201 */
3202 if (BGE_IS_5705_PLUS(sc))
3203 val |= BGE_MISCCFG_KEEP_GPHY_POWER;
3204
3205 /* XXX 5721, 5751 and 5752 */
3206 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750)
3207 val |= BGE_MISCCFG_GRC_RESET_DISABLE;
3208
3209 /* Issue global reset */
3210 write_op(sc, BGE_MISC_CFG, val);
3211
3212 /* Step 8: wait for complete */
3213 if (sc->bge_flags & BGE_PCIE)
3214 delay(100*1000); /* too big */
3215 else
3216 delay(100);
3217
3218 /* From Linux: dummy read to flush PCI posted writes */
3219 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
3220
3221 /* Step 9-10: Reset some of the PCI state that got zapped by reset */
3222 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
3223 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3224 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW
3225 | BGE_PCIMISCCTL_CLOCKCTL_RW);
3226 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
3227 write_op(sc, BGE_MISC_CFG, (65 << 1));
3228
3229 /* Step 11: disable PCI-X Relaxed Ordering. */
3230 if (sc->bge_flags & BGE_PCIX) {
3231 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
3232 + PCI_PCIX_CMD);
3233 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
3234 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER);
3235 }
3236
3237 if (sc->bge_flags & BGE_PCIE) {
3238 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3239 DELAY(500000);
3240 /* XXX: Magic Numbers */
3241 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3242 BGE_PCI_UNKNOWN0);
3243 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3244 BGE_PCI_UNKNOWN0,
3245 reg | (1 << 15));
3246 }
3247 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3248 sc->bge_pciecap + PCI_PCIE_DCSR);
3249 /* Clear enable no snoop and disable relaxed ordering. */
3250 devctl &= ~(PCI_PCIE_DCSR_ENA_RELAX_ORD |
3251 PCI_PCIE_DCSR_ENA_NO_SNOOP);
3252 /* Set PCIE max payload size to 128. */
3253 devctl &= ~(0x00e0);
3254 /* Clear device status register. Write 1b to clear */
3255 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED
3256 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED;
3257 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3258 sc->bge_pciecap + PCI_PCIE_DCSR, devctl);
3259 }
3260
3261 /* Step 12: Enable memory arbiter. */
3262 marbmode = 0;
3263 if (BGE_IS_5714_FAMILY(sc))
3264 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3265 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3266
3267 /* Step 17: Poll until the firmware initialization is complete */
3268 bge_poll_fw(sc);
3269
3270 /* XXX 5721, 5751 and 5752 */
3271 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
3272 /* Step 19: */
3273 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
3274 /* Step 20: */
3275 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
3276 }
3277
3278 /*
3279 	 * Step 18: write MAC mode
3280 * XXX Write 0x0c for 5703S and 5704S
3281 */
3282 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3283
3284
3285 	/* Step 21: 5704 B0 errata */
3286 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
3287 pcireg_t msidata;
3288
3289 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3290 BGE_PCI_MSI_DATA);
3291 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
3292 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
3293 msidata);
3294 }
3295
3296 /* Step 23: restore cache line size */
3297 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
3298
3299 #if 0
3300 /*
3301 * XXX Wait for the value of the PCISTATE register to
3302 * return to its original pre-reset state. This is a
3303 * fairly good indicator of reset completion. If we don't
3304 * wait for the reset to fully complete, trying to read
3305 * from the device's non-PCI registers may yield garbage
3306 * results.
3307 */
3308 for (i = 0; i < BGE_TIMEOUT; i++) {
3309 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3310 BGE_PCI_PCISTATE);
3311 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
3312 (pcistate & ~BGE_PCISTATE_RESERVED))
3313 break;
3314 DELAY(10);
3315 }
3316 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
3317 (pcistate & ~BGE_PCISTATE_RESERVED)) {
3318 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n");
3319 }
3320 #endif
3321
3322 /* Step 28: Fix up byte swapping */
3323 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
3324
3325 /* Tell the ASF firmware we are up */
3326 if (sc->bge_asf_mode & ASF_STACKUP)
3327 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3328
3329 /*
3330 * The 5704 in TBI mode apparently needs some special
3331 	 * adjustment to ensure the SERDES drive level is set
3332 * to 1.2V.
3333 */
3334 if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
3335 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3336 uint32_t serdescfg;
3337
3338 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
3339 serdescfg = (serdescfg & ~0xFFF) | 0x880;
3340 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
3341 }
3342
3343 if (sc->bge_flags & BGE_PCIE &&
3344 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3345 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3346 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3347 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
3348 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57766) {
3349 uint32_t v;
3350
3351 /* Enable PCI Express bug fix */
3352 v = CSR_READ_4(sc, 0x7c00);
3353 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
3354 }
3355 DELAY(10000);
3356
3357 return 0;
3358 }
3359
3360 /*
3361 * Frame reception handling. This is called if there's a frame
3362 * on the receive return list.
3363 *
3364 * Note: we have to be able to handle two possibilities here:
3365 * 1) the frame is from the jumbo receive ring
3366 * 2) the frame is from the standard receive ring
3367 */
3368
3369 static void
3370 bge_rxeof(struct bge_softc *sc)
3371 {
3372 struct ifnet *ifp;
3373 uint16_t rx_prod, rx_cons;
3374 int stdcnt = 0, jumbocnt = 0;
3375 bus_dmamap_t dmamap;
3376 bus_addr_t offset, toff;
3377 bus_size_t tlen;
3378 int tosync;
3379
3380 rx_cons = sc->bge_rx_saved_considx;
3381 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
3382
3383 /* Nothing to do */
3384 if (rx_cons == rx_prod)
3385 return;
3386
3387 ifp = &sc->ethercom.ec_if;
3388
3389 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3390 offsetof(struct bge_ring_data, bge_status_block),
3391 sizeof (struct bge_status_block),
3392 BUS_DMASYNC_POSTREAD);
3393
3394 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3395 tosync = rx_prod - rx_cons;
3396
3397 if (tosync != 0)
3398 rnd_add_uint32(&sc->rnd_source, tosync);
3399
3400 toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
3401
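	/*
	 * A negative count means the producer index has wrapped past the
	 * end of the return ring: sync the tail of the ring first, then
	 * fall through and sync from the start of the ring as well.
	 */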
3402 if (tosync < 0) {
3403 tlen = (sc->bge_return_ring_cnt - rx_cons) *
3404 sizeof (struct bge_rx_bd);
3405 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3406 toff, tlen, BUS_DMASYNC_POSTREAD);
3407 tosync = -tosync;
3408 }
3409
3410 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3411 offset, tosync * sizeof (struct bge_rx_bd),
3412 BUS_DMASYNC_POSTREAD);
3413
3414 while (rx_cons != rx_prod) {
3415 struct bge_rx_bd *cur_rx;
3416 uint32_t rxidx;
3417 struct mbuf *m = NULL;
3418
3419 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
3420
3421 rxidx = cur_rx->bge_idx;
3422 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3423
3424 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3425 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3426 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3427 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3428 jumbocnt++;
3429 bus_dmamap_sync(sc->bge_dmatag,
3430 sc->bge_cdata.bge_rx_jumbo_map,
3431 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
3432 BGE_JLEN, BUS_DMASYNC_POSTREAD);
3433 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3434 ifp->if_ierrors++;
3435 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3436 continue;
3437 }
3438 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
3439 NULL) == ENOBUFS) {
3440 ifp->if_ierrors++;
3441 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3442 continue;
3443 }
3444 } else {
3445 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3446 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3447
3448 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3449 stdcnt++;
3450 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3451 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
3452 if (dmamap == NULL) {
3453 ifp->if_ierrors++;
3454 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3455 continue;
3456 }
3457 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3458 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3459 bus_dmamap_unload(sc->bge_dmatag, dmamap);
3460 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3461 ifp->if_ierrors++;
3462 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3463 continue;
3464 }
3465 if (bge_newbuf_std(sc, sc->bge_std,
3466 NULL, dmamap) == ENOBUFS) {
3467 ifp->if_ierrors++;
3468 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3469 continue;
3470 }
3471 }
3472
3473 ifp->if_ipackets++;
3474 #ifndef __NO_STRICT_ALIGNMENT
3475 /*
3476 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
3477 * the Rx buffer has the layer-2 header unaligned.
3478 * If our CPU requires alignment, re-align by copying.
3479 */
3480 if (sc->bge_flags & BGE_RX_ALIGNBUG) {
3481 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
3482 cur_rx->bge_len);
3483 m->m_data += ETHER_ALIGN;
3484 }
3485 #endif
3486
3487 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3488 m->m_pkthdr.rcvif = ifp;
3489
3490 /*
3491 * Handle BPF listeners. Let the BPF user see the packet.
3492 */
3493 bpf_mtap(ifp, m);
3494
3495 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
3496
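/* The chip's IP-header checksum computes to 0xffff over a valid
 * header, so any other value marks the packet bad. */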
3497 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
3498 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
3499 /*
3500 * Rx transport checksum-offload may also
3501 * have bugs with packets which, when transmitted,
3502 * were `runts' requiring padding.
3503 */
3504 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3505 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
3506 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
3507 m->m_pkthdr.csum_data =
3508 cur_rx->bge_tcp_udp_csum;
3509 m->m_pkthdr.csum_flags |=
3510 (M_CSUM_TCPv4|M_CSUM_UDPv4|
3511 M_CSUM_DATA);
3512 }
3513
3514 /*
3515 * If we received a packet with a vlan tag, pass it
3516 * to vlan_input() instead of ether_input().
3517 */
3518 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3519 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue);
3520 }
3521
3522 (*ifp->if_input)(ifp, m);
3523 }
3524
3525 sc->bge_rx_saved_considx = rx_cons;
3526 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3527 if (stdcnt)
3528 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3529 if (jumbocnt)
3530 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3531 }
3532
3533 static void
3534 bge_txeof(struct bge_softc *sc)
3535 {
3536 struct bge_tx_bd *cur_tx = NULL;
3537 struct ifnet *ifp;
3538 struct txdmamap_pool_entry *dma;
3539 bus_addr_t offset, toff;
3540 bus_size_t tlen;
3541 int tosync;
3542 struct mbuf *m;
3543
3544 ifp = &sc->ethercom.ec_if;
3545
3546 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3547 offsetof(struct bge_ring_data, bge_status_block),
3548 sizeof (struct bge_status_block),
3549 BUS_DMASYNC_POSTREAD);
3550
3551 offset = offsetof(struct bge_ring_data, bge_tx_ring);
3552 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
3553 sc->bge_tx_saved_considx;
3554
3555 if (tosync != 0)
3556 rnd_add_uint32(&sc->rnd_source, tosync);
3557
3558 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
3559
3560 if (tosync < 0) {
3561 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
3562 sizeof (struct bge_tx_bd);
3563 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3564 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3565 tosync = -tosync;
3566 }
3567
3568 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3569 offset, tosync * sizeof (struct bge_tx_bd),
3570 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3571
3572 /*
3573 * Go through our tx ring and free mbufs for those
3574 * frames that have been sent.
3575 */
3576 while (sc->bge_tx_saved_considx !=
3577 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
3578 uint32_t idx = 0;
3579
3580 idx = sc->bge_tx_saved_considx;
3581 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
3582 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3583 ifp->if_opackets++;
3584 m = sc->bge_cdata.bge_tx_chain[idx];
3585 if (m != NULL) {
3586 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3587 dma = sc->txdma[idx];
3588 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
3589 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3590 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
3591 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
3592 sc->txdma[idx] = NULL;
3593
3594 m_freem(m);
3595 }
3596 sc->bge_txcnt--;
3597 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3598 ifp->if_timer = 0;
3599 }
3600
3601 if (cur_tx != NULL)
3602 ifp->if_flags &= ~IFF_OACTIVE;
3603 }
3604
3605 static int
3606 bge_intr(void *xsc)
3607 {
3608 struct bge_softc *sc;
3609 struct ifnet *ifp;
3610 uint32_t statusword;
3611
3612 sc = xsc;
3613 ifp = &sc->ethercom.ec_if;
3614
3615 /* It is possible for the interrupt to arrive before the
3616  * status block has been updated. Reading the PCI State
3617  * register both confirms whether the interrupt is ours
3618  * and flushes the status block.
3619  */
3620
3621 /* read status word from status block */
3622 statusword = sc->bge_rdata->bge_status_block.bge_status;
3623
3624 if ((statusword & BGE_STATFLAG_UPDATED) ||
3625 (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
3626 /* Ack interrupt and stop others from occurring. */
3627 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3628
3629 BGE_EVCNT_INCR(sc->bge_ev_intr);
3630
3631 /* clear status word */
3632 sc->bge_rdata->bge_status_block.bge_status = 0;
3633
3634 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3635 statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
3636 BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
3637 bge_link_upd(sc);
3638
3639 if (ifp->if_flags & IFF_RUNNING) {
3640 /* Check RX return ring producer/consumer */
3641 bge_rxeof(sc);
3642
3643 /* Check TX ring producer/consumer */
3644 bge_txeof(sc);
3645 }
3646
3647 if (sc->bge_pending_rxintr_change) {
3648 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
3649 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
3650 uint32_t junk;
3651
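/* The read-backs below flush the preceding register writes to the
 * chip; the fetched values are deliberately discarded. */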
3652 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
3653 DELAY(10);
3654 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3655
3656 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
3657 DELAY(10);
3658 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3659
3660 sc->bge_pending_rxintr_change = 0;
3661 }
3662 bge_handle_events(sc);
3663
3664 /* Re-enable interrupts. */
3665 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3666
3667 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
3668 bge_start(ifp);
3669
3670 return 1;
3671 } else
3672 return 0;
3673 }
3674
3675 static void
3676 bge_asf_driver_up(struct bge_softc *sc)
3677 {
3678 if (sc->bge_asf_mode & ASF_STACKUP) {
3679 /* Send an ASF heartbeat approximately every 2s */
3680 if (sc->bge_asf_count)
3681 sc->bge_asf_count--;
3682 else {
3683 sc->bge_asf_count = 2;
3684 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3685 BGE_FW_DRV_ALIVE);
3686 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3687 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3688 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3689 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3690 }
3691 }
3692 }
3693
3694 static void
3695 bge_tick(void *xsc)
3696 {
3697 struct bge_softc *sc = xsc;
3698 struct mii_data *mii = &sc->bge_mii;
3699 int s;
3700
3701 s = splnet();
3702
3703 if (BGE_IS_5705_PLUS(sc))
3704 bge_stats_update_regs(sc);
3705 else
3706 bge_stats_update(sc);
3707
3708 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3709 /*
3710  * Since auto-polling can't be used in TBI mode, we poll the
3711  * link status manually. Here we register a pending link event
3712  * and trigger an interrupt.
3713 */
3714 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3715 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3716 } else {
3717 /*
3718  * Do not touch the PHY if we have link up. Doing so could break
3719  * IPMI/ASF mode or produce extra input errors
3720  * (extra input errors were reported for bcm5701 & bcm5704).
3721 */
3722 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3723 mii_tick(mii);
3724 }
3725
3726 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3727
3728 splx(s);
3729 }
3730
3731 static void
3732 bge_stats_update_regs(struct bge_softc *sc)
3733 {
3734 struct ifnet *ifp = &sc->ethercom.ec_if;
3735
3736 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3737 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3738
3739 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3740 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3741 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3742 }
3743
3744 static void
3745 bge_stats_update(struct bge_softc *sc)
3746 {
3747 struct ifnet *ifp = &sc->ethercom.ec_if;
3748 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3749
3750 #define READ_STAT(sc, stats, stat) \
3751 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
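/*
 * For example, READ_STAT(sc, stats, outXoffSent.bge_addr_lo) expands to
 * CSR_READ_4(sc, stats + offsetof(struct bge_stats, outXoffSent.bge_addr_lo)),
 * i.e. a 32-bit read of the low word of that counter in the NIC's
 * statistics block.
 */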
3752
3753 ifp->if_collisions +=
3754 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3755 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3756 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3757 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3758 ifp->if_collisions;
3759
3760 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
3761 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
3762 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
3763 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
3764 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
3765 READ_STAT(sc, stats,
3766 xoffPauseFramesReceived.bge_addr_lo));
3767 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
3768 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
3769 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
3770 READ_STAT(sc, stats,
3771 macControlFramesReceived.bge_addr_lo));
3772 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
3773 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
3774
3775 #undef READ_STAT
3776
3777 #ifdef notdef
3778 ifp->if_collisions +=
3779 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3780 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3781 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3782 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3783 ifp->if_collisions;
3784 #endif
3785 }
3786
3787 /*
3788 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3789 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3790 * but when such padded frames employ the bge IP/TCP checksum offload,
3791 * the hardware checksum assist gives incorrect results (possibly
3792 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3793 * If we pad such runts with zeros, the onboard checksum comes out correct.
3794 */
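/*
 * Worked example: a 42-byte UDP frame is below ETHER_MIN_NOPAD (60
 * bytes on Ethernet: the 64-byte minimum frame less the 4-byte FCS),
 * so padlen = 60 - 42 = 18 zero bytes are appended before the checksum
 * is offloaded, and the hardware then checksums exactly the bytes that
 * will appear on the wire.
 */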
3795 static inline int
3796 bge_cksum_pad(struct mbuf *pkt)
3797 {
3798 struct mbuf *last = NULL;
3799 int padlen;
3800
3801 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3802
3803 /* If there's only the packet header and we can pad there, use it. */
3804 if (pkt->m_pkthdr.len == pkt->m_len &&
3805 M_TRAILINGSPACE(pkt) >= padlen) {
3806 last = pkt;
3807 } else {
3808 /*
3809 * Walk packet chain to find last mbuf. We will either
3810 * pad there, or append a new mbuf and pad it
3811 * (thus perhaps avoiding the bcm5700 dma-min bug).
3812 */
3813 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3814 continue; /* do nothing */
3815 }
3816
3817 /* `last' now points to last in chain. */
3818 if (M_TRAILINGSPACE(last) < padlen) {
3819 /* Allocate new empty mbuf, pad it. Compact later. */
3820 struct mbuf *n;
3821 MGET(n, M_DONTWAIT, MT_DATA);
3822 if (n == NULL)
3823 return ENOBUFS;
3824 n->m_len = 0;
3825 last->m_next = n;
3826 last = n;
3827 }
3828 }
3829
3830 KDASSERT(!M_READONLY(last));
3831 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
3832
3833 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3834 memset(mtod(last, char *) + last->m_len, 0, padlen);
3835 last->m_len += padlen;
3836 pkt->m_pkthdr.len += padlen;
3837 return 0;
3838 }
3839
3840 /*
3841 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3842 */
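/*
 * For example, a chain with mbuf lengths 14/6/1500 has a 6-byte runt
 * in the middle.  If the 14-byte mbuf has 6 bytes of trailing space,
 * the runt is folded into it (giving 20/1500); failing that, 2 bytes
 * are pulled up from the following mbuf (giving 14/8/1498); as a last
 * resort, bytes are borrowed from the tail of the preceding mbuf into
 * a freshly allocated one.
 */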
3843 static inline int
3844 bge_compact_dma_runt(struct mbuf *pkt)
3845 {
3846 struct mbuf *m, *prev;
3847 int totlen, prevlen;
3848
3849 prev = NULL;
3850 totlen = 0;
3851 prevlen = -1;
3852
3853 for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3854 int mlen = m->m_len;
3855 int shortfall = 8 - mlen;
3856
3857 totlen += mlen;
3858 if (mlen == 0)
3859 continue;
3860 if (mlen >= 8)
3861 continue;
3862
3863 /* If we get here, the mbuf data is too small for the DMA engine.
3864  * Try to fix this by shuffling data to the previous or next mbuf in
3865  * the chain. If that fails, do a compacting deep-copy of the whole chain.
3866 */
3867
3868 /* Internal fragmentation: if it fits in prev, copy it there. */
3869 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
3870 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
3871 prev->m_len += mlen;
3872 m->m_len = 0;
3873 /* XXX stitch chain */
3874 prev->m_next = m_free(m);
3875 m = prev;
3876 continue;
3877 }
3878 else if (m->m_next != NULL &&
3879 M_TRAILINGSPACE(m) >= shortfall &&
3880 m->m_next->m_len >= (8 + shortfall)) {
3881 /* m is writable and there's enough data in next; pull up. */
3882
3883 memcpy(m->m_data + m->m_len, m->m_next->m_data,
3884 shortfall);
3885 m->m_len += shortfall;
3886 m->m_next->m_len -= shortfall;
3887 m->m_next->m_data += shortfall;
3888 }
3889 else {
3890 /* A runt we could not pull up (e.g. at the very end
3891  * of the packet): borrow data from the tail of the
3892  * preceding mbuf and update both lengths in place.
3893  * (The original data is still valid, so we can do this
3894  * even if prev is not writable.) */
3895
3896 /* if we'd make prev a runt, just move all of its data. */
3897 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3898 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3899
3900 if ((prev->m_len - shortfall) < 8)
3901 shortfall = prev->m_len;
3902
3903 #ifdef notyet /* just do the safe slow thing for now */
3904 if (!M_READONLY(m)) {
3905 if (M_LEADINGSPACE(m) < shortfall) {
3906 void *m_dat;
3907 m_dat = (m->m_flags & M_PKTHDR) ?
3908 m->m_pktdat : m->m_dat;
3909 memmove(m_dat, mtod(m, void*), m->m_len);
3910 m->m_data = m_dat;
3911 }
3912 } else
3913 #endif /* just do the safe slow thing */
3914 {
3915 struct mbuf * n = NULL;
3916 int newprevlen = prev->m_len - shortfall;
3917
3918 MGET(n, M_NOWAIT, MT_DATA);
3919 if (n == NULL)
3920 return ENOBUFS;
3921 KASSERT(m->m_len + shortfall < MLEN
3922 /*,
3923 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3924
3925 /* first copy the data we're stealing from prev */
3926 memcpy(n->m_data, prev->m_data + newprevlen,
3927 shortfall);
3928
3929 /* update prev->m_len accordingly */
3930 prev->m_len -= shortfall;
3931
3932 /* copy data from runt m */
3933 memcpy(n->m_data + shortfall, m->m_data,
3934 m->m_len);
3935
3936 /* n holds what we stole from prev, plus m */
3937 n->m_len = shortfall + m->m_len;
3938
3939 /* stitch n into chain and free m */
3940 n->m_next = m->m_next;
3941 prev->m_next = n;
3942 /* KASSERT(m->m_next == NULL); */
3943 m->m_next = NULL;
3944 m_free(m);
3945 m = n; /* for continuing loop */
3946 }
3947 }
3948 prevlen = m->m_len;
3949 }
3950 return 0;
3951 }
3952
3953 /*
3954 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3955 * pointers to descriptors.
3956 */
3957 static int
3958 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
3959 {
3960 struct bge_tx_bd *f = NULL;
3961 uint32_t frag, cur;
3962 uint16_t csum_flags = 0;
3963 uint16_t txbd_tso_flags = 0;
3964 struct txdmamap_pool_entry *dma;
3965 bus_dmamap_t dmamap;
3966 int i = 0;
3967 struct m_tag *mtag;
3968 int use_tso, maxsegsize, error;
3969
3970 cur = frag = *txidx;
3971
3972 if (m_head->m_pkthdr.csum_flags) {
3973 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3974 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3975 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3976 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3977 }
3978
3979 /*
3980 * If we were asked to do an outboard checksum, and the NIC
3981 * has the bug where it sometimes adds in the Ethernet padding,
3982 * explicitly pad with zeros so the cksum will be correct either way.
3983 * (For now, do this for all chip versions, until newer
3984 * are confirmed to not require the workaround.)
3985 */
3986 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3987 #ifdef notyet
3988 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3989 #endif
3990 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3991 goto check_dma_bug;
3992
3993 if (bge_cksum_pad(m_head) != 0)
3994 return ENOBUFS;
3995
3996 check_dma_bug:
3997 if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
3998 goto doit;
3999
4000 /*
4001  * bcm5700 Revision B silicon cannot handle DMA descriptors shorter
4002  * than eight bytes. If we encounter a teeny mbuf
4003 * at the end of a chain, we can pad. Otherwise, copy.
4004 */
4005 if (bge_compact_dma_runt(m_head) != 0)
4006 return ENOBUFS;
4007
4008 doit:
4009 dma = SLIST_FIRST(&sc->txdma_list);
4010 if (dma == NULL)
4011 return ENOBUFS;
4012 dmamap = dma->dmamap;
4013
4014 /*
4015 * Set up any necessary TSO state before we start packing...
4016 */
4017 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4018 if (!use_tso) {
4019 maxsegsize = 0;
4020 } else { /* TSO setup */
4021 unsigned mss;
4022 struct ether_header *eh;
4023 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
4024 struct mbuf * m0 = m_head;
4025 struct ip *ip;
4026 struct tcphdr *th;
4027 int iphl, hlen;
4028
4029 /*
4030 * XXX It would be nice if the mbuf pkthdr had offset
4031 * fields for the protocol headers.
4032 */
4033
4034 eh = mtod(m0, struct ether_header *);
4035 switch (ntohs(eh->ether_type)) {
4036 case ETHERTYPE_IP:
4037 offset = ETHER_HDR_LEN;
4038 break;
4039
4040 case ETHERTYPE_VLAN:
4041 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4042 break;
4043
4044 default:
4045 /*
4046 * Don't support this protocol or encapsulation.
4047 */
4048 return ENOBUFS;
4049 }
4050
4051 /*
4052 * TCP/IP headers are in the first mbuf; we can do
4053 * this the easy way.
4054 */
4055 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4056 hlen = iphl + offset;
4057 if (__predict_false(m0->m_len <
4058 (hlen + sizeof(struct tcphdr)))) {
4059
4060 aprint_debug_dev(sc->bge_dev,
4061 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zu, "
4062 "not handled yet\n",
4063 m0->m_len, hlen + sizeof(struct tcphdr));
4064 #ifdef NOTYET
4065 /*
4066 * XXX jonathan (at) NetBSD.org: untested.
4067 * how to force this branch to be taken?
4068 */
4069 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);
4070
4071 m_copydata(m0, offset, sizeof(ip), &ip);
4072 m_copydata(m0, hlen, sizeof(th), &th);
4073
4074 ip.ip_len = 0;
4075
4076 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
4077 sizeof(ip.ip_len), &ip.ip_len);
4078
4079 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4080 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4081
4082 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4083 sizeof(th.th_sum), &th.th_sum);
4084
4085 hlen += th.th_off << 2;
4086 iptcp_opt_words = hlen;
4087 #else
4088 /*
4089 * if_wm "hard" case not yet supported, can we not
4090 * mandate it out of existence?
4091 */
4092 (void)ip; (void)th; (void)ip_tcp_hlen;
4093
4094 return ENOBUFS;
4095 #endif
4096 } else {
4097 ip = (struct ip *) (mtod(m0, char *) + offset);
4098 th = (struct tcphdr *) (mtod(m0, char *) + hlen);
4099 ip_tcp_hlen = iphl + (th->th_off << 2);
4100
4101 /* Total IP/TCP options, in 32-bit words */
4102 iptcp_opt_words = (ip_tcp_hlen
4103 - sizeof(struct tcphdr)
4104 - sizeof(struct ip)) >> 2;
4105 }
4106 if (BGE_IS_575X_PLUS(sc)) {
4107 th->th_sum = 0;
4108 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
4109 } else {
4110 /*
4111 * XXX jonathan (at) NetBSD.org: 5705 untested.
4112 * Requires TSO firmware patch for 5701/5703/5704.
4113 */
4114 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4115 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4116 }
4117
4118 mss = m_head->m_pkthdr.segsz;
4119 txbd_tso_flags |=
4120 BGE_TXBDFLAG_CPU_PRE_DMA |
4121 BGE_TXBDFLAG_CPU_POST_DMA;
4122
4123 /*
4124 * Our NIC TSO-assist assumes TSO has standard, optionless
4125 * IPv4 and TCP headers, which total 40 bytes. By default,
4126 * the NIC copies 40 bytes of IP/TCP header from the
4127 * supplied header into the IP/TCP header portion of
4128 * each post-TSO-segment. If the supplied packet has IP or
4129 * TCP options, we need to tell the NIC to copy those extra
4130 * bytes into each post-TSO header, in addition to the normal
4131 * 40-byte IP/TCP header (and to leave space accordingly).
4132  * Unfortunately, the descriptor encoding of option length
4133  * varies across different ASIC families.
4134 */
4135 tcp_seg_flags = 0;
4136 if (iptcp_opt_words) {
4137 if (BGE_IS_5705_PLUS(sc)) {
4138 tcp_seg_flags =
4139 iptcp_opt_words << 11;
4140 } else {
4141 txbd_tso_flags |=
4142 iptcp_opt_words << 12;
4143 }
4144 }
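/*
 * Worked example: 12 bytes of TCP options make iptcp_opt_words == 3.
 * On 5705-plus parts that becomes tcp_seg_flags == 3 << 11 == 0x1800,
 * merged into the mss/bge_rsvd value just below; on older parts the
 * same count is encoded as 3 << 12 == 0x3000 in txbd_tso_flags.
 */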
4145 maxsegsize = mss | tcp_seg_flags;
4146 ip->ip_len = htons(mss + ip_tcp_hlen);
4147
4148 } /* TSO setup */
4149
4150 /*
4151 * Start packing the mbufs in this chain into
4152 * the fragment pointers. Stop when we run out
4153 * of fragments or hit the end of the mbuf chain.
4154 */
4155 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
4156 BUS_DMA_NOWAIT);
4157 if (error)
4158 return ENOBUFS;
4159 /*
4160 * Sanity check: avoid coming within 16 descriptors
4161 * of the end of the ring.
4162 */
4163 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
4164 BGE_TSO_PRINTF(("%s: "
4165 "dmamap_load_mbuf too close to ring wrap\n",
4166 device_xname(sc->bge_dev)));
4167 goto fail_unload;
4168 }
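/*
 * E.g. with the usual BGE_TX_RING_CNT of 512 and 100 descriptors
 * already in flight, a mapping of more than 396 segments
 * (512 - 100 - 16) is refused, so at least 16 descriptors of
 * headroom always remain ahead of the producer.
 */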
4169
4170 mtag = sc->ethercom.ec_nvlans ?
4171 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
4172
4173
4174 /* Iterate over dma-map fragments. */
4175 for (i = 0; i < dmamap->dm_nsegs; i++) {
4176 f = &sc->bge_rdata->bge_tx_ring[frag];
4177 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4178 break;
4179
4180 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4181 f->bge_len = dmamap->dm_segs[i].ds_len;
4182
4183 /*
4184 * For 5751 and follow-ons, for TSO we must turn
4185 * off checksum-assist flag in the tx-descr, and
4186 * supply the ASIC-revision-specific encoding
4187 * of TSO flags and segsize.
4188 */
4189 if (use_tso) {
4190 if (BGE_IS_575X_PLUS(sc) || i == 0) {
4191 f->bge_rsvd = maxsegsize;
4192 f->bge_flags = csum_flags | txbd_tso_flags;
4193 } else {
4194 f->bge_rsvd = 0;
4195 f->bge_flags =
4196 (csum_flags | txbd_tso_flags) & 0x0fff;
4197 }
4198 } else {
4199 f->bge_rsvd = 0;
4200 f->bge_flags = csum_flags;
4201 }
4202
4203 if (mtag != NULL) {
4204 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4205 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
4206 } else {
4207 f->bge_vlan_tag = 0;
4208 }
4209 cur = frag;
4210 BGE_INC(frag, BGE_TX_RING_CNT);
4211 }
4212
4213 if (i < dmamap->dm_nsegs) {
4214 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
4215 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
4216 goto fail_unload;
4217 }
4218
4219 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4220 BUS_DMASYNC_PREWRITE);
4221
4222 if (frag == sc->bge_tx_saved_considx) {
4223 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
4224 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
4225
4226 goto fail_unload;
4227 }
4228
4229 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4230 sc->bge_cdata.bge_tx_chain[cur] = m_head;
4231 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
4232 sc->txdma[cur] = dma;
4233 sc->bge_txcnt += dmamap->dm_nsegs;
4234
4235 *txidx = frag;
4236
4237 return 0;
4238
4239 fail_unload:
4240 bus_dmamap_unload(sc->bge_dmatag, dmamap);
4241
4242 return ENOBUFS;
4243 }
4244
4245 /*
4246 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4247 * to the mbuf data regions directly in the transmit descriptors.
4248 */
4249 static void
4250 bge_start(struct ifnet *ifp)
4251 {
4252 struct bge_softc *sc;
4253 struct mbuf *m_head = NULL;
4254 uint32_t prodidx;
4255 int pkts = 0;
4256
4257 sc = ifp->if_softc;
4258
4259 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4260 return;
4261
4262 prodidx = sc->bge_tx_prodidx;
4263
4264 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
4265 IFQ_POLL(&ifp->if_snd, m_head);
4266 if (m_head == NULL)
4267 break;
4268
4269 #if 0
4270 /*
4271 * XXX
4272 * safety overkill. If this is a fragmented packet chain
4273 * with delayed TCP/UDP checksums, then only encapsulate
4274 * it if we have enough descriptors to handle the entire
4275 * chain at once.
4276 * (paranoia -- may not actually be needed)
4277 */
4278 if (m_head->m_flags & M_FIRSTFRAG &&
4279 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4280 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4281 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
4282 ifp->if_flags |= IFF_OACTIVE;
4283 break;
4284 }
4285 }
4286 #endif
4287
4288 /*
4289 * Pack the data into the transmit ring. If we
4290 * don't have room, set the OACTIVE flag and wait
4291 * for the NIC to drain the ring.
4292 */
4293 if (bge_encap(sc, m_head, &prodidx)) {
4294 ifp->if_flags |= IFF_OACTIVE;
4295 break;
4296 }
4297
4298 /* now we are committed to transmit the packet */
4299 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4300 pkts++;
4301
4302 /*
4303 * If there's a BPF listener, bounce a copy of this frame
4304 * to him.
4305 */
4306 bpf_mtap(ifp, m_head);
4307 }
4308 if (pkts == 0)
4309 return;
4310
4311 /* Transmit */
4312 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4313 /* 5700 b2 errata */
4314 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
4315 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4316
4317 sc->bge_tx_prodidx = prodidx;
4318
4319 /*
4320 * Set a timeout in case the chip goes out to lunch.
4321 */
4322 ifp->if_timer = 5;
4323 }
4324
4325 static int
4326 bge_init(struct ifnet *ifp)
4327 {
4328 struct bge_softc *sc = ifp->if_softc;
4329 const uint16_t *m;
4330 uint32_t mode;
4331 int s, error = 0;
4332
4333 s = splnet();
4334
4335 ifp = &sc->ethercom.ec_if;
4336
4337 /* Cancel pending I/O and flush buffers. */
4338 bge_stop(ifp, 0);
4339
4340 bge_stop_fw(sc);
4341 bge_sig_pre_reset(sc, BGE_RESET_START);
4342 bge_reset(sc);
4343 bge_sig_legacy(sc, BGE_RESET_START);
4344 bge_sig_post_reset(sc, BGE_RESET_START);
4345
4346 bge_chipinit(sc);
4347
4348 /*
4349 * Init the various state machines, ring
4350 * control blocks and firmware.
4351 */
4352 error = bge_blockinit(sc);
4353 if (error != 0) {
4354 aprint_error_dev(sc->bge_dev, "initialization error %d\n",
4355 error);
4356 splx(s);
4357 return error;
4358 }
4359
4360 ifp = &sc->ethercom.ec_if;
4361
4362 /* Specify MTU. */
4363 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4364 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
4365
4366 /* Load our MAC address. */
4367 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
4368 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4369 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
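/*
 * For example, 00:11:22:33:44:55 is programmed as
 * BGE_MAC_ADDR1_LO = 0x0011 (first two octets) and
 * BGE_MAC_ADDR1_HI = 0x22334455 (remaining four); the htons() calls
 * make the composition independent of host byte order.
 */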
4370
4371 /* Enable or disable promiscuous mode as needed. */
4372 if (ifp->if_flags & IFF_PROMISC)
4373 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4374 else
4375 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4376
4377 /* Program multicast filter. */
4378 bge_setmulti(sc);
4379
4380 /* Init RX ring. */
4381 bge_init_rx_ring_std(sc);
4382
4383 /*
4384 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4385 * memory to ensure that the chip has in fact read the first
4386 * entry of the ring.
4387 */
4388 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4389 uint32_t v, i;
4390 for (i = 0; i < 10; i++) {
4391 DELAY(20);
4392 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4393 if (v == (MCLBYTES - ETHER_ALIGN))
4394 break;
4395 }
4396 if (i == 10)
4397 aprint_error_dev(sc->bge_dev,
4398 "5705 A0 chip failed to load RX ring\n");
4399 }
4400
4401 /* Init jumbo RX ring. */
4402 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
4403 bge_init_rx_ring_jumbo(sc);
4404
4405 /* Init our RX return ring index */
4406 sc->bge_rx_saved_considx = 0;
4407
4408 /* Init TX ring. */
4409 bge_init_tx_ring(sc);
4410
4411 /* Enable TX MAC state machine lockup fix. */
4412 mode = CSR_READ_4(sc, BGE_TX_MODE);
4413 if (BGE_IS_5755_PLUS(sc) ||
4414 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4415 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4416
4417 /* Turn on transmitter */
4418 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4419 DELAY(100);
4420
4421 /* Turn on receiver */
4422 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4423 DELAY(10);
4424
4425 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4426
4427 /* Tell firmware we're alive. */
4428 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4429
4430 /* Enable host interrupts. */
4431 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4432 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4433 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4434
4435 if ((error = bge_ifmedia_upd(ifp)) != 0)
4436 goto out;
4437
4438 ifp->if_flags |= IFF_RUNNING;
4439 ifp->if_flags &= ~IFF_OACTIVE;
4440
4441 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
4442
4443 out:
4444 sc->bge_if_flags = ifp->if_flags;
4445 splx(s);
4446
4447 return error;
4448 }
4449
4450 /*
4451 * Set media options.
4452 */
4453 static int
4454 bge_ifmedia_upd(struct ifnet *ifp)
4455 {
4456 struct bge_softc *sc = ifp->if_softc;
4457 struct mii_data *mii = &sc->bge_mii;
4458 struct ifmedia *ifm = &sc->bge_ifmedia;
4459 int rc;
4460
4461 /* If this is a 1000baseX NIC, enable the TBI port. */
4462 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4463 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4464 return EINVAL;
4465 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4466 case IFM_AUTO:
4467 /*
4468 * The BCM5704 ASIC appears to have a special
4469 * mechanism for programming the autoneg
4470 * advertisement registers in TBI mode.
4471 */
4472 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4473 uint32_t sgdig;
4474 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4475 if (sgdig & BGE_SGDIGSTS_DONE) {
4476 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4477 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4478 sgdig |= BGE_SGDIGCFG_AUTO |
4479 BGE_SGDIGCFG_PAUSE_CAP |
4480 BGE_SGDIGCFG_ASYM_PAUSE;
4481 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4482 sgdig | BGE_SGDIGCFG_SEND);
4483 DELAY(5);
4484 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4485 }
4486 }
4487 break;
4488 case IFM_1000_SX:
4489 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4490 BGE_CLRBIT(sc, BGE_MAC_MODE,
4491 BGE_MACMODE_HALF_DUPLEX);
4492 } else {
4493 BGE_SETBIT(sc, BGE_MAC_MODE,
4494 BGE_MACMODE_HALF_DUPLEX);
4495 }
4496 break;
4497 default:
4498 return EINVAL;
4499 }
4500 /* XXX 802.3x flow control for 1000BASE-SX */
4501 return 0;
4502 }
4503
4504 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4505 if ((rc = mii_mediachg(mii)) == ENXIO)
4506 return 0;
4507
4508 /*
4509 * Force an interrupt so that we will call bge_link_upd
4510 * if needed and clear any pending link state attention.
4511  * Without this we would get no further interrupts for link
4512  * state changes, would never mark the link UP, and so could
4513  * not send from bge_start. The only other way to get
4514  * things working was to receive a packet and get an RX intr.
4515 */
4516 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4517 sc->bge_flags & BGE_IS_5788)
4518 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4519 else
4520 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4521
4522 return rc;
4523 }
4524
4525 /*
4526 * Report current media status.
4527 */
4528 static void
4529 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4530 {
4531 struct bge_softc *sc = ifp->if_softc;
4532 struct mii_data *mii = &sc->bge_mii;
4533
4534 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4535 ifmr->ifm_status = IFM_AVALID;
4536 ifmr->ifm_active = IFM_ETHER;
4537 if (CSR_READ_4(sc, BGE_MAC_STS) &
4538 BGE_MACSTAT_TBI_PCS_SYNCHED)
4539 ifmr->ifm_status |= IFM_ACTIVE;
4540 ifmr->ifm_active |= IFM_1000_SX;
4541 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4542 ifmr->ifm_active |= IFM_HDX;
4543 else
4544 ifmr->ifm_active |= IFM_FDX;
4545 return;
4546 }
4547
4548 mii_pollstat(mii);
4549 ifmr->ifm_status = mii->mii_media_status;
4550 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4551 sc->bge_flowflags;
4552 }
4553
4554 static int
4555 bge_ifflags_cb(struct ethercom *ec)
4556 {
4557 struct ifnet *ifp = &ec->ec_if;
4558 struct bge_softc *sc = ifp->if_softc;
4559 int change = ifp->if_flags ^ sc->bge_if_flags;
4560
4561 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
4562 return ENETRESET;
4563 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
4564 return 0;
4565
4566 if ((ifp->if_flags & IFF_PROMISC) == 0)
4567 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4568 else
4569 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4570
4571 bge_setmulti(sc);
4572
4573 sc->bge_if_flags = ifp->if_flags;
4574 return 0;
4575 }
4576
4577 static int
4578 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
4579 {
4580 struct bge_softc *sc = ifp->if_softc;
4581 struct ifreq *ifr = (struct ifreq *) data;
4582 int s, error = 0;
4583 struct mii_data *mii;
4584
4585 s = splnet();
4586
4587 switch (command) {
4588 case SIOCSIFMEDIA:
4589 /* XXX Flow control is not supported for 1000BASE-SX */
4590 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4591 ifr->ifr_media &= ~IFM_ETH_FMASK;
4592 sc->bge_flowflags = 0;
4593 }
4594
4595 /* Flow control requires full-duplex mode. */
4596 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4597 (ifr->ifr_media & IFM_FDX) == 0) {
4598 ifr->ifr_media &= ~IFM_ETH_FMASK;
4599 }
4600 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4601 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4602 /* We can do both TXPAUSE and RXPAUSE. */
4603 ifr->ifr_media |=
4604 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4605 }
4606 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4607 }
4608 /* FALLTHROUGH */
4609 case SIOCGIFMEDIA:
4610 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4611 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4612 command);
4613 } else {
4614 mii = &sc->bge_mii;
4615 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4616 command);
4617 }
4618 break;
4619 default:
4620 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
4621 break;
4622
4623 error = 0;
4624
4625 if ((command == SIOCADDMULTI ||
4626 command == SIOCDELMULTI) &&
4627 (ifp->if_flags & IFF_RUNNING))
4628 bge_setmulti(sc);
4629 break;
4630 }
4631
4632 splx(s);
4633
4634 return error;
4635 }
4636
4637 static void
4638 bge_watchdog(struct ifnet *ifp)
4639 {
4640 struct bge_softc *sc;
4641
4642 sc = ifp->if_softc;
4643
4644 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
4645
4646 ifp->if_flags &= ~IFF_RUNNING;
4647 bge_init(ifp);
4648
4649 ifp->if_oerrors++;
4650 }
4651
4652 static void
4653 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4654 {
4655 int i;
4656
4657 BGE_CLRBIT(sc, reg, bit);
4658
4659 for (i = 0; i < 1000; i++) {
4660 if ((CSR_READ_4(sc, reg) & bit) == 0)
4661 return;
4662 delay(100);
4663 }
4664
4665 /*
4666  * Don't print an error when the register is BGE_SRS_MODE: that block
4667  * fails to stop in some environments (and once right after boot?).
4668 */
4669 if (reg != BGE_SRS_MODE)
4670 aprint_error_dev(sc->bge_dev,
4671 "block failed to stop: reg 0x%lx, bit 0x%08x\n",
4672 (u_long)reg, bit);
4673 }
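/*
 * The polling loop above bounds the wait at 1000 iterations of
 * delay(100), i.e. roughly 100 ms, before the stop is reported
 * as having failed.
 */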
4674
4675 /*
4676 * Stop the adapter and free any mbufs allocated to the
4677 * RX and TX lists.
4678 */
4679 static void
4680 bge_stop(struct ifnet *ifp, int disable)
4681 {
4682 struct bge_softc *sc = ifp->if_softc;
4683
4684 callout_stop(&sc->bge_timeout);
4685
4686 /*
4687 * Tell firmware we're shutting down.
4688 */
4689 bge_stop_fw(sc);
4690 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4691
4692 /* Disable host interrupts. */
4693 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4694 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4695
4696 /*
4697 * Disable all of the receiver blocks.
4698 */
4699 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4700 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4701 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4702 if (BGE_IS_5700_FAMILY(sc))
4703 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4704 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4705 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4706 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4707
4708 /*
4709 * Disable all of the transmit blocks.
4710 */
4711 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4712 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4713 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4714 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4715 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4716 if (BGE_IS_5700_FAMILY(sc))
4717 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4718 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4719
4720 /*
4721 * Shut down all of the memory managers and related
4722 * state machines.
4723 */
4724 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4725 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4726 if (BGE_IS_5700_FAMILY(sc))
4727 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4728
4729 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4730 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4731
4732 if (BGE_IS_5700_FAMILY(sc)) {
4733 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4734 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4735 }
4736
4737 bge_reset(sc);
4738 bge_sig_legacy(sc, BGE_RESET_STOP);
4739 bge_sig_post_reset(sc, BGE_RESET_STOP);
4740
4741 /*
4742 * Keep the ASF firmware running if up.
4743 */
4744 if (sc->bge_asf_mode & ASF_STACKUP)
4745 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4746 else
4747 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4748
4749 /* Free the RX lists. */
4750 bge_free_rx_ring_std(sc);
4751
4752 /* Free jumbo RX list. */
4753 if (BGE_IS_JUMBO_CAPABLE(sc))
4754 bge_free_rx_ring_jumbo(sc);
4755
4756 /* Free TX buffers. */
4757 bge_free_tx_ring(sc);
4758
4759 /*
4760 * Isolate/power down the PHY.
4761 */
4762 if (!(sc->bge_flags & BGE_PHY_FIBER_TBI))
4763 mii_down(&sc->bge_mii);
4764
4765 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4766
4767 /* Clear MAC's link state (PHY may still have link UP). */
4768 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4769
4770 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4771 }
4772
4773 static void
4774 bge_link_upd(struct bge_softc *sc)
4775 {
4776 struct ifnet *ifp = &sc->ethercom.ec_if;
4777 struct mii_data *mii = &sc->bge_mii;
4778 uint32_t status;
4779 int link;
4780
4781 /* Clear 'pending link event' flag */
4782 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
4783
4784 /*
4785 * Process link state changes.
4786 * Grrr. The link status word in the status block does
4787 * not work correctly on the BCM5700 rev AX and BX chips,
4788 * according to all available information. Hence, we have
4789 * to enable MII interrupts in order to properly obtain
4790 * async link changes. Unfortunately, this also means that
4791 * we have to read the MAC status register to detect link
4792 * changes, thereby adding an additional register access to
4793 * the interrupt handler.
4794 */
4795
4796 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
4797 status = CSR_READ_4(sc, BGE_MAC_STS);
4798 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4799 mii_pollstat(mii);
4800
4801 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4802 mii->mii_media_status & IFM_ACTIVE &&
4803 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4804 BGE_STS_SETBIT(sc, BGE_STS_LINK);
4805 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4806 (!(mii->mii_media_status & IFM_ACTIVE) ||
4807 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4808 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4809
4810 /* Clear the interrupt */
4811 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4812 BGE_EVTENB_MI_INTERRUPT);
4813 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4814 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4815 BRGPHY_INTRS);
4816 }
4817 return;
4818 }
4819
4820 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4821 status = CSR_READ_4(sc, BGE_MAC_STS);
4822 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4823 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4824 BGE_STS_SETBIT(sc, BGE_STS_LINK);
4825 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
4826 BGE_CLRBIT(sc, BGE_MAC_MODE,
4827 BGE_MACMODE_TBI_SEND_CFGS);
4828 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4829 if_link_state_change(ifp, LINK_STATE_UP);
4830 }
4831 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
4832 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4833 if_link_state_change(ifp, LINK_STATE_DOWN);
4834 }
4835 /*
4836  * Discard link events for MII/GMII cards if MI auto-polling is
4837  * disabled. This should not happen since the mii callouts are
4838  * locked now, but we keep this check for debugging.
4839 */
4840 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
4841 /*
4842  * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4843  * bit in the status word always set. Work around this bug by
4844  * reading the PHY link status directly.
4845 */
4846 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
4847 BGE_STS_LINK : 0;
4848
4849 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
4850 mii_pollstat(mii);
4851
4852 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4853 mii->mii_media_status & IFM_ACTIVE &&
4854 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4855 BGE_STS_SETBIT(sc, BGE_STS_LINK);
4856 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4857 (!(mii->mii_media_status & IFM_ACTIVE) ||
4858 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4859 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4860 }
4861 }
4862
4863 /* Clear the attention */
4864 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4865 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4866 BGE_MACSTAT_LINK_CHANGED);
4867 }
4868
4869 static int
4870 bge_sysctl_verify(SYSCTLFN_ARGS)
4871 {
4872 int error, t;
4873 struct sysctlnode node;
4874
4875 node = *rnode;
4876 t = *(int*)rnode->sysctl_data;
4877 node.sysctl_data = &t;
4878 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4879 if (error || newp == NULL)
4880 return error;
4881
4882 #if 0
4883 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
4884 node.sysctl_num, rnode->sysctl_num));
4885 #endif
4886
4887 if (node.sysctl_num == bge_rxthresh_nodenum) {
4888 if (t < 0 || t >= NBGE_RX_THRESH)
4889 return EINVAL;
4890 bge_update_all_threshes(t);
4891 } else
4892 return EINVAL;
4893
4894 *(int*)rnode->sysctl_data = t;
4895
4896 return 0;
4897 }
4898
4899 /*
4900 * Set up sysctl(3) MIB, hw.bge.*.
4901 */
4902 static void
4903 bge_sysctl_init(struct bge_softc *sc)
4904 {
4905 int rc, bge_root_num;
4906 const struct sysctlnode *node;
4907
4908 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, NULL,
4909 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
4910 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
4911 goto out;
4912 }
4913
4914 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
4915 0, CTLTYPE_NODE, "bge",
4916 SYSCTL_DESCR("BGE interface controls"),
4917 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
4918 goto out;
4919 }
4920
4921 bge_root_num = node->sysctl_num;
4922
4923 /* BGE Rx interrupt mitigation level */
4924 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
4925 CTLFLAG_READWRITE,
4926 CTLTYPE_INT, "rx_lvl",
4927 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
4928 bge_sysctl_verify, 0,
4929 &bge_rx_thresh_lvl,
4930 0, CTL_HW, bge_root_num, CTL_CREATE,
4931 CTL_EOL)) != 0) {
4932 goto out;
4933 }
4934
4935 bge_rxthresh_nodenum = node->sysctl_num;
4936
4937 return;
4938
4939 out:
4940 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
4941 }
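/*
 * Usage sketch: the mitigation level can then be inspected or changed
 * from userland with sysctl(8), e.g.
 *
 *	sysctl hw.bge.rx_lvl
 *	sysctl -w hw.bge.rx_lvl=2
 *
 * (the example value 2 is hypothetical; bge_sysctl_verify() accepts
 * any value in [0, NBGE_RX_THRESH)).
 */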
4942
4943 #ifdef BGE_DEBUG
4944 void
4945 bge_debug_info(struct bge_softc *sc)
4946 {
4947
4948 printf("Hardware Flags:\n");
4949 if (BGE_IS_5755_PLUS(sc))
4950 printf(" - 5755 Plus\n");
4951 if (BGE_IS_575X_PLUS(sc))
4952 printf(" - 575X Plus\n");
4953 if (BGE_IS_5705_PLUS(sc))
4954 printf(" - 5705 Plus\n");
4955 if (BGE_IS_5714_FAMILY(sc))
4956 printf(" - 5714 Family\n");
4957 if (BGE_IS_5700_FAMILY(sc))
4958 printf(" - 5700 Family\n");
4959 if (sc->bge_flags & BGE_IS_5788)
4960 printf(" - 5788\n");
4961 if (sc->bge_flags & BGE_JUMBO_CAPABLE)
4962 printf(" - Supports Jumbo Frames\n");
4963 if (sc->bge_flags & BGE_NO_EEPROM)
4964 printf(" - No EEPROM\n");
4965 if (sc->bge_flags & BGE_PCIX)
4966 printf(" - PCI-X Bus\n");
4967 if (sc->bge_flags & BGE_PCIE)
4968 printf(" - PCI Express Bus\n");
4969 if (sc->bge_flags & BGE_NO_3LED)
4970 printf(" - No 3 LEDs\n");
4971 if (sc->bge_flags & BGE_RX_ALIGNBUG)
4972 printf(" - RX Alignment Bug\n");
4973 if (sc->bge_flags & BGE_TSO)
4974 printf(" - TSO\n");
4975 }
4976 #endif /* BGE_DEBUG */
4977
4978 static int
4979 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4980 {
4981 prop_dictionary_t dict;
4982 prop_data_t ea;
4983
4984 if ((sc->bge_flags & BGE_NO_EEPROM) == 0)
4985 return 1;
4986
4987 dict = device_properties(sc->bge_dev);
4988 ea = prop_dictionary_get(dict, "mac-address");
4989 if (ea != NULL) {
4990 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
4991 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
4992 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
4993 return 0;
4994 }
4995
4996 return 1;
4997 }
4998
4999 static int
5000 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5001 {
5002 uint32_t mac_addr;
5003
5004 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
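/* 0x484b is ASCII "HK", apparently a tag indicating that a valid
 * station address follows in SRAM. */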
5005 if ((mac_addr >> 16) == 0x484b) {
5006 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5007 ether_addr[1] = (uint8_t)mac_addr;
5008 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
5009 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5010 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5011 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5012 ether_addr[5] = (uint8_t)mac_addr;
5013 return 0;
5014 }
5015 return 1;
5016 }
5017
5018 static int
5019 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5020 {
5021 int mac_offset = BGE_EE_MAC_OFFSET;
5022
5023 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5024 mac_offset = BGE_EE_MAC_OFFSET_5906;
5025
5026 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5027 ETHER_ADDR_LEN));
5028 }
5029
5030 static int
5031 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5032 {
5033
5034 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5035 return 1;
5036
5037 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5038 ETHER_ADDR_LEN));
5039 }
5040
5041 static int
5042 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5043 {
5044 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5045 /* NOTE: Order is critical */
5046 bge_get_eaddr_fw,
5047 bge_get_eaddr_mem,
5048 bge_get_eaddr_nvram,
5049 bge_get_eaddr_eeprom,
5050 NULL
5051 };
5052 const bge_eaddr_fcn_t *func;
5053
5054 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5055 if ((*func)(sc, eaddr) == 0)
5056 break;
5057 }
5058 return (*func == NULL ? ENXIO : 0);
5059 }
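/*
 * A minimal caller sketch (illustrative only; the real call site is
 * bge_attach()): try the sources in the order listed above and bail
 * out if none of them yields an address.
 */
#if 0
	uint8_t eaddr[ETHER_ADDR_LEN];

	if (bge_get_eaddr(sc, eaddr) != 0) {
		aprint_error_dev(sc->bge_dev,
		    "failed to read station address\n");
		return;
	}
#endif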
5060