/*	$NetBSD: if_bge.c,v 1.206 2013/02/26 11:06:23 msaitoh Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl (at) wasabisystems.com>
 *	Jason Thorpe <thorpej (at) wasabisystems.com>
 *	Jonathan Stone <jonathan (at) dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.206 2013/02/26 11:06:23 msaitoh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <sys/rnd.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */


#include <net/bpf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */

/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair. The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703. On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 16,   1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,   2 },
	{ 50,   4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
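/*
 * Illustrative example (not from the original table): with the
 * { 598, 46 } pair loaded into the chip's rx coalescing registers,
 * an rx interrupt is raised once either 598 coalescing ticks elapse
 * or 46 receive BDs accumulate, whichever happens first.
 */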

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

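/* Type of the MAC-address extraction methods (bge_get_eaddr_*) below. */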
typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static void bge_release_resources(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
static void bge_stop(struct ifnet *, int);
static void bge_watchdog(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, void *, int, int);
static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, void *, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
	    bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_setpowerstate(struct bge_softc *, int);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static void bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(struct ifnet *);

#define	BGE_RESET_START	1
#define	BGE_RESET_STOP	2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);
static void sysctl_bge_init(struct bge_softc *);
static int sysctl_bge_verify(SYSCTLFN_PROTO);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x; } while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM. We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5724,
	  "Broadcom BCM5724 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750M,
	  "Broadcom BCM5750M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906M,
	  "Broadcom BCM5906M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57760,
	  "Broadcom BCM57760 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57761,
	  "Broadcom BCM57761 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57762,
	  "Broadcom BCM57762 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57765,
	  "Broadcom BCM57765 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57780,
	  "Broadcom BCM57780 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57781,
	  "Broadcom BCM57781 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57785,
	  "Broadcom BCM57785 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57788,
	  "Broadcom BCM57788 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57790,
	  "Broadcom BCM57790 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57791,
	  "Broadcom BCM57791 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57795,
	  "Broadcom BCM57795 Fast Ethernet",
	},
	{ PCI_VENDOR_SCHNEIDERKOCH,
	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
	  "SysKonnect SK-9Dx1 Gigabit Ethernet",
	},
	{ PCI_VENDOR_3COM,
	  PCI_PRODUCT_3COM_3C996,
	  "3Com 3c996 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE4,
	  "Fujitsu PW008GE4 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE5,
	  "Fujitsu PW008GE5 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
	  "Fujitsu Primepower 250/450 Gigabit Ethernet",
	},
	{ 0,
	  0,
	  NULL },
};

/*
 * XXX: how to handle variants based on the 5750 and its derivatives:
 * 5750, 5751, 5721, possibly 5714, 5752, and 5708(?), which
 * in general behave like a 5705, except with additional quirks.
 * This driver's current handling of the 5721 is wrong;
 * how we map ASIC revision to "quirks" needs more thought.
 * (defined here until the thought is done).
 */
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5750_OR_BEYOND(sc)	((sc)->bge_flags & BGE_5750_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)

static const struct bge_revision {
	uint32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

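/*
 * Indirect NIC-memory access: write the target offset into the PCI
 * memory-window base-address register, then transfer the word through
 * the companion data register.  This reaches the chip's internal
 * memory through PCI config space.
 */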
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
}

/*
 * PCI Express only: make sure the device's maximum read request size
 * is set to 4096 bytes.
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR);
	if ((val & PCI_PCIE_DCSR_MAX_READ_REQ) !=
	    BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
		aprint_verbose_dev(sc->bge_dev,
		    "adjust device control 0x%04x ", val);
		val &= ~PCI_PCIE_DCSR_MAX_READ_REQ;
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
		    + PCI_PCIE_DCSR, val);
		aprint_verbose("-> 0x%04x\n", val);
	}
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
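	/* The 5906 takes mailbox writes through its alternate
	 * low-priority mailbox window; rebase the offset from the
	 * standard BGE_MBX_* range into the BGE_LPMBX_* range. */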
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

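	/* The command returns a whole 32-bit word; byte-swap it and
	 * shift the byte selected by (addr % 4) into the low 8 bits. */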
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		error = bge_nvram_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (phy != 1)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(40);
	}

	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending. Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan (at) NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);
}

/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */
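
/*
 * One large DMA-safe chunk (BGE_JMEM bytes) is allocated up front and
 * carved into BGE_JSLOTS buffers of BGE_JLEN bytes each; free and
 * in-use buffers are tracked on the bge_jfree_listhead and
 * bge_jinuse_listhead lists below.
 */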

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
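	/*
	 * Offset the payload by ETHER_ALIGN (2) bytes so the IP header
	 * ends up 32-bit aligned, except on chips flagged with the RX
	 * alignment bug, which cannot DMA to a shifted buffer.
	 */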
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {
		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return ENOBUFS;
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "can't alloc txdmamap_pool_entry\n");
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return ENOMEM;
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return 0;
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	uint32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
		h &= 0x7f;

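		/* Bits 6:5 of the hash select one of the four 32-bit
		 * BGE_MAR registers; bits 4:0 select the bit within it. */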
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

static int
bge_poll_fw(struct bge_softc *sc)
{
	uint32_t val;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
			return -1;
		}
	} else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) {
		/*
		 * Poll the value location we just wrote until
		 * we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			return -1;
		}
	}

	return 0;
}
1776
1777 /*
1778 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1779 * self-test results.
1780 */
1781 static int
1782 bge_chipinit(struct bge_softc *sc)
1783 {
1784 int i;
1785 uint32_t dma_rw_ctl;
1786
1787 /* Set endianness before we access any non-PCI registers. */
1788 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
1789 BGE_INIT);
1790
1791 /* Set power state to D0. */
1792 bge_setpowerstate(sc, 0);
1793
1794 /* Clear the MAC control register */
1795 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1796
1797 /*
1798 * Clear the MAC statistics block in the NIC's
1799 * internal memory.
1800 */
1801 for (i = BGE_STATS_BLOCK;
1802 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1803 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
1804
1805 for (i = BGE_STATUS_BLOCK;
1806 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1807 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
1808
1809 /* Set up the PCI DMA control register. */
1810 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
1811 if (sc->bge_flags & BGE_PCIE) {
1812 /* Read watermark not used, 128 bytes for write. */
1813 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
1814 device_xname(sc->bge_dev)));
1815 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1816 } else if (sc->bge_flags & BGE_PCIX) {
1817 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n",
1818 device_xname(sc->bge_dev)));
1819 /* PCI-X bus */
1820 if (BGE_IS_5714_FAMILY(sc)) {
1821 /* 256 bytes for read and write. */
1822 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1823 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1824
1825 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1826 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1827 else
1828 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1829 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1830 /* 1536 bytes for read, 384 bytes for write. */
1831 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1832 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1833 } else {
1834 /* 384 bytes for read and write. */
1835 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1836 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1837 (0x0F);
1838 }
1839
1840 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1841 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1842 uint32_t tmp;
1843
1844 /* Set ONEDMA_ATONCE for hardware workaround. */
1845 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1846 if (tmp == 6 || tmp == 7)
1847 dma_rw_ctl |=
1848 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1849
1850 /* Set PCI-X DMA write workaround. */
1851 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1852 }
1853 } else {
1854 /* Conventional PCI bus: 256 bytes for read and write. */
1855 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
1856 device_xname(sc->bge_dev)));
1857 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1858 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1859
1860 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1861 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1862 dma_rw_ctl |= 0x0F;
1863 }
1864
1865 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1866 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1867 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1868 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1869
1870 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1871 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1872 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1873
1874 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1875 dma_rw_ctl);
1876
1877 /*
1878 * Set up general mode register.
1879 */
1880 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1881 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1882 BGE_MODECTL_TX_NO_PHDR_CSUM);
1883
1884 /*
1885 	 * The BCM5701 B5 has a bug causing data corruption when using
1886 * 64-bit DMA reads, which can be terminated early and then
1887 * completed later as 32-bit accesses, in combination with
1888 * certain bridges.
1889 */
1890 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1891 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1892 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1893
1894 /*
1895 * Tell the firmware the driver is running
1896 */
1897 if (sc->bge_asf_mode & ASF_STACKUP)
1898 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1899
1900 /*
1901 * Disable memory write invalidate. Apparently it is not supported
1902 * properly by these devices.
1903 */
1904 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
1905 PCI_COMMAND_INVALIDATE_ENABLE);
1906
1907 #ifdef __brokenalpha__
1908 /*
1909 	 * Must ensure that we do not cross an 8K (bytes) boundary
1910 * for DMA reads. Our highest limit is 1K bytes. This is a
1911 * restriction on some ALPHA platforms with early revision
1912 * 21174 PCI chipsets, such as the AlphaPC 164lx
1913 */
1914 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1915 #endif
1916
1917 /* Set the timer prescaler (always 66MHz) */
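/*
 * The prescaler field holds (core clock in MHz - 1), so 65 divides
 * the 66MHz core clock down to 1MHz timer ticks.
 */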
1918 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1919
1920 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1921 DELAY(40); /* XXX */
1922
1923 /* Put PHY into ready state */
1924 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1925 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1926 DELAY(40);
1927 }
1928
1929 return 0;
1930 }
1931
1932 static int
1933 bge_blockinit(struct bge_softc *sc)
1934 {
1935 volatile struct bge_rcb *rcb;
1936 bus_size_t rcb_addr;
1937 int i;
1938 struct ifnet *ifp = &sc->ethercom.ec_if;
1939 bge_hostaddr taddr;
1940 uint32_t val;
1941
1942 /*
1943 * Initialize the memory window pointer register so that
1944 * we can access the first 32K of internal NIC RAM. This will
1945 * allow us to set up the TX send ring RCBs and the RX return
1946 * ring RCBs, plus other things which live in NIC memory.
1947 */
1948
1949 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1950
1951 /* Step 33: Configure mbuf memory pool */
1952 if (BGE_IS_5700_FAMILY(sc)) {
1953 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1954 BGE_BUFFPOOL_1);
1955
1956 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1957 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1958 else
1959 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1960
1961 /* Configure DMA resource pool */
1962 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1963 BGE_DMA_DESCRIPTORS);
1964 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1965 }
1966
1967 /* Step 35: Configure mbuf pool watermarks */
1968 #ifdef ORIG_WPAUL_VALUES
1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1970 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1971 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1972 #else
1973
1974 /* new broadcom docs strongly recommend these: */
1975 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1976 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1977 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
1978 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1979 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1980 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1981 } else if (BGE_IS_5705_PLUS(sc)) {
1982 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1983
1984 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1985 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1986 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1987 } else {
1988 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1989 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1990 }
1991 } else if (!BGE_IS_5705_PLUS(sc)) {
1992 if (ifp->if_mtu > ETHER_MAX_LEN) {
1993 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1994 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1995 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1996 } else {
1997 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
1998 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
1999 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
2000 }
2001 } else {
2002 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2003 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2004 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2005 }
2006 #endif
2007
2008 /* Step 36: Configure DMA resource watermarks */
2009 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2010 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
2011
2012 /* Step 38: Enable buffer manager */
2013 CSR_WRITE_4(sc, BGE_BMAN_MODE,
2014 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
2015
2016 /* Step 39: Poll for buffer manager start indication */
2017 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2018 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2019 break;
2020 DELAY(10);
2021 }
2022
2023 if (i == BGE_TIMEOUT * 2) {
2024 aprint_error_dev(sc->bge_dev,
2025 "buffer manager failed to start\n");
2026 return ENXIO;
2027 }
2028
2029 /* Step 40: Enable flow-through queues */
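/*
 * Writing all ones pulses a reset to every flow-through queue; the
 * register reads back as zero once the queues come out of reset,
 * which is what the poll below waits for.
 */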
2030 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2031 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2032
2033 /* Wait until queue initialization is complete */
2034 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2035 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2036 break;
2037 DELAY(10);
2038 }
2039
2040 if (i == BGE_TIMEOUT * 2) {
2041 aprint_error_dev(sc->bge_dev,
2042 "flow-through queue init failed\n");
2043 return ENXIO;
2044 }
2045
2046 /* Step 41: Initialize the standard RX ring control block */
2047 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
2048 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
2049 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2050 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2051 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766)
2052 rcb->bge_maxlen_flags =
2053 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
2054 else if (BGE_IS_5705_PLUS(sc))
2055 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2056 else
2057 rcb->bge_maxlen_flags =
2058 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2059 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2060 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2061 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2062 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2063 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2064
2065 /*
2066 * Step 42: Initialize the jumbo RX ring control block
2067 * We set the 'ring disabled' bit in the flags
2068 * field until we're actually ready to start
2069 * using this ring (i.e. once we set the MTU
2070 * high enough to require it).
2071 */
2072 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2073 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2074 BGE_HOSTADDR(rcb->bge_hostaddr,
2075 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2076 rcb->bge_maxlen_flags =
2077 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
2078 BGE_RCB_FLAG_RING_DISABLED);
2079 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2080 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2081 rcb->bge_hostaddr.bge_addr_hi);
2082 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2083 rcb->bge_hostaddr.bge_addr_lo);
2084 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2085 rcb->bge_maxlen_flags);
2086 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2087
2088 /* Set up dummy disabled mini ring RCB */
2089 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2090 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2091 BGE_RCB_FLAG_RING_DISABLED);
2092 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2093 rcb->bge_maxlen_flags);
2094
2095 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2096 offsetof(struct bge_ring_data, bge_info),
2097 sizeof (struct bge_gib),
2098 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2099 }
2100
2101 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2102 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2103 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2104 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2105 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2106 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2107 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2108 }
2109 /*
2110 * Set the BD ring replenish thresholds. The recommended
2111 * values are 1/8th the number of descriptors allocated to
2112 * each ring.
2113 */
2114 i = BGE_STD_RX_RING_CNT / 8;
2115
2116 /*
2117 	 * Use a value of 8 for the following chips to work around HW errata.
2118 * Some of these chips have been added based on empirical
2119 * evidence (they don't work unless this is done).
2120 */
2121 if (BGE_IS_5705_PLUS(sc))
2122 i = 8;
2123
2124 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
2125 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
2126
2127 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2128 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2129 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
2130 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2131 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2132 }
2133
2134 /*
2135 * Disable all unused send rings by setting the 'ring disabled'
2136 * bit in the flags field of all the TX send ring control blocks.
2137 * These are located in NIC memory.
2138 */
2139 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2140 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
2141 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2142 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2143 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2144 rcb_addr += sizeof(struct bge_rcb);
2145 }
2146
2147 /* Configure TX RCB 0 (we use only the first ring) */
2148 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2149 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2150 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2151 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2152 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2153 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2154 if (BGE_IS_5700_FAMILY(sc))
2155 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2156 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2157
2158 /* Disable all unused RX return rings */
2159 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2160 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
2161 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2162 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2163 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2164 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2165 BGE_RCB_FLAG_RING_DISABLED));
2166 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2167 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2168 (i * (sizeof(uint64_t))), 0);
2169 rcb_addr += sizeof(struct bge_rcb);
2170 }
2171
2172 /* Initialize RX ring indexes */
2173 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2174 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2175 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2176
2177 /*
2178 * Set up RX return ring 0
2179 * Note that the NIC address for RX return rings is 0x00000000.
2180 * The return rings live entirely within the host, so the
2181 * nicaddr field in the RCB isn't used.
2182 */
2183 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2184 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2185 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2186 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2187 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2188 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2189 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2190
2191 /* Set random backoff seed for TX */
2192 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2193 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2194 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2195 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] +
2196 BGE_TX_BACKOFF_SEED_MASK);
2197
2198 /* Set inter-packet gap */
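/*
 * 0x2620 packs the transmit length fields: slot time 0x20, IPG 6
 * and IPG/CRS 2, matching the TX_LENGTHS setup in the Linux tg3
 * driver.
 */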
2199 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
2200
2201 /*
2202 * Specify which ring to use for packets that don't match
2203 * any RX rules.
2204 */
2205 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2206
2207 /*
2208 * Configure number of RX lists. One interrupt distribution
2209 * list, sixteen active lists, one bad frames class.
2210 */
2211 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2212
2213 	/* Initialize RX list placement stats mask. */
2214 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2215 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2216
2217 /* Disable host coalescing until we get it set up */
2218 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2219
2220 /* Poll to make sure it's shut down. */
2221 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2222 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2223 break;
2224 DELAY(10);
2225 }
2226
2227 if (i == BGE_TIMEOUT * 2) {
2228 aprint_error_dev(sc->bge_dev,
2229 "host coalescing engine failed to idle\n");
2230 return ENXIO;
2231 }
2232
2233 /* Set up host coalescing defaults */
2234 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2235 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2236 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2237 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2238 if (BGE_IS_5700_FAMILY(sc)) {
2239 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2240 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2241 }
2242 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2243 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2244
2245 /* Set up address of statistics block */
2246 if (BGE_IS_5700_FAMILY(sc)) {
2247 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2248 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2249 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2250 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2251 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2252 }
2253
2254 /* Set up address of status block */
2255 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2256 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2257 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2258 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2259 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2260 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2261
2262 /* Turn on host coalescing state machine */
2263 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2264
2265 /* Turn on RX BD completion state machine and enable attentions */
2266 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2267 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2268
2269 /* Turn on RX list placement state machine */
2270 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2271
2272 /* Turn on RX list selector state machine. */
2273 if (BGE_IS_5700_FAMILY(sc))
2274 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2275
2276 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2277 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2278 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2279 BGE_MACMODE_FRMHDR_DMA_ENB;
2280
2281 if (sc->bge_flags & BGE_PHY_FIBER_TBI)
2282 val |= BGE_PORTMODE_TBI;
2283 else if (sc->bge_flags & BGE_PHY_FIBER_MII)
2284 val |= BGE_PORTMODE_GMII;
2285 else
2286 val |= BGE_PORTMODE_MII;
2287
2288 /* Turn on DMA, clear stats */
2289 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2290
2291 /* Set misc. local control, enable interrupts on attentions */
2292 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
2293
2294 #ifdef notdef
2295 /* Assert GPIO pins for PHY reset */
2296 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2297 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2298 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2299 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2300 #endif
2301
2302 #if defined(not_quite_yet)
2303 	/* The Linux driver enables GPIO pin #1 on 5700s */
2304 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
2305 sc->bge_local_ctrl_reg |=
2306 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
2307 }
2308 #endif
2309 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2310
2311 /* Turn on DMA completion state machine */
2312 if (BGE_IS_5700_FAMILY(sc))
2313 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2314
2315 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2316
2317 /* Enable host coalescing bug fix; see Linux tg3.c */
2318 if (BGE_IS_5755_PLUS(sc))
2319 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2320
2321 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2322 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2323
2324 /* Turn on write DMA state machine */
2325 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2326
2327 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2328 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2329 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2330 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2331 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2332 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2333 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2334
2335 if (sc->bge_flags & BGE_PCIE)
2336 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2337 if (sc->bge_flags & BGE_TSO)
2338 val |= BGE_RDMAMODE_TSO4_ENABLE;
2339
2340 /* Turn on read DMA state machine */
2341 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2342 delay(40);
2343
2344 /* Turn on RX data completion state machine */
2345 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2346
2347 /* Turn on RX BD initiator state machine */
2348 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2349
2350 /* Turn on RX data and RX BD initiator state machine */
2351 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2352
2353 /* Turn on Mbuf cluster free state machine */
2354 if (BGE_IS_5700_FAMILY(sc))
2355 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2356
2357 /* Turn on send BD completion state machine */
2358 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2359
2360 /* Turn on send data completion state machine */
2361 val = BGE_SDCMODE_ENABLE;
2362 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2363 val |= BGE_SDCMODE_CDELAY;
2364 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2365
2366 /* Turn on send data initiator state machine */
2367 if (sc->bge_flags & BGE_TSO) {
2368 /* XXX: magic value from Linux driver */
2369 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
2370 } else
2371 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2372
2373 /* Turn on send BD initiator state machine */
2374 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2375
2376 /* Turn on send BD selector state machine */
2377 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2378
2379 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2380 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2381 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2382
2383 /* ack/clear link change events */
2384 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2385 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2386 BGE_MACSTAT_LINK_CHANGED);
2387 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2388
2389 /* Enable PHY auto polling (for MII/GMII only) */
2390 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2391 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2392 } else {
2393 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
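/* (10 << 16) presumably programs the PHY auto-polling interval. */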
2394 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
2395 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2396 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2397 BGE_EVTENB_MI_INTERRUPT);
2398 }
2399
2400 /*
2401 * Clear any pending link state attention.
2402 * Otherwise some link state change events may be lost until attention
2403 * is cleared by bge_intr() -> bge_link_upd() sequence.
2404 * It's not necessary on newer BCM chips - perhaps enabling link
2405 * state change attentions implies clearing pending attention.
2406 */
2407 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2408 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2409 BGE_MACSTAT_LINK_CHANGED);
2410
2411 /* Enable link state change attentions. */
2412 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2413
2414 return 0;
2415 }
2416
2417 static const struct bge_revision *
2418 bge_lookup_rev(uint32_t chipid)
2419 {
2420 const struct bge_revision *br;
2421
2422 for (br = bge_revisions; br->br_name != NULL; br++) {
2423 if (br->br_chipid == chipid)
2424 return br;
2425 }
2426
2427 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2428 if (br->br_chipid == BGE_ASICREV(chipid))
2429 return br;
2430 }
2431
2432 return NULL;
2433 }
2434
2435 static const struct bge_product *
2436 bge_lookup(const struct pci_attach_args *pa)
2437 {
2438 const struct bge_product *bp;
2439
2440 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2441 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2442 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2443 return bp;
2444 }
2445
2446 return NULL;
2447 }
2448
2449 static int
2450 bge_setpowerstate(struct bge_softc *sc, int powerlevel)
2451 {
2452 #ifdef NOTYET
2453 uint32_t pm_ctl = 0;
2454
2455 /* XXX FIXME: make sure indirect accesses enabled? */
2456 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2457 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2458 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2459
2460 /* clear the PME_assert bit and power state bits, enable PME */
2461 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2462 pm_ctl &= ~PCIM_PSTAT_DMASK;
2463 pm_ctl |= (1 << 8);
2464
2465 if (powerlevel == 0) {
2466 pm_ctl |= PCIM_PSTAT_D0;
2467 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2468 pm_ctl, 2);
2469 DELAY(10000);
2470 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2471 DELAY(10000);
2472
2473 #ifdef NOTYET
2474 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2475 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2476 #endif
2477 DELAY(40); DELAY(40); DELAY(40);
2478 DELAY(10000); /* above not quite adequate on 5700 */
2479 return 0;
2480 }
2481
2482
2483 /*
2484 * Entering ACPI power states D1-D3 is achieved by wiggling
2485 * GMII gpio pins. Example code assumes all hardware vendors
2486 * followed Broadcom's sample pcb layout. Until we verify that
2487 * for all supported OEM cards, states D1-D3 are unsupported.
2488 */
2489 aprint_error_dev(sc->bge_dev,
2490 "power state %d unimplemented; check GPIO pins\n",
2491 powerlevel);
2492 #endif
2493 return EOPNOTSUPP;
2494 }
2495
2496
2497 /*
2498 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2499 * against our list and return its name if we find a match. Note
2500 * that since the Broadcom controller contains VPD support, we
2501 * can get the device name string from the controller itself instead
2502 * of the compiled-in string. This is a little slow, but it guarantees
2503 * we'll always announce the right product name.
2504 */
2505 static int
2506 bge_probe(device_t parent, cfdata_t match, void *aux)
2507 {
2508 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2509
2510 if (bge_lookup(pa) != NULL)
2511 return 1;
2512
2513 return 0;
2514 }
2515
2516 static void
2517 bge_attach(device_t parent, device_t self, void *aux)
2518 {
2519 struct bge_softc *sc = device_private(self);
2520 struct pci_attach_args *pa = aux;
2521 prop_dictionary_t dict;
2522 const struct bge_product *bp;
2523 const struct bge_revision *br;
2524 pci_chipset_tag_t pc;
2525 pci_intr_handle_t ih;
2526 const char *intrstr = NULL;
2527 bus_dma_segment_t seg;
2528 int rseg;
2529 uint32_t hwcfg = 0;
2530 uint32_t command;
2531 struct ifnet *ifp;
2532 uint32_t misccfg;
2533 void * kva;
2534 u_char eaddr[ETHER_ADDR_LEN];
2535 pcireg_t memtype, subid;
2536 bus_addr_t memaddr;
2537 bus_size_t memsize;
2538 uint32_t pm_ctl;
2539 bool no_seeprom;
2540
2541 bp = bge_lookup(pa);
2542 KASSERT(bp != NULL);
2543
2544 sc->sc_pc = pa->pa_pc;
2545 sc->sc_pcitag = pa->pa_tag;
2546 sc->bge_dev = self;
2547
2548 pc = sc->sc_pc;
2549 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
2550
2551 aprint_naive(": Ethernet controller\n");
2552 aprint_normal(": %s\n", bp->bp_name);
2553
2554 /*
2555 * Map control/status registers.
2556 */
2557 DPRINTFN(5, ("Map control/status regs\n"));
2558 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
2559 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2560 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
2561 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
2562
2563 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2564 aprint_error_dev(sc->bge_dev,
2565 "failed to enable memory mapping!\n");
2566 return;
2567 }
2568
2569 DPRINTFN(5, ("pci_mem_find\n"));
2570 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
2571 switch (memtype) {
2572 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2573 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2574 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2575 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2576 &memaddr, &memsize) == 0)
2577 break;
2578 default:
2579 aprint_error_dev(sc->bge_dev, "can't find mem space\n");
2580 return;
2581 }
2582
2583 DPRINTFN(5, ("pci_intr_map\n"));
2584 if (pci_intr_map(pa, &ih)) {
2585 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n");
2586 return;
2587 }
2588
2589 DPRINTFN(5, ("pci_intr_string\n"));
2590 intrstr = pci_intr_string(pc, ih);
2591
2592 DPRINTFN(5, ("pci_intr_establish\n"));
2593 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2594
2595 if (sc->bge_intrhand == NULL) {
2596 aprint_error_dev(sc->bge_dev,
2597 "couldn't establish interrupt%s%s\n",
2598 intrstr ? " at " : "", intrstr ? intrstr : "");
2599 return;
2600 }
2601 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
2602
2603 /*
2604 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2605 * can clobber the chip's PCI config-space power control registers,
2606 * leaving the card in D3 powersave state.
2607 * We do not have memory-mapped registers in this state,
2608 * so force device into D0 state before starting initialization.
2609 */
2610 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
2611 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2612 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2613 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2614 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2615
2616 /*
2617 * Save ASIC rev.
2618 */
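/*
 * The 16-bit chip ID lives in the top half of BGE_PCI_MISC_CTL;
 * BGE_ASICREV() and BGE_CHIPREV() derive successively coarser
 * revision fields from it.  Newer parts that cannot encode their
 * revision there report BGE_ASICREV_USE_PRODID_REG and are decoded
 * from the product-ID registers below.
 */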
2619 sc->bge_chipid =
2620 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2621 >> BGE_PCIMISCCTL_ASICREV_SHIFT;
2622
2623 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2624 switch (PCI_PRODUCT(pa->pa_id)) {
2625 case PCI_PRODUCT_BROADCOM_BCM5717:
2626 case PCI_PRODUCT_BROADCOM_BCM5718:
2627 case PCI_PRODUCT_BROADCOM_BCM5724: /* ??? */
2628 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2629 BGE_PCI_GEN2_PRODID_ASICREV);
2630 break;
2631 case PCI_PRODUCT_BROADCOM_BCM57761:
2632 case PCI_PRODUCT_BROADCOM_BCM57762:
2633 case PCI_PRODUCT_BROADCOM_BCM57765:
2634 case PCI_PRODUCT_BROADCOM_BCM57781:
2635 case PCI_PRODUCT_BROADCOM_BCM57785:
2636 case PCI_PRODUCT_BROADCOM_BCM57791:
2637 case PCI_PRODUCT_BROADCOM_BCM57795:
2638 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2639 BGE_PCI_GEN15_PRODID_ASICREV);
2640 break;
2641 default:
2642 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2643 BGE_PCI_PRODID_ASICREV);
2644 break;
2645 }
2646 }
2647
2648 if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
2649 &sc->bge_pciecap, NULL) != 0)
2650 || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) {
2651 /* PCIe */
2652 sc->bge_flags |= BGE_PCIE;
2653 bge_set_max_readrq(sc);
2654 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
2655 BGE_PCISTATE_PCI_BUSMODE) == 0) {
2656 /* PCI-X */
2657 sc->bge_flags |= BGE_PCIX;
2658 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
2659 &sc->bge_pcixcap, NULL) == 0)
2660 aprint_error_dev(sc->bge_dev,
2661 "unable to find PCIX capability\n");
2662 }
2663
2664 /* chipid */
2665 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2666 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
2667 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2668 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2669 sc->bge_flags |= BGE_5700_FAMILY;
2670
2671 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
2672 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
2673 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
2674 sc->bge_flags |= BGE_5714_FAMILY;
2675
2676 /* Intentionally exclude BGE_ASICREV_BCM5906 */
2677 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2678 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2679 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2680 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2681 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2682 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
2683 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2684 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766 ||
2685 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2686 sc->bge_flags |= BGE_5755_PLUS;
2687
2688 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
2689 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2690 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
2691 BGE_IS_5755_PLUS(sc) ||
2692 BGE_IS_5714_FAMILY(sc))
2693 sc->bge_flags |= BGE_5750_PLUS;
2694
2695 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
2696 BGE_IS_5750_OR_BEYOND(sc))
2697 sc->bge_flags |= BGE_5705_PLUS;
2698
2699 /*
2700 * When using the BCM5701 in PCI-X mode, data corruption has
2701 * been observed in the first few bytes of some received packets.
2702 * Aligning the packet buffer in memory eliminates the corruption.
2703 * Unfortunately, this misaligns the packet payloads. On platforms
2704 * which do not support unaligned accesses, we will realign the
2705 * payloads by copying the received packets.
2706 */
2707 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2708 sc->bge_flags & BGE_PCIX)
2709 sc->bge_flags |= BGE_RX_ALIGNBUG;
2710
2711 if (BGE_IS_5700_FAMILY(sc))
2712 sc->bge_flags |= BGE_JUMBO_CAPABLE;
2713
2714 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2715 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2716 PCI_VENDOR(subid) == PCI_VENDOR_DELL)
2717 sc->bge_flags |= BGE_NO_3LED;
2718
2719 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2720 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2721
2722 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2723 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2724 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2725 sc->bge_flags |= BGE_IS_5788;
2726
2727 	/*
2728 	 * Some controllers seem to require special firmware to use
2729 	 * TSO.  That firmware is not available to FreeBSD, and Linux
2730 	 * claims the TSO performed by the firmware is slower than
2731 	 * hardware-based TSO.  Moreover, the firmware-based TSO has a
2732 	 * known bug: it can't handle TSO when the ethernet header plus
2733 	 * IP/TCP header exceeds 80 bytes.  A workaround for that bug
2734 	 * exists, but it seems more expensive than simply not using
2735 	 * TSO.  Some chips also have TSO bugs of their own, so limit
2736 	 * TSO to the controllers that are known not to be affected
2737 	 * (e.g. 5755 or higher).
2738 	 */
2739 if (BGE_IS_5755_PLUS(sc)) {
2740 /*
2741 		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2742 		 * explicit device ID check is required.
2743 */
2744 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
2745 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
2746 sc->bge_flags |= BGE_TSO;
2747 }
2748
2749 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2750 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2751 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2752 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2753 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2754 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2755 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2756 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2757 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2758 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2759 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2760 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2761 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2762 sc->bge_flags |= BGE_10_100_ONLY;
2763
2764 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2765 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2766 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2767 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2768 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2769 sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
2770
2771 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2772 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2773 sc->bge_flags |= BGE_PHY_CRC_BUG;
2774 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2775 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2776 sc->bge_flags |= BGE_PHY_ADC_BUG;
2777 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2778 sc->bge_flags |= BGE_PHY_5704_A0_BUG;
2779
2780 if (BGE_IS_5705_PLUS(sc) &&
2781 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2782 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2783 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2784 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
2785 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57766 &&
2786 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) {
2787 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2788 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2789 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2790 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2791 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2792 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2793 sc->bge_flags |= BGE_PHY_JITTER_BUG;
2794 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2795 sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
2796 } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
2797 sc->bge_flags |= BGE_PHY_BER_BUG;
2798 }
2799
2800 /*
2801 * SEEPROM check.
2802 * First check if firmware knows we do not have SEEPROM.
2803 */
2804 if (prop_dictionary_get_bool(device_properties(self),
2805 "without-seeprom", &no_seeprom) && no_seeprom)
2806 sc->bge_flags |= BGE_NO_EEPROM;
2807
2808 /* Now check the 'ROM failed' bit on the RX CPU */
2809 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
2810 sc->bge_flags |= BGE_NO_EEPROM;
2811
2812 /* Try to reset the chip. */
2813 DPRINTFN(5, ("bge_reset\n"));
2814 bge_reset(sc);
2815
2816 sc->bge_asf_mode = 0;
2817 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2818 == BGE_MAGIC_NUMBER)) {
2819 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2820 & BGE_HWCFG_ASF) {
2821 sc->bge_asf_mode |= ASF_ENABLE;
2822 sc->bge_asf_mode |= ASF_STACKUP;
2823 if (BGE_IS_5750_OR_BEYOND(sc)) {
2824 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2825 }
2826 }
2827 }
2828
2829 /* Try to reset the chip again the nice way. */
2830 bge_stop_fw(sc);
2831 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2832 if (bge_reset(sc))
2833 aprint_error_dev(sc->bge_dev, "chip reset failed\n");
2834
2835 bge_sig_legacy(sc, BGE_RESET_STOP);
2836 bge_sig_post_reset(sc, BGE_RESET_STOP);
2837
2838 if (bge_chipinit(sc)) {
2839 aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
2840 bge_release_resources(sc);
2841 return;
2842 }
2843
2844 /*
2845 * Get station address from the EEPROM.
2846 */
2847 if (bge_get_eaddr(sc, eaddr)) {
2848 aprint_error_dev(sc->bge_dev,
2849 "failed to read station address\n");
2850 bge_release_resources(sc);
2851 return;
2852 }
2853
2854 br = bge_lookup_rev(sc->bge_chipid);
2855
2856 if (br == NULL) {
2857 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
2858 sc->bge_chipid);
2859 } else {
2860 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
2861 br->br_name, sc->bge_chipid);
2862 }
2863 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2864
2865 /* Allocate the general information block and ring buffers. */
2866 if (pci_dma64_available(pa))
2867 sc->bge_dmatag = pa->pa_dmat64;
2868 else
2869 sc->bge_dmatag = pa->pa_dmat;
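/*
 * This is the standard bus_dma(9) sequence: bus_dmamem_alloc() for
 * raw segments, bus_dmamem_map() for a kernel mapping, then
 * bus_dmamap_create()/bus_dmamap_load() to obtain the device
 * addresses that get programmed into the ring control blocks.
 */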
2870 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2871 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2872 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2873 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
2874 return;
2875 }
2876 DPRINTFN(5, ("bus_dmamem_map\n"));
2877 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2878 sizeof(struct bge_ring_data), &kva,
2879 BUS_DMA_NOWAIT)) {
2880 aprint_error_dev(sc->bge_dev,
2881 "can't map DMA buffers (%zu bytes)\n",
2882 sizeof(struct bge_ring_data));
2883 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2884 return;
2885 }
2886 DPRINTFN(5, ("bus_dmamem_create\n"));
2887 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2888 sizeof(struct bge_ring_data), 0,
2889 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2890 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
2891 bus_dmamem_unmap(sc->bge_dmatag, kva,
2892 sizeof(struct bge_ring_data));
2893 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2894 return;
2895 }
2896 DPRINTFN(5, ("bus_dmamem_load\n"));
2897 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2898 sizeof(struct bge_ring_data), NULL,
2899 BUS_DMA_NOWAIT)) {
2900 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2901 bus_dmamem_unmap(sc->bge_dmatag, kva,
2902 sizeof(struct bge_ring_data));
2903 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2904 return;
2905 }
2906
2907 DPRINTFN(5, ("bzero\n"));
2908 sc->bge_rdata = (struct bge_ring_data *)kva;
2909
2910 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2911
2912 /* Try to allocate memory for jumbo buffers. */
2913 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2914 if (bge_alloc_jumbo_mem(sc)) {
2915 aprint_error_dev(sc->bge_dev,
2916 "jumbo buffer allocation failed\n");
2917 } else
2918 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2919 }
2920
2921 /* Set default tuneable values. */
2922 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2923 sc->bge_rx_coal_ticks = 150;
2924 sc->bge_rx_max_coal_bds = 64;
2925 #ifdef ORIG_WPAUL_VALUES
2926 sc->bge_tx_coal_ticks = 150;
2927 sc->bge_tx_max_coal_bds = 128;
2928 #else
2929 sc->bge_tx_coal_ticks = 300;
2930 sc->bge_tx_max_coal_bds = 400;
2931 #endif
2932 if (BGE_IS_5705_PLUS(sc)) {
2933 sc->bge_tx_coal_ticks = (12 * 5);
2934 sc->bge_tx_max_coal_bds = (12 * 5);
2935 aprint_verbose_dev(sc->bge_dev,
2936 "setting short Tx thresholds\n");
2937 }
2938
2939 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2940 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2941 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766)
2942 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2943 else if (BGE_IS_5705_PLUS(sc))
2944 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2945 else
2946 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2947
2948 /* Set up ifnet structure */
2949 ifp = &sc->ethercom.ec_if;
2950 ifp->if_softc = sc;
2951 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2952 ifp->if_ioctl = bge_ioctl;
2953 ifp->if_stop = bge_stop;
2954 ifp->if_start = bge_start;
2955 ifp->if_init = bge_init;
2956 ifp->if_watchdog = bge_watchdog;
2957 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2958 IFQ_SET_READY(&ifp->if_snd);
2959 DPRINTFN(5, ("strcpy if_xname\n"));
2960 strcpy(ifp->if_xname, device_xname(sc->bge_dev));
2961
2962 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
2963 sc->ethercom.ec_if.if_capabilities |=
2964 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
2965 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */
2966 sc->ethercom.ec_if.if_capabilities |=
2967 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2968 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
2969 #endif
2970 sc->ethercom.ec_capabilities |=
2971 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2972
2973 if (sc->bge_flags & BGE_TSO)
2974 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
2975
2976 /*
2977 * Do MII setup.
2978 */
2979 DPRINTFN(5, ("mii setup\n"));
2980 sc->bge_mii.mii_ifp = ifp;
2981 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2982 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2983 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2984
2985 /*
2986 * Figure out what sort of media we have by checking the hardware
2987 * config word in the first 32k of NIC internal memory, or fall back to
2988 * the config word in the EEPROM. Note: on some BCM5700 cards,
2989 * this value appears to be unset. If that's the case, we have to rely
2990 * on identifying the NIC by its PCI subsystem ID, as we do below for
2991 * the SysKonnect SK-9D41.
2992 */
2993 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2994 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2995 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
2996 bge_read_eeprom(sc, (void *)&hwcfg,
2997 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2998 hwcfg = be32toh(hwcfg);
2999 }
3000 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3001 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3002 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3003 if (BGE_IS_5714_FAMILY(sc))
3004 sc->bge_flags |= BGE_PHY_FIBER_MII;
3005 else
3006 sc->bge_flags |= BGE_PHY_FIBER_TBI;
3007 }
3008
3009 /* set phyflags and chipid before mii_attach() */
3010 dict = device_properties(self);
3011 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags);
3012 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
3013
3014 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3015 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3016 bge_ifmedia_sts);
3017 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
3018 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
3019 0, NULL);
3020 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3021 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3022 /* Pretend the user requested this setting */
3023 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3024 } else {
3025 /*
3026 		 * Do transceiver setup and tell the firmware the
3027 		 * driver is down so we can try to get access to
3028 		 * probe the PHY if ASF is running.  Retry a couple
3029 		 * of times if we get a conflict with the ASF
3030 		 * firmware accessing the PHY.
3031 */
3032 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3033 bge_asf_driver_up(sc);
3034
3035 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3036 bge_ifmedia_sts);
3037 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff,
3038 MII_PHY_ANY, MII_OFFSET_ANY,
3039 MIIF_FORCEANEG|MIIF_DOPAUSE);
3040
3041 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
3042 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
3043 ifmedia_add(&sc->bge_mii.mii_media,
3044 IFM_ETHER|IFM_MANUAL, 0, NULL);
3045 ifmedia_set(&sc->bge_mii.mii_media,
3046 IFM_ETHER|IFM_MANUAL);
3047 } else
3048 ifmedia_set(&sc->bge_mii.mii_media,
3049 IFM_ETHER|IFM_AUTO);
3050
3051 /*
3052 * Now tell the firmware we are going up after probing the PHY
3053 */
3054 if (sc->bge_asf_mode & ASF_STACKUP)
3055 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3056 }
3057
3058 /*
3059 * Call MI attach routine.
3060 */
3061 DPRINTFN(5, ("if_attach\n"));
3062 if_attach(ifp);
3063 DPRINTFN(5, ("ether_ifattach\n"));
3064 ether_ifattach(ifp, eaddr);
3065 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
3066 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
3067 RND_TYPE_NET, 0);
3068 #ifdef BGE_EVENT_COUNTERS
3069 /*
3070 * Attach event counters.
3071 */
3072 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
3073 NULL, device_xname(sc->bge_dev), "intr");
3074 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
3075 NULL, device_xname(sc->bge_dev), "tx_xoff");
3076 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
3077 NULL, device_xname(sc->bge_dev), "tx_xon");
3078 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
3079 NULL, device_xname(sc->bge_dev), "rx_xoff");
3080 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
3081 NULL, device_xname(sc->bge_dev), "rx_xon");
3082 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
3083 NULL, device_xname(sc->bge_dev), "rx_macctl");
3084 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
3085 NULL, device_xname(sc->bge_dev), "xoffentered");
3086 #endif /* BGE_EVENT_COUNTERS */
3087 DPRINTFN(5, ("callout_init\n"));
3088 callout_init(&sc->bge_timeout, 0);
3089
3090 if (pmf_device_register(self, NULL, NULL))
3091 pmf_class_network_register(self, ifp);
3092 else
3093 aprint_error_dev(self, "couldn't establish power handler\n");
3094
3095 sysctl_bge_init(sc);
3096
3097 #ifdef BGE_DEBUG
3098 bge_debug_info(sc);
3099 #endif
3100 }
3101
3102 static void
3103 bge_release_resources(struct bge_softc *sc)
3104 {
3105 if (sc->bge_vpd_prodname != NULL)
3106 free(sc->bge_vpd_prodname, M_DEVBUF);
3107
3108 if (sc->bge_vpd_readonly != NULL)
3109 free(sc->bge_vpd_readonly, M_DEVBUF);
3110 }
3111
3112 static int
3113 bge_reset(struct bge_softc *sc)
3114 {
3115 uint32_t cachesize, command, pcistate, marbmode;
3116 #if 0
3117 uint32_t new_pcistate;
3118 #endif
3119 pcireg_t devctl, reg;
3120 int i, val;
3121 void (*write_op)(struct bge_softc *, int, int);
3122
3123 if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc)
3124 && (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
3125 if (sc->bge_flags & BGE_PCIE)
3126 write_op = bge_writemem_direct;
3127 else
3128 write_op = bge_writemem_ind;
3129 } else
3130 write_op = bge_writereg_ind;
3131
3132 /* Save some important PCI state. */
3133 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
3134 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
3135 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE);
3136
3137 /* Step 5a: Enable memory arbiter. */
3138 marbmode = 0;
3139 if (BGE_IS_5714_FAMILY(sc))
3140 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3141 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3142
3143 /* Step 5b-5d: */
3144 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
3145 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3146 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
3147
3148 /* XXX ???: Disable fastboot on controllers that support it. */
3149 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3150 BGE_IS_5755_PLUS(sc))
3151 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3152
3153 /*
3154 * Step 6: Write the magic number to SRAM at offset 0xB50.
3155 * When firmware finishes its initialization it will
3156 * write ~BGE_MAGIC_NUMBER to the same location.
3157 */
3158 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3159
3160 /* Step 7: */
3161 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1);
3162 /*
3163 * XXX: from FreeBSD/Linux; no documentation
3164 */
3165 if (sc->bge_flags & BGE_PCIE) {
3166 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60)
3167 /* PCI Express 1.0 system */
3168 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20);
3169 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3170 /*
3171 * Prevent PCI Express link training
3172 * during global reset.
3173 */
3174 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3175 val |= (1<<29);
3176 }
3177 }
3178
3179 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3180 i = CSR_READ_4(sc, BGE_VCPU_STATUS);
3181 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3182 i | BGE_VCPU_STATUS_DRV_RESET);
3183 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3184 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3185 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3186 }
3187
3188 /*
3189 * Set GPHY Power Down Override to leave GPHY
3190 * powered up in D0 uninitialized.
3191 */
3192 if (BGE_IS_5705_PLUS(sc))
3193 val |= BGE_MISCCFG_KEEP_GPHY_POWER;
3194
3195 /* XXX 5721, 5751 and 5752 */
3196 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750)
3197 val |= BGE_MISCCFG_GRC_RESET_DISABLE;
3198
3199 /* Issue global reset */
3200 write_op(sc, BGE_MISC_CFG, val);
3201
3202 /* Step 8: wait for complete */
3203 if (sc->bge_flags & BGE_PCIE)
3204 delay(100*1000); /* too big */
3205 else
3206 delay(100);
3207
3208 /* From Linux: dummy read to flush PCI posted writes */
3209 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
3210
3211 /* Step 9-10: Reset some of the PCI state that got zapped by reset */
3212 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
3213 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3214 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW
3215 | BGE_PCIMISCCTL_CLOCKCTL_RW);
3216 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
3217 write_op(sc, BGE_MISC_CFG, (65 << 1));
3218
3219 /* Step 11: disable PCI-X Relaxed Ordering. */
3220 if (sc->bge_flags & BGE_PCIX) {
3221 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
3222 + PCI_PCIX_CMD);
3223 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
3224 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER);
3225 }
3226
3227 if (sc->bge_flags & BGE_PCIE) {
3228 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3229 DELAY(500000);
3230 /* XXX: Magic Numbers */
3231 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3232 BGE_PCI_UNKNOWN0);
3233 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3234 BGE_PCI_UNKNOWN0,
3235 reg | (1 << 15));
3236 }
3237 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3238 sc->bge_pciecap + PCI_PCIE_DCSR);
3239 /* Clear enable no snoop and disable relaxed ordering. */
3240 devctl &= ~(0x0010 | PCI_PCIE_DCSR_ENA_NO_SNOOP);
3241 /* Set PCIE max payload size to 128. */
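/* (0x00e0 is the Max_Payload_Size field, DCSR bits 7:5.) */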
3242 devctl &= ~(0x00e0);
3243 		/* Clear the device status register; its bits are write-1-to-clear. */
3244 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED
3245 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED;
3246 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3247 sc->bge_pciecap + PCI_PCIE_DCSR, devctl);
3248 }
3249
3250 /* Step 12: Enable memory arbiter. */
3251 marbmode = 0;
3252 if (BGE_IS_5714_FAMILY(sc))
3253 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3254 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3255
3256 /* Step 17: Poll until the firmware initialization is complete */
3257 bge_poll_fw(sc);
3258
3259 /* XXX 5721, 5751 and 5752 */
3260 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
3261 /* Step 19: */
3262 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
3263 /* Step 20: */
3264 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
3265 }
3266
3267 /*
3268 	 * Step 18: write MAC mode
3269 * XXX Write 0x0c for 5703S and 5704S
3270 */
3271 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3272
3273
3274 	/* Step 21: BCM5704 Bx errata: adjust the MSI data register */
3275 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
3276 pcireg_t msidata;
3277
3278 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3279 BGE_PCI_MSI_DATA);
3280 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
3281 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
3282 msidata);
3283 }
3284
3285 /* Step 23: restore cache line size */
3286 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
3287
3288 #if 0
3289 /*
3290 * XXX Wait for the value of the PCISTATE register to
3291 * return to its original pre-reset state. This is a
3292 * fairly good indicator of reset completion. If we don't
3293 * wait for the reset to fully complete, trying to read
3294 * from the device's non-PCI registers may yield garbage
3295 * results.
3296 */
3297 for (i = 0; i < BGE_TIMEOUT; i++) {
3298 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3299 BGE_PCI_PCISTATE);
3300 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
3301 (pcistate & ~BGE_PCISTATE_RESERVED))
3302 break;
3303 DELAY(10);
3304 }
3305 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
3306 (pcistate & ~BGE_PCISTATE_RESERVED)) {
3307 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n");
3308 }
3309 #endif
3310
3311 /* Step 28: Fix up byte swapping */
3312 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
3313
3314 /* Tell the ASF firmware we are up */
3315 if (sc->bge_asf_mode & ASF_STACKUP)
3316 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3317
3318 /*
3319 * The 5704 in TBI mode apparently needs some special
3320 	 * adjustment to ensure the SERDES drive level is set
3321 * to 1.2V.
3322 */
3323 if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
3324 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3325 uint32_t serdescfg;
3326
3327 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
3328 serdescfg = (serdescfg & ~0xFFF) | 0x880;
3329 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
3330 }
3331
3332 if (sc->bge_flags & BGE_PCIE &&
3333 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3334 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3335 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3336 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
3337 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57766) {
3338 uint32_t v;
3339
3340 /* Enable PCI Express bug fix */
3341 v = CSR_READ_4(sc, 0x7c00);
3342 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
3343 }
3344 DELAY(10000);
3345
3346 return 0;
3347 }
3348
3349 /*
3350 * Frame reception handling. This is called if there's a frame
3351 * on the receive return list.
3352 *
3353 * Note: we have to be able to handle two possibilities here:
3354 * 1) the frame is from the jumbo receive ring
3355 * 2) the frame is from the standard receive ring
3356 */
3357
3358 static void
3359 bge_rxeof(struct bge_softc *sc)
3360 {
3361 struct ifnet *ifp;
3362 uint16_t rx_prod, rx_cons;
3363 int stdcnt = 0, jumbocnt = 0;
3364 bus_dmamap_t dmamap;
3365 bus_addr_t offset, toff;
3366 bus_size_t tlen;
3367 int tosync;
3368
3369 rx_cons = sc->bge_rx_saved_considx;
3370 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
3371
3372 /* Nothing to do */
3373 if (rx_cons == rx_prod)
3374 return;
3375
3376 ifp = &sc->ethercom.ec_if;
3377
3378 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3379 offsetof(struct bge_ring_data, bge_status_block),
3380 sizeof (struct bge_status_block),
3381 BUS_DMASYNC_POSTREAD);
3382
3383 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3384 tosync = rx_prod - rx_cons;
3385
3386 if (tosync != 0)
3387 rnd_add_uint32(&sc->rnd_source, tosync);
3388
3389 toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
3390
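/*
 * A negative count means the producer index wrapped past the end of
 * the return ring: sync the tail of the ring first, then fall
 * through and sync the remainder from the start.
 */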
3391 if (tosync < 0) {
3392 tlen = (sc->bge_return_ring_cnt - rx_cons) *
3393 sizeof (struct bge_rx_bd);
3394 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3395 toff, tlen, BUS_DMASYNC_POSTREAD);
3396 tosync = -tosync;
3397 }
3398
3399 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3400 offset, tosync * sizeof (struct bge_rx_bd),
3401 BUS_DMASYNC_POSTREAD);
3402
3403 while (rx_cons != rx_prod) {
3404 struct bge_rx_bd *cur_rx;
3405 uint32_t rxidx;
3406 struct mbuf *m = NULL;
3407
3408 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
3409
3410 rxidx = cur_rx->bge_idx;
3411 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3412
3413 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3414 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3415 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3416 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3417 jumbocnt++;
3418 bus_dmamap_sync(sc->bge_dmatag,
3419 sc->bge_cdata.bge_rx_jumbo_map,
3420 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
3421 BGE_JLEN, BUS_DMASYNC_POSTREAD);
3422 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3423 ifp->if_ierrors++;
3424 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3425 continue;
3426 }
3427 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
3428 NULL) == ENOBUFS) {
3429 ifp->if_ierrors++;
3430 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3431 continue;
3432 }
3433 } else {
3434 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3435 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3436
3437 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3438 stdcnt++;
3439 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3440 sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
3441 if (dmamap == NULL) {
3442 ifp->if_ierrors++;
3443 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3444 continue;
3445 }
3446 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3447 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3448 bus_dmamap_unload(sc->bge_dmatag, dmamap);
3449 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3450 ifp->if_ierrors++;
3451 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3452 continue;
3453 }
3454 if (bge_newbuf_std(sc, sc->bge_std,
3455 NULL, dmamap) == ENOBUFS) {
3456 ifp->if_ierrors++;
3457 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3458 continue;
3459 }
3460 }
3461
3462 ifp->if_ipackets++;
3463 #ifndef __NO_STRICT_ALIGNMENT
3464 /*
3465 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
3466 * the Rx buffer has the layer-2 header unaligned.
3467 * If our CPU requires alignment, re-align by copying.
3468 */
3469 if (sc->bge_flags & BGE_RX_ALIGNBUG) {
3470 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
3471 cur_rx->bge_len);
3472 m->m_data += ETHER_ALIGN;
3473 }
3474 #endif
3475
3476 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3477 m->m_pkthdr.rcvif = ifp;
3478
3479 /*
3480 * Handle BPF listeners. Let the BPF user see the packet.
3481 */
3482 bpf_mtap(ifp, m);
3483
3484 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
3485
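		/*
		 * The chip reports the ones-complement sum of the received
		 * IP header; a valid header (including its checksum field)
		 * sums to 0xffff, so any other value marks the packet bad.
		 */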
3486 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
3487 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
3488 /*
3489 * Rx transport checksum-offload may also
3490 * have bugs with packets which, when transmitted,
3491 * were `runts' requiring padding.
3492 */
3493 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3494 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
3495 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
3496 m->m_pkthdr.csum_data =
3497 cur_rx->bge_tcp_udp_csum;
3498 m->m_pkthdr.csum_flags |=
3499 (M_CSUM_TCPv4|M_CSUM_UDPv4|
3500 M_CSUM_DATA);
3501 }
3502
3503 /*
3504 * If we received a packet with a vlan tag, pass it
3505 * to vlan_input() instead of ether_input().
3506 */
3507 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3508 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue);
3509 }
3510
3511 (*ifp->if_input)(ifp, m);
3512 }
3513
3514 sc->bge_rx_saved_considx = rx_cons;
3515 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3516 if (stdcnt)
3517 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3518 if (jumbocnt)
3519 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3520 }
3521
3522 static void
3523 bge_txeof(struct bge_softc *sc)
3524 {
3525 struct bge_tx_bd *cur_tx = NULL;
3526 struct ifnet *ifp;
3527 struct txdmamap_pool_entry *dma;
3528 bus_addr_t offset, toff;
3529 bus_size_t tlen;
3530 int tosync;
3531 struct mbuf *m;
3532
3533 ifp = &sc->ethercom.ec_if;
3534
3535 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3536 offsetof(struct bge_ring_data, bge_status_block),
3537 sizeof (struct bge_status_block),
3538 BUS_DMASYNC_POSTREAD);
3539
3540 offset = offsetof(struct bge_ring_data, bge_tx_ring);
3541 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
3542 sc->bge_tx_saved_considx;
3543
3544 if (tosync != 0)
3545 rnd_add_uint32(&sc->rnd_source, tosync);
3546
3547 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
3548
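	/*
	 * As in bge_rxeof(), a negative tosync means the consumer index
	 * wrapped, so the dirty descriptors are synced in two pieces.
	 */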
3549 if (tosync < 0) {
3550 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
3551 sizeof (struct bge_tx_bd);
3552 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3553 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3554 tosync = -tosync;
3555 }
3556
3557 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3558 offset, tosync * sizeof (struct bge_tx_bd),
3559 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3560
3561 /*
3562 * Go through our tx ring and free mbufs for those
3563 * frames that have been sent.
3564 */
3565 while (sc->bge_tx_saved_considx !=
3566 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
3567 uint32_t idx = 0;
3568
3569 idx = sc->bge_tx_saved_considx;
3570 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
3571 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3572 ifp->if_opackets++;
3573 m = sc->bge_cdata.bge_tx_chain[idx];
3574 if (m != NULL) {
3575 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3576 dma = sc->txdma[idx];
3577 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
3578 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3579 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
3580 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
3581 sc->txdma[idx] = NULL;
3582
3583 m_freem(m);
3584 }
3585 sc->bge_txcnt--;
3586 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3587 ifp->if_timer = 0;
3588 }
3589
3590 if (cur_tx != NULL)
3591 ifp->if_flags &= ~IFF_OACTIVE;
3592 }
3593
3594 static int
3595 bge_intr(void *xsc)
3596 {
3597 struct bge_softc *sc;
3598 struct ifnet *ifp;
3599 uint32_t statusword;
3600
3601 sc = xsc;
3602 ifp = &sc->ethercom.ec_if;
3603
3604 /* It is possible for the interrupt to arrive before
3605 * the status block has been updated. Reading the PCI
3606 * State register will confirm whether the interrupt is
3607 * ours and will flush the status block.
3608 */
3609
3610 /* read status word from status block */
3611 statusword = sc->bge_rdata->bge_status_block.bge_status;
3612
3613 if ((statusword & BGE_STATFLAG_UPDATED) ||
3614 (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
3615 /* Ack interrupt and stop others from occurring. */
3616 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3617
3618 BGE_EVCNT_INCR(sc->bge_ev_intr);
3619
3620 /* clear status word */
3621 sc->bge_rdata->bge_status_block.bge_status = 0;
3622
3623 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3624 statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
3625 BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
3626 bge_link_upd(sc);
3627
3628 if (ifp->if_flags & IFF_RUNNING) {
3629 /* Check RX return ring producer/consumer */
3630 bge_rxeof(sc);
3631
3632 /* Check TX ring producer/consumer */
3633 bge_txeof(sc);
3634 }
3635
3636 if (sc->bge_pending_rxintr_change) {
3637 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
3638 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
3639 uint32_t junk;
3640
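			/*
			 * Write the new coalescing parameters, reading each
			 * register back (into junk) apparently to make sure
			 * the write has posted before continuing.
			 */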
3641 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
3642 DELAY(10);
3643 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3644
3645 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
3646 DELAY(10);
3647 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3648
3649 sc->bge_pending_rxintr_change = 0;
3650 }
3651 bge_handle_events(sc);
3652
3653 /* Re-enable interrupts. */
3654 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3655
3656 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
3657 bge_start(ifp);
3658
3659 return 1;
3660 } else
3661 return 0;
3662 }
3663
3664 static void
3665 bge_asf_driver_up(struct bge_softc *sc)
3666 {
3667 if (sc->bge_asf_mode & ASF_STACKUP) {
3668 /* Send an ASF heartbeat approx. every 2 seconds. */
3669 if (sc->bge_asf_count)
3670 sc->bge_asf_count--;
3671 else {
3672 sc->bge_asf_count = 2;
3673 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3674 BGE_FW_DRV_ALIVE);
3675 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3676 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3677 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3678 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3679 }
3680 }
3681 }
3682
3683 static void
3684 bge_tick(void *xsc)
3685 {
3686 struct bge_softc *sc = xsc;
3687 struct mii_data *mii = &sc->bge_mii;
3688 int s;
3689
3690 s = splnet();
3691
3692 if (BGE_IS_5705_PLUS(sc))
3693 bge_stats_update_regs(sc);
3694 else
3695 bge_stats_update(sc);
3696
3697 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3698 /*
3699 * Since auto-polling can't be used in TBI mode, we poll the
3700 * link status manually. Register a pending link event and
3701 * trigger an interrupt.
3702 */
3703 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3704 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3705 } else {
3706 /*
3707 * Do not touch the PHY if we have link up. This could break
3708 * IPMI/ASF mode or produce extra input errors
3709 * (extra input errors were reported for bcm5701 & bcm5704).
3710 */
3711 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3712 mii_tick(mii);
3713 }
3714
3715 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3716
3717 splx(s);
3718 }
3719
3720 static void
3721 bge_stats_update_regs(struct bge_softc *sc)
3722 {
3723 struct ifnet *ifp = &sc->ethercom.ec_if;
3724
3725 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3726 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3727
3728 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3729 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3730 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3731 }
3732
3733 static void
3734 bge_stats_update(struct bge_softc *sc)
3735 {
3736 struct ifnet *ifp = &sc->ethercom.ec_if;
3737 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3738
3739 #define READ_STAT(sc, stats, stat) \
3740 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3741
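	/*
	 * The statistics block counters are cumulative, so adding
	 * (hardware total - ifp->if_collisions) sets if_collisions to
	 * the current hardware total instead of double-counting.
	 */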
3742 ifp->if_collisions +=
3743 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3744 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3745 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3746 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3747 ifp->if_collisions;
3748
3749 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
3750 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
3751 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
3752 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
3753 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
3754 READ_STAT(sc, stats,
3755 xoffPauseFramesReceived.bge_addr_lo));
3756 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
3757 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
3758 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
3759 READ_STAT(sc, stats,
3760 macControlFramesReceived.bge_addr_lo));
3761 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
3762 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
3763
3764 #undef READ_STAT
3765
3766 #ifdef notdef
3767 ifp->if_collisions +=
3768 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3769 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3770 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3771 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3772 ifp->if_collisions;
3773 #endif
3774 }
3775
3776 /*
3777 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3778 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3779 * but when such padded frames employ the bge IP/TCP checksum offload,
3780 * the hardware checksum assist gives incorrect results (possibly
3781 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3782 * If we pad such runts with zeros, the onboard checksum comes out correct.
3783 */
3784 static inline int
3785 bge_cksum_pad(struct mbuf *pkt)
3786 {
3787 struct mbuf *last = NULL;
3788 int padlen;
3789
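	/*
	 * ETHER_MIN_NOPAD is the minimum frame length less the 4-byte
	 * CRC (60 bytes), so e.g. a 42-byte frame needs 18 zero bytes.
	 */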
3790 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3791
3792 /* If there's only the packet header and we can pad there, use it. */
3793 if (pkt->m_pkthdr.len == pkt->m_len &&
3794 M_TRAILINGSPACE(pkt) >= padlen) {
3795 last = pkt;
3796 } else {
3797 /*
3798 * Walk packet chain to find last mbuf. We will either
3799 * pad there, or append a new mbuf and pad it
3800 * (thus perhaps avoiding the bcm5700 dma-min bug).
3801 */
3802 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3803 continue; /* do nothing */
3804 }
3805
3806 /* `last' now points to last in chain. */
3807 if (M_TRAILINGSPACE(last) < padlen) {
3808 /* Allocate new empty mbuf, pad it. Compact later. */
3809 struct mbuf *n;
3810 MGET(n, M_DONTWAIT, MT_DATA);
3811 if (n == NULL)
3812 return ENOBUFS;
3813 n->m_len = 0;
3814 last->m_next = n;
3815 last = n;
3816 }
3817 }
3818
3819 KDASSERT(!M_READONLY(last));
3820 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
3821
3822 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3823 memset(mtod(last, char *) + last->m_len, 0, padlen);
3824 last->m_len += padlen;
3825 pkt->m_pkthdr.len += padlen;
3826 return 0;
3827 }
3828
3829 /*
3830 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3831 */
3832 static inline int
3833 bge_compact_dma_runt(struct mbuf *pkt)
3834 {
3835 struct mbuf *m, *prev;
3836 int totlen, prevlen;
3837
3838 prev = NULL;
3839 totlen = 0;
3840 prevlen = -1;
3841
3842 for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3843 int mlen = m->m_len;
3844 int shortfall = 8 - mlen;
3845
3846 totlen += mlen;
3847 if (mlen == 0)
3848 continue;
3849 if (mlen >= 8)
3850 continue;
3851
3852 /* If we get here, the mbuf data is too small for the DMA engine.
3853 * Try to fix it by shuffling data to prev or next in the chain.
3854 * If that fails, do a compacting deep-copy of the whole chain.
3855 */
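		/*
		 * Three cases follow: (1) m's bytes fit in prev, so append
		 * them there and free m; (2) m is writable and m->m_next
		 * has bytes to spare, so pull the shortfall forward; (3)
		 * otherwise steal the tail of prev into a fresh mbuf that
		 * also absorbs m.
		 */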
3856
3857 /* Internal fragment. If it fits in prev, copy it there. */
3858 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
3859 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
3860 prev->m_len += mlen;
3861 m->m_len = 0;
3862 /* XXX stitch chain */
3863 prev->m_next = m_free(m);
3864 m = prev;
3865 continue;
3866 }
3867 else if (m->m_next != NULL &&
3868 M_TRAILINGSPACE(m) >= shortfall &&
3869 m->m_next->m_len >= (8 + shortfall)) {
3870 /* m is writable and next has enough data, so pull up. */
3871
3872 memcpy(m->m_data + m->m_len, m->m_next->m_data,
3873 shortfall);
3874 m->m_len += shortfall;
3875 m->m_next->m_len -= shortfall;
3876 m->m_next->m_data += shortfall;
3877 }
3878 else if (m->m_next == NULL || 1) {
3879 /* Got a runt at the very end of the packet.
3880 * Borrow data from the tail of the preceding mbuf and
3881 * update its length in-place. (The original data is still
3882 * valid, so we can do this even if prev is not writable.)
3883 */
3884
3885 /* If we'd make prev a runt, just move all of its data. */
3886 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3887 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3888
3889 if ((prev->m_len - shortfall) < 8)
3890 shortfall = prev->m_len;
3891
3892 #ifdef notyet /* just do the safe slow thing for now */
3893 if (!M_READONLY(m)) {
3894 if (M_LEADINGSPACE(m) < shortfall) {
3895 void *m_dat;
3896 m_dat = (m->m_flags & M_PKTHDR) ?
3897 m->m_pktdat : m->m_dat;
3898 memmove(m_dat, mtod(m, void*), m->m_len);
3899 m->m_data = m_dat;
3900 }
3901 } else
3902 #endif /* just do the safe slow thing */
3903 {
3904 struct mbuf * n = NULL;
3905 int newprevlen = prev->m_len - shortfall;
3906
3907 MGET(n, M_NOWAIT, MT_DATA);
3908 if (n == NULL)
3909 return ENOBUFS;
3910 KASSERT(m->m_len + shortfall < MLEN
3911 /*,
3912 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3913
3914 /* first copy the data we're stealing from prev */
3915 memcpy(n->m_data, prev->m_data + newprevlen,
3916 shortfall);
3917
3918 /* update prev->m_len accordingly */
3919 prev->m_len -= shortfall;
3920
3921 /* copy data from runt m */
3922 memcpy(n->m_data + shortfall, m->m_data,
3923 m->m_len);
3924
3925 /* n holds what we stole from prev, plus m */
3926 n->m_len = shortfall + m->m_len;
3927
3928 /* stitch n into chain and free m */
3929 n->m_next = m->m_next;
3930 prev->m_next = n;
3931 /* KASSERT(m->m_next == NULL); */
3932 m->m_next = NULL;
3933 m_free(m);
3934 m = n; /* for continuing loop */
3935 }
3936 }
3937 prevlen = m->m_len;
3938 }
3939 return 0;
3940 }
3941
3942 /*
3943 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3944 * pointers to descriptors.
3945 */
3946 static int
3947 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
3948 {
3949 struct bge_tx_bd *f = NULL;
3950 uint32_t frag, cur;
3951 uint16_t csum_flags = 0;
3952 uint16_t txbd_tso_flags = 0;
3953 struct txdmamap_pool_entry *dma;
3954 bus_dmamap_t dmamap;
3955 int i = 0;
3956 struct m_tag *mtag;
3957 int use_tso, maxsegsize, error;
3958
3959 cur = frag = *txidx;
3960
3961 if (m_head->m_pkthdr.csum_flags) {
3962 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3963 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3964 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3965 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3966 }
3967
3968 /*
3969 * If we were asked to do an outboard checksum, and the NIC
3970 * has the bug where it sometimes adds in the Ethernet padding,
3971 * explicitly pad with zeros so the cksum will be correct either way.
3972 * (For now, do this for all chip versions, until newer
3973 * are confirmed to not require the workaround.)
3974 */
3975 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3976 #ifdef notyet
3977 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3978 #endif
3979 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3980 goto check_dma_bug;
3981
3982 if (bge_cksum_pad(m_head) != 0)
3983 return ENOBUFS;
3984
3985 check_dma_bug:
3986 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
3987 goto doit;
3988
3989 /*
3990 * bcm5700 Revision B silicon cannot handle DMA descriptors with
3991 * less than eight bytes. If we encounter a teeny mbuf
3992 * at the end of a chain, we can pad. Otherwise, copy.
3993 */
3994 if (bge_compact_dma_runt(m_head) != 0)
3995 return ENOBUFS;
3996
3997 doit:
3998 dma = SLIST_FIRST(&sc->txdma_list);
3999 if (dma == NULL)
4000 return ENOBUFS;
4001 dmamap = dma->dmamap;
4002
4003 /*
4004 * Set up any necessary TSO state before we start packing...
4005 */
4006 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4007 if (!use_tso) {
4008 maxsegsize = 0;
4009 } else { /* TSO setup */
4010 unsigned mss;
4011 struct ether_header *eh;
4012 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
4013 struct mbuf * m0 = m_head;
4014 struct ip *ip;
4015 struct tcphdr *th;
4016 int iphl, hlen;
4017
4018 /*
4019 * XXX It would be nice if the mbuf pkthdr had offset
4020 * fields for the protocol headers.
4021 */
4022
4023 eh = mtod(m0, struct ether_header *);
4024 switch (htons(eh->ether_type)) {
4025 case ETHERTYPE_IP:
4026 offset = ETHER_HDR_LEN;
4027 break;
4028
4029 case ETHERTYPE_VLAN:
4030 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4031 break;
4032
4033 default:
4034 /*
4035 * Don't support this protocol or encapsulation.
4036 */
4037 return ENOBUFS;
4038 }
4039
4040 /*
4041 * TCP/IP headers are in the first mbuf; we can do
4042 * this the easy way.
4043 */
4044 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4045 hlen = iphl + offset;
4046 if (__predict_false(m0->m_len <
4047 (hlen + sizeof(struct tcphdr)))) {
4048
4049 aprint_debug_dev(sc->bge_dev,
4050 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd, "
4051 "not handled yet\n",
4052 m0->m_len, hlen + sizeof(struct tcphdr));
4053 #ifdef NOTYET
4054 /*
4055 * XXX jonathan (at) NetBSD.org: untested.
4056 * how to force this branch to be taken?
4057 */
4058 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);
4059
4060 m_copydata(m0, offset, sizeof(ip), &ip);
4061 m_copydata(m0, hlen, sizeof(th), &th);
4062
4063 ip.ip_len = 0;
4064
4065 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
4066 sizeof(ip.ip_len), &ip.ip_len);
4067
4068 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4069 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4070
4071 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4072 sizeof(th.th_sum), &th.th_sum);
4073
4074 hlen += th.th_off << 2;
4075 iptcp_opt_words = hlen;
4076 #else
4077 /*
4078 * if_wm "hard" case not yet supported, can we not
4079 * mandate it out of existence?
4080 */
4081 (void) ip; (void)th; (void) ip_tcp_hlen;
4082
4083 return ENOBUFS;
4084 #endif
4085 } else {
4086 ip = (struct ip *) (mtod(m0, char *) + offset);
4087 th = (struct tcphdr *) (mtod(m0, char *) + hlen);
4088 ip_tcp_hlen = iphl + (th->th_off << 2);
4089
4090 /* Total IP/TCP options, in 32-bit words */
4091 iptcp_opt_words = (ip_tcp_hlen
4092 - sizeof(struct tcphdr)
4093 - sizeof(struct ip)) >> 2;
4094 }
4095 if (BGE_IS_5750_OR_BEYOND(sc)) {
4096 th->th_sum = 0;
4097 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
4098 } else {
4099 /*
4100 * XXX jonathan (at) NetBSD.org: 5705 untested.
4101 * Requires TSO firmware patch for 5701/5703/5704.
4102 */
4103 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4104 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4105 }
4106
4107 mss = m_head->m_pkthdr.segsz;
4108 txbd_tso_flags |=
4109 BGE_TXBDFLAG_CPU_PRE_DMA |
4110 BGE_TXBDFLAG_CPU_POST_DMA;
4111
4112 /*
4113 * Our NIC TSO-assist assumes TSO has standard, optionless
4114 * IPv4 and TCP headers, which total 40 bytes. By default,
4115 * the NIC copies 40 bytes of IP/TCP header from the
4116 * supplied header into the IP/TCP header portion of
4117 * each post-TSO-segment. If the supplied packet has IP or
4118 * TCP options, we need to tell the NIC to copy those extra
4119 * bytes into each post-TSO header, in addition to the normal
4120 * 40-byte IP/TCP header (and to leave space accordingly).
4121 * Unfortunately, the descriptor encoding of the option
4122 * length varies across different ASIC families.
4123 */
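		/*
		 * For example, 12 bytes of TCP options on an optionless
		 * IPv4 header give iptcp_opt_words == 3; 5705-plus parts
		 * take it in the upper bits of the mss field (<< 11),
		 * older parts in the txbd flags (<< 12), as below.
		 */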
4124 tcp_seg_flags = 0;
4125 if (iptcp_opt_words) {
4126 if (BGE_IS_5705_PLUS(sc)) {
4127 tcp_seg_flags =
4128 iptcp_opt_words << 11;
4129 } else {
4130 txbd_tso_flags |=
4131 iptcp_opt_words << 12;
4132 }
4133 }
4134 maxsegsize = mss | tcp_seg_flags;
4135 ip->ip_len = htons(mss + ip_tcp_hlen);
4136
4137 } /* TSO setup */
4138
4139 /*
4140 * Start packing the mbufs in this chain into
4141 * the fragment pointers. Stop when we run out
4142 * of fragments or hit the end of the mbuf chain.
4143 */
4144 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
4145 BUS_DMA_NOWAIT);
4146 if (error)
4147 return ENOBUFS;
4148 /*
4149 * Sanity check: avoid coming within 16 descriptors
4150 * of the end of the ring.
4151 */
4152 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
4153 BGE_TSO_PRINTF(("%s: "
4154 "dmamap_load_mbuf too close to ring wrap\n",
4155 device_xname(sc->bge_dev)));
4156 goto fail_unload;
4157 }
4158
4159 mtag = sc->ethercom.ec_nvlans ?
4160 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
4161
4163 /* Iterate over the DMA-map fragments. */
4164 for (i = 0; i < dmamap->dm_nsegs; i++) {
4165 f = &sc->bge_rdata->bge_tx_ring[frag];
4166 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4167 break;
4168
4169 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4170 f->bge_len = dmamap->dm_segs[i].ds_len;
4171
4172 /*
4173 * For 5750 and follow-ons, for TSO we must turn
4174 * off checksum-assist flag in the tx-descr, and
4175 * supply the ASIC-revision-specific encoding
4176 * of TSO flags and segsize.
4177 */
4178 if (use_tso) {
4179 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
4180 f->bge_rsvd = maxsegsize;
4181 f->bge_flags = csum_flags | txbd_tso_flags;
4182 } else {
4183 f->bge_rsvd = 0;
4184 f->bge_flags =
4185 (csum_flags | txbd_tso_flags) & 0x0fff;
4186 }
4187 } else {
4188 f->bge_rsvd = 0;
4189 f->bge_flags = csum_flags;
4190 }
4191
4192 if (mtag != NULL) {
4193 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4194 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
4195 } else {
4196 f->bge_vlan_tag = 0;
4197 }
4198 cur = frag;
4199 BGE_INC(frag, BGE_TX_RING_CNT);
4200 }
4201
4202 if (i < dmamap->dm_nsegs) {
4203 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
4204 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
4205 goto fail_unload;
4206 }
4207
4208 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4209 BUS_DMASYNC_PREWRITE);
4210
4211 if (frag == sc->bge_tx_saved_considx) {
4212 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
4213 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
4214
4215 goto fail_unload;
4216 }
4217
4218 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4219 sc->bge_cdata.bge_tx_chain[cur] = m_head;
4220 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
4221 sc->txdma[cur] = dma;
4222 sc->bge_txcnt += dmamap->dm_nsegs;
4223
4224 *txidx = frag;
4225
4226 return 0;
4227
4228 fail_unload:
4229 bus_dmamap_unload(sc->bge_dmatag, dmamap);
4230
4231 return ENOBUFS;
4232 }
4233
4234 /*
4235 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4236 * to the mbuf data regions directly in the transmit descriptors.
4237 */
4238 static void
4239 bge_start(struct ifnet *ifp)
4240 {
4241 struct bge_softc *sc;
4242 struct mbuf *m_head = NULL;
4243 uint32_t prodidx;
4244 int pkts = 0;
4245
4246 sc = ifp->if_softc;
4247
4248 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4249 return;
4250
4251 prodidx = sc->bge_tx_prodidx;
4252
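	/*
	 * Poll the queue first and dequeue only after a successful
	 * bge_encap(), so a packet that doesn't fit stays queued for
	 * the next attempt.
	 */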
4253 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
4254 IFQ_POLL(&ifp->if_snd, m_head);
4255 if (m_head == NULL)
4256 break;
4257
4258 #if 0
4259 /*
4260 * XXX
4261 * safety overkill. If this is a fragmented packet chain
4262 * with delayed TCP/UDP checksums, then only encapsulate
4263 * it if we have enough descriptors to handle the entire
4264 * chain at once.
4265 * (paranoia -- may not actually be needed)
4266 */
4267 if (m_head->m_flags & M_FIRSTFRAG &&
4268 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4269 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4270 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
4271 ifp->if_flags |= IFF_OACTIVE;
4272 break;
4273 }
4274 }
4275 #endif
4276
4277 /*
4278 * Pack the data into the transmit ring. If we
4279 * don't have room, set the OACTIVE flag and wait
4280 * for the NIC to drain the ring.
4281 */
4282 if (bge_encap(sc, m_head, &prodidx)) {
4283 ifp->if_flags |= IFF_OACTIVE;
4284 break;
4285 }
4286
4287 /* now we are committed to transmit the packet */
4288 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4289 pkts++;
4290
4291 /*
4292 * If there's a BPF listener, bounce a copy of this frame
4293 * to him.
4294 */
4295 bpf_mtap(ifp, m_head);
4296 }
4297 if (pkts == 0)
4298 return;
4299
4300 /* Transmit */
4301 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4302 /* 5700 b2 errata */
4303 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
4304 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4305
4306 sc->bge_tx_prodidx = prodidx;
4307
4308 /*
4309 * Set a timeout in case the chip goes out to lunch.
4310 */
4311 ifp->if_timer = 5;
4312 }
4313
4314 static int
4315 bge_init(struct ifnet *ifp)
4316 {
4317 struct bge_softc *sc = ifp->if_softc;
4318 const uint16_t *m;
4319 uint32_t mode;
4320 int s, error = 0;
4321
4322 s = splnet();
4323
4324 ifp = &sc->ethercom.ec_if;
4325
4326 /* Cancel pending I/O and flush buffers. */
4327 bge_stop(ifp, 0);
4328
4329 bge_stop_fw(sc);
4330 bge_sig_pre_reset(sc, BGE_RESET_START);
4331 bge_reset(sc);
4332 bge_sig_legacy(sc, BGE_RESET_START);
4333 bge_sig_post_reset(sc, BGE_RESET_START);
4334
4335 bge_chipinit(sc);
4336
4337 /*
4338 * Init the various state machines, ring
4339 * control blocks and firmware.
4340 */
4341 error = bge_blockinit(sc);
4342 if (error != 0) {
4343 aprint_error_dev(sc->bge_dev, "initialization error %d\n",
4344 error);
4345 splx(s);
4346 return error;
4347 }
4348
4349 ifp = &sc->ethercom.ec_if;
4350
4351 /* Specify MTU. */
4352 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4353 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
4354
4355 /* Load our MAC address. */
4356 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
4357 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4358 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4359
4360 /* Enable or disable promiscuous mode as needed. */
4361 if (ifp->if_flags & IFF_PROMISC)
4362 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4363 else
4364 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4365
4366 /* Program multicast filter. */
4367 bge_setmulti(sc);
4368
4369 /* Init RX ring. */
4370 bge_init_rx_ring_std(sc);
4371
4372 /*
4373 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4374 * memory to ensure that the chip has in fact read the first
4375 * entry of the ring.
4376 */
4377 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4378 uint32_t v, i;
4379 for (i = 0; i < 10; i++) {
4380 DELAY(20);
4381 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4382 if (v == (MCLBYTES - ETHER_ALIGN))
4383 break;
4384 }
4385 if (i == 10)
4386 aprint_error_dev(sc->bge_dev,
4387 "5705 A0 chip failed to load RX ring\n");
4388 }
4389
4390 /* Init jumbo RX ring. */
4391 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
4392 bge_init_rx_ring_jumbo(sc);
4393
4394 /* Init our RX return ring index */
4395 sc->bge_rx_saved_considx = 0;
4396
4397 /* Init TX ring. */
4398 bge_init_tx_ring(sc);
4399
4400 /* Enable TX MAC state machine lockup fix. */
4401 mode = CSR_READ_4(sc, BGE_TX_MODE);
4402 if (BGE_IS_5755_PLUS(sc) ||
4403 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4404 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4405
4406 /* Turn on transmitter */
4407 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4408 DELAY(100);
4409
4410 /* Turn on receiver */
4411 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4412 DELAY(10);
4413
4414 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4415
4416 /* Tell firmware we're alive. */
4417 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4418
4419 /* Enable host interrupts. */
4420 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4421 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4422 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4423
4424 if ((error = bge_ifmedia_upd(ifp)) != 0)
4425 goto out;
4426
4427 ifp->if_flags |= IFF_RUNNING;
4428 ifp->if_flags &= ~IFF_OACTIVE;
4429
4430 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
4431
4432 out:
4433 sc->bge_if_flags = ifp->if_flags;
4434 splx(s);
4435
4436 return error;
4437 }
4438
4439 /*
4440 * Set media options.
4441 */
4442 static int
4443 bge_ifmedia_upd(struct ifnet *ifp)
4444 {
4445 struct bge_softc *sc = ifp->if_softc;
4446 struct mii_data *mii = &sc->bge_mii;
4447 struct ifmedia *ifm = &sc->bge_ifmedia;
4448 int rc;
4449
4450 /* If this is a 1000baseX NIC, enable the TBI port. */
4451 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4452 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4453 return EINVAL;
4454 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4455 case IFM_AUTO:
4456 /*
4457 * The BCM5704 ASIC appears to have a special
4458 * mechanism for programming the autoneg
4459 * advertisement registers in TBI mode.
4460 */
4461 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4462 uint32_t sgdig;
4463 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4464 if (sgdig & BGE_SGDIGSTS_DONE) {
4465 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4466 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4467 sgdig |= BGE_SGDIGCFG_AUTO |
4468 BGE_SGDIGCFG_PAUSE_CAP |
4469 BGE_SGDIGCFG_ASYM_PAUSE;
4470 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4471 sgdig | BGE_SGDIGCFG_SEND);
4472 DELAY(5);
4473 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4474 }
4475 }
4476 break;
4477 case IFM_1000_SX:
4478 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4479 BGE_CLRBIT(sc, BGE_MAC_MODE,
4480 BGE_MACMODE_HALF_DUPLEX);
4481 } else {
4482 BGE_SETBIT(sc, BGE_MAC_MODE,
4483 BGE_MACMODE_HALF_DUPLEX);
4484 }
4485 break;
4486 default:
4487 return EINVAL;
4488 }
4489 /* XXX 802.3x flow control for 1000BASE-SX */
4490 return 0;
4491 }
4492
4493 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4494 if ((rc = mii_mediachg(mii)) == ENXIO)
4495 return 0;
4496
4497 /*
4498 * Force an interrupt so that we will call bge_link_upd
4499 * if needed and clear any pending link state attention.
4500 * Without this we get no further interrupts for link state
4501 * changes and thus never bring the link UP or get to send
4502 * in bge_start. The only way to get things working was to
4503 * receive a packet and get an RX intr.
4504 */
4505 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4506 sc->bge_flags & BGE_IS_5788)
4507 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4508 else
4509 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4510
4511 return rc;
4512 }
4513
4514 /*
4515 * Report current media status.
4516 */
4517 static void
4518 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4519 {
4520 struct bge_softc *sc = ifp->if_softc;
4521 struct mii_data *mii = &sc->bge_mii;
4522
4523 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4524 ifmr->ifm_status = IFM_AVALID;
4525 ifmr->ifm_active = IFM_ETHER;
4526 if (CSR_READ_4(sc, BGE_MAC_STS) &
4527 BGE_MACSTAT_TBI_PCS_SYNCHED)
4528 ifmr->ifm_status |= IFM_ACTIVE;
4529 ifmr->ifm_active |= IFM_1000_SX;
4530 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4531 ifmr->ifm_active |= IFM_HDX;
4532 else
4533 ifmr->ifm_active |= IFM_FDX;
4534 return;
4535 }
4536
4537 mii_pollstat(mii);
4538 ifmr->ifm_status = mii->mii_media_status;
4539 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4540 sc->bge_flowflags;
4541 }
4542
4543 static int
4544 bge_ifflags_cb(struct ethercom *ec)
4545 {
4546 struct ifnet *ifp = &ec->ec_if;
4547 struct bge_softc *sc = ifp->if_softc;
4548 int change = ifp->if_flags ^ sc->bge_if_flags;
4549
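	/*
	 * A change to anything other than IFF_PROMISC/IFF_ALLMULTI (or
	 * flags we can't change anyway) needs a full reinit; promiscuous
	 * and multicast changes can be applied on the fly below.
	 */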
4550 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
4551 return ENETRESET;
4552 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
4553 return 0;
4554
4555 if ((ifp->if_flags & IFF_PROMISC) == 0)
4556 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4557 else
4558 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4559
4560 bge_setmulti(sc);
4561
4562 sc->bge_if_flags = ifp->if_flags;
4563 return 0;
4564 }
4565
4566 static int
4567 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
4568 {
4569 struct bge_softc *sc = ifp->if_softc;
4570 struct ifreq *ifr = (struct ifreq *) data;
4571 int s, error = 0;
4572 struct mii_data *mii;
4573
4574 s = splnet();
4575
4576 switch (command) {
4577 case SIOCSIFMEDIA:
4578 /* XXX Flow control is not supported for 1000BASE-SX */
4579 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4580 ifr->ifr_media &= ~IFM_ETH_FMASK;
4581 sc->bge_flowflags = 0;
4582 }
4583
4584 /* Flow control requires full-duplex mode. */
4585 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4586 (ifr->ifr_media & IFM_FDX) == 0) {
4587 ifr->ifr_media &= ~IFM_ETH_FMASK;
4588 }
4589 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4590 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4591 /* We can do both TXPAUSE and RXPAUSE. */
4592 ifr->ifr_media |=
4593 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4594 }
4595 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4596 }
4597 /* FALLTHROUGH */
4598 case SIOCGIFMEDIA:
4599 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4600 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4601 command);
4602 } else {
4603 mii = &sc->bge_mii;
4604 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4605 command);
4606 }
4607 break;
4608 default:
4609 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
4610 break;
4611
4612 error = 0;
4613
4614 if ((command == SIOCADDMULTI || command == SIOCDELMULTI) &&
4615 (ifp->if_flags & IFF_RUNNING))
4616 bge_setmulti(sc);
4618 break;
4619 }
4620
4621 splx(s);
4622
4623 return error;
4624 }
4625
4626 static void
4627 bge_watchdog(struct ifnet *ifp)
4628 {
4629 struct bge_softc *sc;
4630
4631 sc = ifp->if_softc;
4632
4633 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
4634
4635 ifp->if_flags &= ~IFF_RUNNING;
4636 bge_init(ifp);
4637
4638 ifp->if_oerrors++;
4639 }
4640
4641 static void
4642 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4643 {
4644 int i;
4645
4646 BGE_CLRBIT(sc, reg, bit);
4647
4648 for (i = 0; i < 1000; i++) {
4649 if ((CSR_READ_4(sc, reg) & bit) == 0)
4650 return;
4651 delay(100);
4652 }
4653
4654 /*
4655 * Don't print an error message when the register is BGE_SRS_MODE:
4656 * that block fails to stop in some environments (and once after boot?).
4657 */
4658 if (reg != BGE_SRS_MODE)
4659 aprint_error_dev(sc->bge_dev,
4660 "block failed to stop: reg 0x%lx, bit 0x%08x\n",
4661 (u_long)reg, bit);
4662 }
4663
4664 /*
4665 * Stop the adapter and free any mbufs allocated to the
4666 * RX and TX lists.
4667 */
4668 static void
4669 bge_stop(struct ifnet *ifp, int disable)
4670 {
4671 struct bge_softc *sc = ifp->if_softc;
4672
4673 callout_stop(&sc->bge_timeout);
4674
4675 /*
4676 * Tell firmware we're shutting down.
4677 */
4678 bge_stop_fw(sc);
4679 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4680
4681 /* Disable host interrupts. */
4682 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4683 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4684
4685 /*
4686 * Disable all of the receiver blocks
4687 */
4688 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4689 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4690 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4691 if (BGE_IS_5700_FAMILY(sc))
4692 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4693 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4694 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4695 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4696
4697 /*
4698 * Disable all of the transmit blocks
4699 */
4700 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4701 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4702 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4703 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4704 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4705 if (BGE_IS_5700_FAMILY(sc))
4706 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4707 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4708
4709 /*
4710 * Shut down all of the memory managers and related
4711 * state machines.
4712 */
4713 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4714 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4715 if (BGE_IS_5700_FAMILY(sc))
4716 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4717
4718 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4719 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4720
4721 if (BGE_IS_5700_FAMILY(sc)) {
4722 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4723 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4724 }
4725
4726 bge_reset(sc);
4727 bge_sig_legacy(sc, BGE_RESET_STOP);
4728 bge_sig_post_reset(sc, BGE_RESET_STOP);
4729
4730 /*
4731 * Keep the ASF firmware running if up.
4732 */
4733 if (sc->bge_asf_mode & ASF_STACKUP)
4734 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4735 else
4736 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4737
4738 /* Free the RX lists. */
4739 bge_free_rx_ring_std(sc);
4740
4741 /* Free jumbo RX list. */
4742 if (BGE_IS_JUMBO_CAPABLE(sc))
4743 bge_free_rx_ring_jumbo(sc);
4744
4745 /* Free TX buffers. */
4746 bge_free_tx_ring(sc);
4747
4748 /*
4749 * Isolate/power down the PHY.
4750 */
4751 if (!(sc->bge_flags & BGE_PHY_FIBER_TBI))
4752 mii_down(&sc->bge_mii);
4753
4754 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4755
4756 /* Clear MAC's link state (PHY may still have link UP). */
4757 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4758
4759 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4760 }
4761
4762 static void
4763 bge_link_upd(struct bge_softc *sc)
4764 {
4765 struct ifnet *ifp = &sc->ethercom.ec_if;
4766 struct mii_data *mii = &sc->bge_mii;
4767 uint32_t status;
4768 int link;
4769
4770 /* Clear 'pending link event' flag */
4771 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
4772
4773 /*
4774 * Process link state changes.
4775 * Grrr. The link status word in the status block does
4776 * not work correctly on the BCM5700 rev AX and BX chips,
4777 * according to all available information. Hence, we have
4778 * to enable MII interrupts in order to properly obtain
4779 * async link changes. Unfortunately, this also means that
4780 * we have to read the MAC status register to detect link
4781 * changes, thereby adding an additional register access to
4782 * the interrupt handler.
4783 */
4784
4785 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
4786 status = CSR_READ_4(sc, BGE_MAC_STS);
4787 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4788 mii_pollstat(mii);
4789
4790 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4791 mii->mii_media_status & IFM_ACTIVE &&
4792 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4793 BGE_STS_SETBIT(sc, BGE_STS_LINK);
4794 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4795 (!(mii->mii_media_status & IFM_ACTIVE) ||
4796 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4797 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4798
4799 /* Clear the interrupt */
4800 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4801 BGE_EVTENB_MI_INTERRUPT);
4802 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4803 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4804 BRGPHY_INTRS);
4805 }
4806 return;
4807 }
4808
4809 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4810 status = CSR_READ_4(sc, BGE_MAC_STS);
4811 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4812 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4813 BGE_STS_SETBIT(sc, BGE_STS_LINK);
4814 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
4815 BGE_CLRBIT(sc, BGE_MAC_MODE,
4816 BGE_MACMODE_TBI_SEND_CFGS);
4817 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4818 if_link_state_change(ifp, LINK_STATE_UP);
4819 }
4820 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
4821 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4822 if_link_state_change(ifp, LINK_STATE_DOWN);
4823 }
4824 /*
4825 * Discard link events for MII/GMII cards if MI auto-polling is
4826 * disabled. This should not happen since mii callouts are locked
4827 * now, but we keep this check for debugging.
4828 */
4829 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
4830 /*
4831 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4832 * bit in the status word always set. Work around this bug by
4833 * reading the PHY link status directly.
4834 */
4835 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
4836 BGE_STS_LINK : 0;
4837
4838 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
4839 mii_pollstat(mii);
4840
4841 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4842 mii->mii_media_status & IFM_ACTIVE &&
4843 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4844 BGE_STS_SETBIT(sc, BGE_STS_LINK);
4845 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4846 (!(mii->mii_media_status & IFM_ACTIVE) ||
4847 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4848 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4849 }
4850 }
4851
4852 /* Clear the attention */
4853 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4854 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4855 BGE_MACSTAT_LINK_CHANGED);
4856 }
4857
4858 static int
4859 sysctl_bge_verify(SYSCTLFN_ARGS)
4860 {
4861 int error, t;
4862 struct sysctlnode node;
4863
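	/*
	 * Standard NetBSD sysctl handler pattern: copy the node and point
	 * it at a local copy of the value, let sysctl_lookup() update the
	 * copy, then validate before committing to the real storage.
	 */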
4864 node = *rnode;
4865 t = *(int*)rnode->sysctl_data;
4866 node.sysctl_data = &t;
4867 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4868 if (error || newp == NULL)
4869 return error;
4870
4871 #if 0
4872 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
4873 node.sysctl_num, rnode->sysctl_num));
4874 #endif
4875
4876 if (node.sysctl_num == bge_rxthresh_nodenum) {
4877 if (t < 0 || t >= NBGE_RX_THRESH)
4878 return EINVAL;
4879 bge_update_all_threshes(t);
4880 } else
4881 return EINVAL;
4882
4883 *(int*)rnode->sysctl_data = t;
4884
4885 return 0;
4886 }
4887
4888 /*
4889 * Set up sysctl(3) MIB, hw.bge.*.
4890 */
4891 static void
4892 sysctl_bge_init(struct bge_softc *sc)
4893 {
4894 int rc, bge_root_num;
4895 const struct sysctlnode *node;
4896
4897 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, NULL,
4898 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
4899 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
4900 goto out;
4901 }
4902
4903 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
4904 0, CTLTYPE_NODE, "bge",
4905 SYSCTL_DESCR("BGE interface controls"),
4906 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
4907 goto out;
4908 }
4909
4910 bge_root_num = node->sysctl_num;
4911
4912 /* BGE Rx interrupt mitigation level */
4913 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
4914 CTLFLAG_READWRITE,
4915 CTLTYPE_INT, "rx_lvl",
4916 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
4917 sysctl_bge_verify, 0,
4918 &bge_rx_thresh_lvl,
4919 0, CTL_HW, bge_root_num, CTL_CREATE,
4920 CTL_EOL)) != 0) {
4921 goto out;
4922 }
4923
4924 bge_rxthresh_nodenum = node->sysctl_num;
4925
4926 return;
4927
4928 out:
4929 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
4930 }
4931
4932 #ifdef BGE_DEBUG
4933 void
4934 bge_debug_info(struct bge_softc *sc)
4935 {
4936
4937 printf("Hardware Flags:\n");
4938 if (BGE_IS_5755_PLUS(sc))
4939 printf(" - 5755 Plus\n");
4940 if (BGE_IS_5750_OR_BEYOND(sc))
4941 printf(" - 5750 Plus\n");
4942 if (BGE_IS_5705_PLUS(sc))
4943 printf(" - 5705 Plus\n");
4944 if (BGE_IS_5714_FAMILY(sc))
4945 printf(" - 5714 Family\n");
4946 if (BGE_IS_5700_FAMILY(sc))
4947 printf(" - 5700 Family\n");
4948 if (sc->bge_flags & BGE_IS_5788)
4949 printf(" - 5788\n");
4950 if (sc->bge_flags & BGE_JUMBO_CAPABLE)
4951 printf(" - Supports Jumbo Frames\n");
4952 if (sc->bge_flags & BGE_NO_EEPROM)
4953 printf(" - No EEPROM\n");
4954 if (sc->bge_flags & BGE_PCIX)
4955 printf(" - PCI-X Bus\n");
4956 if (sc->bge_flags & BGE_PCIE)
4957 printf(" - PCI Express Bus\n");
4958 if (sc->bge_flags & BGE_NO_3LED)
4959 printf(" - No 3 LEDs\n");
4960 if (sc->bge_flags & BGE_RX_ALIGNBUG)
4961 printf(" - RX Alignment Bug\n");
4962 if (sc->bge_flags & BGE_TSO)
4963 printf(" - TSO\n");
4964 }
4965 #endif /* BGE_DEBUG */
4966
4967 static int
4968 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4969 {
4970 prop_dictionary_t dict;
4971 prop_data_t ea;
4972
4973 if ((sc->bge_flags & BGE_NO_EEPROM) == 0)
4974 return 1;
4975
4976 dict = device_properties(sc->bge_dev);
4977 ea = prop_dictionary_get(dict, "mac-address");
4978 if (ea != NULL) {
4979 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
4980 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
4981 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
4982 return 0;
4983 }
4984
4985 return 1;
4986 }
4987
4988 static int
4989 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4990 {
4991 uint32_t mac_addr;
4992
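	/*
	 * The upper 16 bits of the SRAM mailbox word hold an ASCII "HK"
	 * (0x484b) signature when a valid station address follows.
	 */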
4993 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
4994 if ((mac_addr >> 16) == 0x484b) {
4995 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4996 ether_addr[1] = (uint8_t)mac_addr;
4997 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
4998 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4999 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5000 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5001 ether_addr[5] = (uint8_t)mac_addr;
5002 return 0;
5003 }
5004 return 1;
5005 }
5006
5007 static int
5008 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5009 {
5010 int mac_offset = BGE_EE_MAC_OFFSET;
5011
5012 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5013 mac_offset = BGE_EE_MAC_OFFSET_5906;
5014
5015 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5016 ETHER_ADDR_LEN));
5017 }
5018
5019 static int
5020 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5021 {
5022
5023 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5024 return 1;
5025
5026 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5027 ETHER_ADDR_LEN));
5028 }
5029
5030 static int
5031 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5032 {
5033 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5034 /* NOTE: Order is critical */
5035 bge_get_eaddr_fw,
5036 bge_get_eaddr_mem,
5037 bge_get_eaddr_nvram,
5038 bge_get_eaddr_eeprom,
5039 NULL
5040 };
5041 const bge_eaddr_fcn_t *func;
5042
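	/* Try each source in order; the first function returning 0 wins. */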
5043 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5044 if ((*func)(sc, eaddr) == 0)
5045 break;
5046 }
5047 return (*func == NULL ? ENXIO : 0);
5048 }
5049