1 /* $NetBSD: if_bge.c,v 1.363 2022/07/03 13:21:28 skrll Exp $ */
2
3 /*
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36 */
37
38 /*
39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40 *
41 * NetBSD version by:
42 *
43 * Frank van der Linden <fvdl@wasabisystems.com>
44 * Jason Thorpe <thorpej@wasabisystems.com>
45 * Jonathan Stone <jonathan@dsg.stanford.edu>
46 *
47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
48 * Senior Engineer, Wind River Systems
49 */
50
51 /*
52 * The Broadcom BCM5700 is based on technology originally developed by
53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57 * frames, highly configurable RX filtering, and 16 RX and TX queues
58 * (which, along with RX filter rules, can be used for QOS applications).
59 * Other features, such as TCP segmentation, may be available as part
60 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61 * firmware images can be stored in hardware and need not be compiled
62 * into the driver.
63 *
64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
66 *
67 * The BCM5701 is a single-chip solution incorporating both the BCM5700
68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
69 * does not support external SSRAM.
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can only have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.363 2022/07/03 13:21:28 skrll Exp $");
83
84 #include <sys/param.h>
85
86 #include <sys/callout.h>
87 #include <sys/device.h>
88 #include <sys/malloc.h>
89 #include <sys/mbuf.h>
90 #include <sys/kernel.h>
91 #include <sys/rndsource.h>
92 #include <sys/socket.h>
93 #include <sys/sockio.h>
94 #include <sys/sysctl.h>
95 #include <sys/systm.h>
96
97 #include <net/if.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_ether.h>
101 #include <net/bpf.h>
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #endif
109
110 /* Headers for TCP Segmentation Offload (TSO) */
111 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */
112 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */
113 #include <netinet/ip.h> /* for struct ip */
114 #include <netinet/tcp.h> /* for struct tcphdr */
115
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118 #include <dev/pci/pcidevs.h>
119
120 #include <dev/mii/mii.h>
121 #include <dev/mii/miivar.h>
122 #include <dev/mii/miidevs.h>
123 #include <dev/mii/brgphyreg.h>
124
125 #include <dev/pci/if_bgereg.h>
126 #include <dev/pci/if_bgevar.h>
127
128 #include <prop/proplib.h>
129
130 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
131
132
133 /*
134 * Tunable thresholds for rx-side bge interrupt mitigation.
135 */
136
137 /*
138 * The pairs of values below were obtained from empirical measurement
139 * on bcm5700 rev B2; they are designed to give roughly 1 receive
140 * interrupt for every N packets received, where N is, approximately,
141 * the second value (rx_max_bds) in each pair. The values are chosen
142 * such that moving from one pair to the succeeding pair was observed
143 * to roughly halve interrupt rate under sustained input packet load.
144 * The values were empirically chosen to avoid overflowing internal
145 * limits on the bcm5700: increasing rx_ticks much beyond 600
146 * results in internal wrapping and higher interrupt rates.
147 * The limit of 46 frames was chosen to match NFS workloads.
148 *
149 * These values also work well on bcm5701, bcm5704C, and (less
150 * tested) bcm5703. On other chipsets, (including the Altima chip
151 * family), the larger values may overflow internal chip limits,
152 * leading to increasing interrupt rates rather than lower interrupt
153 * rates.
154 *
155 * Applications using heavy interrupt mitigation (interrupting every
156 * 32 or 46 frames) in both directions may need to increase the TCP
157 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
158 * full link bandwidth, due to ACKs and window updates lingering
159 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
160 */
161 static const struct bge_load_rx_thresh {
162 int rx_ticks;
163 int rx_max_bds; }
164 bge_rx_threshes[] = {
165 { 16, 1 }, /* rx_max_bds = 1 disables interrupt mitigation */
166 { 32, 2 },
167 { 50, 4 },
168 { 100, 8 },
169 { 192, 16 },
170 { 416, 32 },
171 { 598, 46 }
172 };
173 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
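
/*
 * Illustrative sketch (not driver code): a level picked from the table
 * above is first cached in the softc and, from there, ends up in the
 * chip's host-coalescing registers the next time the interrupt
 * mitigation settings are reprogrammed.  Roughly:
 *
 *	sc->bge_rx_coal_ticks   = bge_rx_threshes[lvl].rx_ticks;
 *	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
 *	...
 *	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 *	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 *
 * so, e.g., level 4 ({ 192, 16 }) asks for an interrupt after 16
 * receive buffer descriptors or 192 ticks, whichever happens first.
 * See bge_set_thresh() below for where the first two assignments live.
 */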
174
175 /* XXX patchable; should be sysctl'able */
176 static int bge_auto_thresh = 1;
177 static int bge_rx_thresh_lvl;
178
179 static int bge_rxthresh_nodenum;
180
181 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
182
183 static uint32_t bge_chipid(const struct pci_attach_args *);
184 static int bge_can_use_msi(struct bge_softc *);
185 static int bge_probe(device_t, cfdata_t, void *);
186 static void bge_attach(device_t, device_t, void *);
187 static int bge_detach(device_t, int);
188 static void bge_release_resources(struct bge_softc *);
189
190 static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
191 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
192 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
193 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
194 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
195
196 static void bge_txeof(struct bge_softc *);
197 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
198 static void bge_rxeof(struct bge_softc *);
199
200 static void bge_asf_driver_up (struct bge_softc *);
201 static void bge_tick(void *);
202 static void bge_stats_update(struct bge_softc *);
203 static void bge_stats_update_regs(struct bge_softc *);
204 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
205
206 static int bge_intr(void *);
207 static void bge_start(struct ifnet *);
208 static int bge_ifflags_cb(struct ethercom *);
209 static int bge_ioctl(struct ifnet *, u_long, void *);
210 static int bge_init(struct ifnet *);
211 static void bge_stop(struct ifnet *, int);
212 static void bge_watchdog(struct ifnet *);
213 static int bge_ifmedia_upd(struct ifnet *);
214 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
215
216 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
217 static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);
218
219 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
220 static int bge_read_eeprom(struct bge_softc *, void *, int, int);
221 static void bge_setmulti(struct bge_softc *);
222
223 static void bge_handle_events(struct bge_softc *);
224 static int bge_alloc_jumbo_mem(struct bge_softc *);
225 #if 0 /* XXX */
226 static void bge_free_jumbo_mem(struct bge_softc *);
227 #endif
228 static void *bge_jalloc(struct bge_softc *);
229 static void bge_jfree(struct mbuf *, void *, size_t, void *);
230 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
231 bus_dmamap_t);
232 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
233 static int bge_init_rx_ring_std(struct bge_softc *);
234 static void bge_free_rx_ring_std(struct bge_softc *, bool);
235 static int bge_init_rx_ring_jumbo(struct bge_softc *);
236 static void bge_free_rx_ring_jumbo(struct bge_softc *);
237 static void bge_free_tx_ring(struct bge_softc *, bool);
238 static int bge_init_tx_ring(struct bge_softc *);
239
240 static int bge_chipinit(struct bge_softc *);
241 static int bge_blockinit(struct bge_softc *);
242 static int bge_phy_addr(struct bge_softc *);
243 static uint32_t bge_readmem_ind(struct bge_softc *, int);
244 static void bge_writemem_ind(struct bge_softc *, int, int);
245 static void bge_writembx(struct bge_softc *, int, int);
246 static void bge_writembx_flush(struct bge_softc *, int, int);
247 static void bge_writemem_direct(struct bge_softc *, int, int);
248 static void bge_writereg_ind(struct bge_softc *, int, int);
249 static void bge_set_max_readrq(struct bge_softc *);
250
251 static int bge_miibus_readreg(device_t, int, int, uint16_t *);
252 static int bge_miibus_writereg(device_t, int, int, uint16_t);
253 static void bge_miibus_statchg(struct ifnet *);
254
255 #define BGE_RESET_SHUTDOWN 0
256 #define BGE_RESET_START 1
257 #define BGE_RESET_SUSPEND 2
258 static void bge_sig_post_reset(struct bge_softc *, int);
259 static void bge_sig_legacy(struct bge_softc *, int);
260 static void bge_sig_pre_reset(struct bge_softc *, int);
261 static void bge_wait_for_event_ack(struct bge_softc *);
262 static void bge_stop_fw(struct bge_softc *);
263 static int bge_reset(struct bge_softc *);
264 static void bge_link_upd(struct bge_softc *);
265 static void bge_sysctl_init(struct bge_softc *);
266 static int bge_sysctl_verify(SYSCTLFN_PROTO);
267
268 static void bge_ape_lock_init(struct bge_softc *);
269 static void bge_ape_read_fw_ver(struct bge_softc *);
270 static int bge_ape_lock(struct bge_softc *, int);
271 static void bge_ape_unlock(struct bge_softc *, int);
272 static void bge_ape_send_event(struct bge_softc *, uint32_t);
273 static void bge_ape_driver_state_change(struct bge_softc *, int);
274
275 #ifdef BGE_DEBUG
276 #define DPRINTF(x) if (bgedebug) printf x
277 #define DPRINTFN(n, x) if (bgedebug >= (n)) printf x
278 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0)
279 int bgedebug = 0;
280 int bge_tso_debug = 0;
281 void bge_debug_info(struct bge_softc *);
282 #else
283 #define DPRINTF(x)
284 #define DPRINTFN(n, x)
285 #define BGE_TSO_PRINTF(x)
286 #endif
287
288 #ifdef BGE_EVENT_COUNTERS
289 #define BGE_EVCNT_INCR(ev) (ev).ev_count++
290 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val)
291 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val)
292 #else
293 #define BGE_EVCNT_INCR(ev) /* nothing */
294 #define BGE_EVCNT_ADD(ev, val) /* nothing */
295 #define BGE_EVCNT_UPD(ev, val) /* nothing */
296 #endif
297
298 #define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b
299 /*
300 * The BCM5700 documentation seems to indicate that the hardware still has the
301 * Alteon vendor ID burned into it, though it should always be overridden by
302 * the value in the EEPROM. We'll check for it anyway.
303 */
304 static const struct bge_product {
305 pci_vendor_id_t bp_vendor;
306 pci_product_id_t bp_product;
307 const char *bp_name;
308 } bge_products[] = {
309 { VIDDID(ALTEON, BCM5700), "Broadcom BCM5700 Gigabit" },
310 { VIDDID(ALTEON, BCM5701), "Broadcom BCM5701 Gigabit" },
311 { VIDDID(ALTIMA, AC1000), "Altima AC1000 Gigabit" },
312 { VIDDID(ALTIMA, AC1001), "Altima AC1001 Gigabit" },
313 { VIDDID(ALTIMA, AC1003), "Altima AC1003 Gigabit" },
314 { VIDDID(ALTIMA, AC9100), "Altima AC9100 Gigabit" },
315 { VIDDID(APPLE, BCM5701), "APPLE BCM5701 Gigabit" },
316 { VIDDID(BROADCOM, BCM5700), "Broadcom BCM5700 Gigabit" },
317 { VIDDID(BROADCOM, BCM5701), "Broadcom BCM5701 Gigabit" },
318 { VIDDID(BROADCOM, BCM5702), "Broadcom BCM5702 Gigabit" },
319 { VIDDID(BROADCOM, BCM5702FE), "Broadcom BCM5702FE Fast" },
320 { VIDDID(BROADCOM, BCM5702X), "Broadcom BCM5702X Gigabit" },
321 { VIDDID(BROADCOM, BCM5703), "Broadcom BCM5703 Gigabit" },
322 { VIDDID(BROADCOM, BCM5703X), "Broadcom BCM5703X Gigabit" },
323 { VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" },
324 { VIDDID(BROADCOM, BCM5704C), "Broadcom BCM5704C Dual Gigabit" },
325 { VIDDID(BROADCOM, BCM5704S), "Broadcom BCM5704S Dual Gigabit" },
326 { VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" },
327 { VIDDID(BROADCOM, BCM5705), "Broadcom BCM5705 Gigabit" },
328 { VIDDID(BROADCOM, BCM5705F), "Broadcom BCM5705F Gigabit" },
329 { VIDDID(BROADCOM, BCM5705K), "Broadcom BCM5705K Gigabit" },
330 { VIDDID(BROADCOM, BCM5705M), "Broadcom BCM5705M Gigabit" },
331 { VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" },
332 { VIDDID(BROADCOM, BCM5714), "Broadcom BCM5714 Gigabit" },
333 { VIDDID(BROADCOM, BCM5714S), "Broadcom BCM5714S Gigabit" },
334 { VIDDID(BROADCOM, BCM5715), "Broadcom BCM5715 Gigabit" },
335 { VIDDID(BROADCOM, BCM5715S), "Broadcom BCM5715S Gigabit" },
336 { VIDDID(BROADCOM, BCM5717), "Broadcom BCM5717 Gigabit" },
337 { VIDDID(BROADCOM, BCM5717C), "Broadcom BCM5717 Gigabit" },
338 { VIDDID(BROADCOM, BCM5718), "Broadcom BCM5718 Gigabit" },
339 { VIDDID(BROADCOM, BCM5719), "Broadcom BCM5719 Gigabit" },
340 { VIDDID(BROADCOM, BCM5720), "Broadcom BCM5720 Gigabit" },
341 { VIDDID(BROADCOM, BCM5721), "Broadcom BCM5721 Gigabit" },
342 { VIDDID(BROADCOM, BCM5722), "Broadcom BCM5722 Gigabit" },
343 { VIDDID(BROADCOM, BCM5723), "Broadcom BCM5723 Gigabit" },
344 { VIDDID(BROADCOM, BCM5725), "Broadcom BCM5725 Gigabit" },
345 { VIDDID(BROADCOM, BCM5727), "Broadcom BCM5727 Gigabit" },
346 { VIDDID(BROADCOM, BCM5750), "Broadcom BCM5750 Gigabit" },
347 { VIDDID(BROADCOM, BCM5751), "Broadcom BCM5751 Gigabit" },
348 { VIDDID(BROADCOM, BCM5751F), "Broadcom BCM5751F Gigabit" },
349 { VIDDID(BROADCOM, BCM5751M), "Broadcom BCM5751M Gigabit" },
350 { VIDDID(BROADCOM, BCM5752), "Broadcom BCM5752 Gigabit" },
351 { VIDDID(BROADCOM, BCM5752M), "Broadcom BCM5752M Gigabit" },
352 { VIDDID(BROADCOM, BCM5753), "Broadcom BCM5753 Gigabit" },
353 { VIDDID(BROADCOM, BCM5753F), "Broadcom BCM5753F Gigabit" },
354 { VIDDID(BROADCOM, BCM5753M), "Broadcom BCM5753M Gigabit" },
355 { VIDDID(BROADCOM, BCM5754), "Broadcom BCM5754 Gigabit" },
356 { VIDDID(BROADCOM, BCM5754M), "Broadcom BCM5754M Gigabit" },
357 { VIDDID(BROADCOM, BCM5755), "Broadcom BCM5755 Gigabit" },
358 { VIDDID(BROADCOM, BCM5755M), "Broadcom BCM5755M Gigabit" },
359 { VIDDID(BROADCOM, BCM5756), "Broadcom BCM5756 Gigabit" },
360 { VIDDID(BROADCOM, BCM5761), "Broadcom BCM5761 Gigabit" },
361 { VIDDID(BROADCOM, BCM5761E), "Broadcom BCM5761E Gigabit" },
362 { VIDDID(BROADCOM, BCM5761S), "Broadcom BCM5761S Gigabit" },
363 { VIDDID(BROADCOM, BCM5761SE), "Broadcom BCM5761SE Gigabit" },
364 { VIDDID(BROADCOM, BCM5762), "Broadcom BCM5762 Gigabit" },
365 { VIDDID(BROADCOM, BCM5764), "Broadcom BCM5764 Gigabit" },
366 { VIDDID(BROADCOM, BCM5780), "Broadcom BCM5780 Gigabit" },
367 { VIDDID(BROADCOM, BCM5780S), "Broadcom BCM5780S Gigabit" },
368 { VIDDID(BROADCOM, BCM5781), "Broadcom BCM5781 Gigabit" },
369 { VIDDID(BROADCOM, BCM5782), "Broadcom BCM5782 Gigabit" },
370 { VIDDID(BROADCOM, BCM5784M), "BCM5784M NetLink 1000baseT" },
371 { VIDDID(BROADCOM, BCM5785F), "BCM5785F NetLink 10/100" },
372 { VIDDID(BROADCOM, BCM5785G), "BCM5785G NetLink 1000baseT" },
373 { VIDDID(BROADCOM, BCM5786), "Broadcom BCM5786 Gigabit" },
374 { VIDDID(BROADCOM, BCM5787), "Broadcom BCM5787 Gigabit" },
375 { VIDDID(BROADCOM, BCM5787F), "Broadcom BCM5787F 10/100" },
376 { VIDDID(BROADCOM, BCM5787M), "Broadcom BCM5787M Gigabit" },
377 { VIDDID(BROADCOM, BCM5788), "Broadcom BCM5788 Gigabit" },
378 { VIDDID(BROADCOM, BCM5789), "Broadcom BCM5789 Gigabit" },
379 { VIDDID(BROADCOM, BCM5901), "Broadcom BCM5901 Fast" },
380 { VIDDID(BROADCOM, BCM5901A2), "Broadcom BCM5901A2 Fast" },
381 { VIDDID(BROADCOM, BCM5903M), "Broadcom BCM5903M Fast" },
382 { VIDDID(BROADCOM, BCM5906), "Broadcom BCM5906 Fast" },
383 { VIDDID(BROADCOM, BCM5906M), "Broadcom BCM5906M Fast" },
384 { VIDDID(BROADCOM, BCM57760), "Broadcom BCM57760 Gigabit" },
385 { VIDDID(BROADCOM, BCM57761), "Broadcom BCM57761 Gigabit" },
386 { VIDDID(BROADCOM, BCM57762), "Broadcom BCM57762 Gigabit" },
387 { VIDDID(BROADCOM, BCM57764), "Broadcom BCM57764 Gigabit" },
388 { VIDDID(BROADCOM, BCM57765), "Broadcom BCM57765 Gigabit" },
389 { VIDDID(BROADCOM, BCM57766), "Broadcom BCM57766 Gigabit" },
390 { VIDDID(BROADCOM, BCM57767), "Broadcom BCM57767 Gigabit" },
391 { VIDDID(BROADCOM, BCM57780), "Broadcom BCM57780 Gigabit" },
392 { VIDDID(BROADCOM, BCM57781), "Broadcom BCM57781 Gigabit" },
393 { VIDDID(BROADCOM, BCM57782), "Broadcom BCM57782 Gigabit" },
394 { VIDDID(BROADCOM, BCM57785), "Broadcom BCM57785 Gigabit" },
395 { VIDDID(BROADCOM, BCM57786), "Broadcom BCM57786 Gigabit" },
396 { VIDDID(BROADCOM, BCM57787), "Broadcom BCM57787 Gigabit" },
397 { VIDDID(BROADCOM, BCM57788), "Broadcom BCM57788 Gigabit" },
398 { VIDDID(BROADCOM, BCM57790), "Broadcom BCM57790 Gigabit" },
399 { VIDDID(BROADCOM, BCM57791), "Broadcom BCM57791 Gigabit" },
400 { VIDDID(BROADCOM, BCM57795), "Broadcom BCM57795 Gigabit" },
401 { VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" },
402 { VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" },
403 { VIDDID(3COM, 3C996), "3Com 3c996 Gigabit" },
404 { VIDDID(FUJITSU4, PW008GE4), "Fujitsu PW008GE4 Gigabit" },
405 { VIDDID(FUJITSU4, PW008GE5), "Fujitsu PW008GE5 Gigabit" },
406 { VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" },
407 { 0, 0, NULL },
408 };
409
410 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
411 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGEF_5700_FAMILY)
412 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGEF_5705_PLUS)
413 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGEF_5714_FAMILY)
414 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGEF_575X_PLUS)
415 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGEF_5755_PLUS)
416 #define BGE_IS_57765_FAMILY(sc) ((sc)->bge_flags & BGEF_57765_FAMILY)
417 #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGEF_57765_PLUS)
418 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGEF_5717_PLUS)
419
420 static const struct bge_revision {
421 uint32_t br_chipid;
422 const char *br_name;
423 } bge_revisions[] = {
424 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
425 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
426 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
427 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
428 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
429 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
430 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
431 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
432 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
433 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
434 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
435 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
436 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
437 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
438 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
439 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
440 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
441 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
442 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
443 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
444 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
445 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
446 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
447 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
448 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
449 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
450 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
451 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
452 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
453 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
454 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
455 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
456 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
457 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
458 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
459 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
460 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
461 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
462 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
463 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
464 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
465 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
466 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
467 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
468 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
469 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
470 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
471 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
472 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
473 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
474 { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
475 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
476 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
477 { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
478 { BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
479 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
480 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
481 { BGE_CHIPID_BCM5784_B0, "BCM5784 B0" },
482 /* 5754 and 5787 share the same ASIC ID */
483 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
484 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
485 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
486 { BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
487 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
488 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
489 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
490 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
491 { BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
492 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
493 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
494
495 { 0, NULL }
496 };
497
498 /*
499 * Some defaults for major revisions, so that newer steppings
500 * that we don't know about have a shot at working.
501 */
502 static const struct bge_revision bge_majorrevs[] = {
503 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
504 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
505 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
506 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
507 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
508 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
509 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
510 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
511 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
512 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
513 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
514 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
515 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
516 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
517 /* 5754 and 5787 share the same ASIC ID */
518 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
519 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
520 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
521 { BGE_ASICREV_BCM57766, "unknown BCM57766" },
522 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
523 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
524 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
525 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
526 { BGE_ASICREV_BCM5762, "unknown BCM5762" },
527
528 { 0, NULL }
529 };
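
/*
 * Lookup sketch (the real helper lives elsewhere in this file): the
 * attach code resolves a chip id to a printable name by first scanning
 * bge_revisions[] for an exact BGE_CHIPID_* match and, failing that,
 * masking the id down to its ASIC revision and falling back to
 * bge_majorrevs[], roughly:
 *
 *	for (br = bge_revisions; br->br_name != NULL; br++)
 *		if (br->br_chipid == chipid)
 *			return br;
 *	for (br = bge_majorrevs; br->br_name != NULL; br++)
 *		if (br->br_chipid == BGE_ASICREV(chipid))
 *			return br;
 *
 * which is why every entry above can afford to be an "unknown BCMxxxx".
 */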
530
531 static int bge_allow_asf = 1;
532
533 CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
534 bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
535
536 static uint32_t
537 bge_readmem_ind(struct bge_softc *sc, int off)
538 {
539 pcireg_t val;
540
541 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
542 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
543 return 0;
544
545 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
546 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
547 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
548 return val;
549 }
550
551 static void
552 bge_writemem_ind(struct bge_softc *sc, int off, int val)
553 {
554
555 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
556 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
557 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
558 }
559
560 /*
561 * PCI Express only
562 */
563 static void
564 bge_set_max_readrq(struct bge_softc *sc)
565 {
566 pcireg_t val;
567
568 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
569 + PCIE_DCSR);
570 val &= ~PCIE_DCSR_MAX_READ_REQ;
571 switch (sc->bge_expmrq) {
572 case 2048:
573 val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
574 break;
575 case 4096:
576 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
577 break;
578 default:
579 panic("incorrect expmrq value(%d)", sc->bge_expmrq);
580 break;
581 }
582 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
583 + PCIE_DCSR, val);
584 }
585
586 #ifdef notdef
587 static uint32_t
588 bge_readreg_ind(struct bge_softc *sc, int off)
589 {
590 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
591 return pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA);
592 }
593 #endif
594
595 static void
596 bge_writereg_ind(struct bge_softc *sc, int off, int val)
597 {
598 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
599 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
600 }
601
602 static void
603 bge_writemem_direct(struct bge_softc *sc, int off, int val)
604 {
605 CSR_WRITE_4(sc, off, val);
606 }
607
608 static void
609 bge_writembx(struct bge_softc *sc, int off, int val)
610 {
611 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
612 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
613
614 CSR_WRITE_4(sc, off, val);
615 }
616
617 static void
618 bge_writembx_flush(struct bge_softc *sc, int off, int val)
619 {
620 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
621 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
622
623 CSR_WRITE_4_FLUSH(sc, off, val);
624 }
625
626 /*
627 * Clear all stale locks and select the lock for this driver instance.
628 */
629 void
630 bge_ape_lock_init(struct bge_softc *sc)
631 {
632 struct pci_attach_args *pa = &(sc->bge_pa);
633 uint32_t bit, regbase;
634 int i;
635
636 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
637 regbase = BGE_APE_LOCK_GRANT;
638 else
639 regbase = BGE_APE_PER_LOCK_GRANT;
640
641 /* Clear any stale locks. */
642 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
643 switch (i) {
644 case BGE_APE_LOCK_PHY0:
645 case BGE_APE_LOCK_PHY1:
646 case BGE_APE_LOCK_PHY2:
647 case BGE_APE_LOCK_PHY3:
648 bit = BGE_APE_LOCK_GRANT_DRIVER0;
649 break;
650 default:
651 if (pa->pa_function == 0)
652 bit = BGE_APE_LOCK_GRANT_DRIVER0;
653 else
654 bit = (1 << pa->pa_function);
655 }
656 APE_WRITE_4(sc, regbase + 4 * i, bit);
657 }
658
659 /* Select the PHY lock based on the device's function number. */
660 switch (pa->pa_function) {
661 case 0:
662 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
663 break;
664 case 1:
665 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
666 break;
667 case 2:
668 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
669 break;
670 case 3:
671 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
672 break;
673 default:
674 printf("%s: PHY lock not supported on function\n",
675 device_xname(sc->bge_dev));
676 break;
677 }
678 }
679
680 /*
681 * Check for APE firmware, set flags, and print version info.
682 */
683 void
684 bge_ape_read_fw_ver(struct bge_softc *sc)
685 {
686 const char *fwtype;
687 uint32_t apedata, features;
688
689 /* Check for a valid APE signature in shared memory. */
690 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
691 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
692 sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE;
693 return;
694 }
695
696 /* Check if APE firmware is running. */
697 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
698 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
699 printf("%s: APE signature found but FW status not ready! "
700 "0x%08x\n", device_xname(sc->bge_dev), apedata);
701 return;
702 }
703
704 sc->bge_mfw_flags |= BGE_MFW_ON_APE;
705
706 /* Fetch the APE firmware type and version. */
707 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
708 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
709 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
710 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
711 fwtype = "NCSI";
712 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
713 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
714 fwtype = "DASH";
715 } else
716 fwtype = "UNKN";
717
718 /* Print the APE firmware version. */
719 aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
720 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
721 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
722 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
723 (apedata & BGE_APE_FW_VERSION_BLDMSK));
724 }
725
726 int
727 bge_ape_lock(struct bge_softc *sc, int locknum)
728 {
729 struct pci_attach_args *pa = &(sc->bge_pa);
730 uint32_t bit, gnt, req, status;
731 int i, off;
732
733 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
734 return 0;
735
736 /* Lock request/grant registers have different bases. */
737 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
738 req = BGE_APE_LOCK_REQ;
739 gnt = BGE_APE_LOCK_GRANT;
740 } else {
741 req = BGE_APE_PER_LOCK_REQ;
742 gnt = BGE_APE_PER_LOCK_GRANT;
743 }
744
745 off = 4 * locknum;
746
747 switch (locknum) {
748 case BGE_APE_LOCK_GPIO:
749 /* Lock required when using GPIO. */
750 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
751 return 0;
752 if (pa->pa_function == 0)
753 bit = BGE_APE_LOCK_REQ_DRIVER0;
754 else
755 bit = (1 << pa->pa_function);
756 break;
757 case BGE_APE_LOCK_GRC:
758 /* Lock required to reset the device. */
759 if (pa->pa_function == 0)
760 bit = BGE_APE_LOCK_REQ_DRIVER0;
761 else
762 bit = (1 << pa->pa_function);
763 break;
764 case BGE_APE_LOCK_MEM:
765 /* Lock required when accessing certain APE memory. */
766 if (pa->pa_function == 0)
767 bit = BGE_APE_LOCK_REQ_DRIVER0;
768 else
769 bit = (1 << pa->pa_function);
770 break;
771 case BGE_APE_LOCK_PHY0:
772 case BGE_APE_LOCK_PHY1:
773 case BGE_APE_LOCK_PHY2:
774 case BGE_APE_LOCK_PHY3:
775 /* Lock required when accessing PHYs. */
776 bit = BGE_APE_LOCK_REQ_DRIVER0;
777 break;
778 default:
779 return EINVAL;
780 }
781
782 /* Request a lock. */
783 APE_WRITE_4_FLUSH(sc, req + off, bit);
784
785 /* Wait up to 1 second to acquire lock. */
786 for (i = 0; i < 20000; i++) {
787 status = APE_READ_4(sc, gnt + off);
788 if (status == bit)
789 break;
790 DELAY(50);
791 }
792
793 /* Handle any errors. */
794 if (status != bit) {
795 printf("%s: APE lock %d request failed! "
796 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
797 device_xname(sc->bge_dev),
798 locknum, req + off, bit & 0xFFFF, gnt + off,
799 status & 0xFFFF);
800 /* Revoke the lock request. */
801 APE_WRITE_4(sc, gnt + off, bit);
802 return EBUSY;
803 }
804
805 return 0;
806 }
807
808 void
809 bge_ape_unlock(struct bge_softc *sc, int locknum)
810 {
811 struct pci_attach_args *pa = &(sc->bge_pa);
812 uint32_t bit, gnt;
813 int off;
814
815 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
816 return;
817
818 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
819 gnt = BGE_APE_LOCK_GRANT;
820 else
821 gnt = BGE_APE_PER_LOCK_GRANT;
822
823 off = 4 * locknum;
824
825 switch (locknum) {
826 case BGE_APE_LOCK_GPIO:
827 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
828 return;
829 if (pa->pa_function == 0)
830 bit = BGE_APE_LOCK_GRANT_DRIVER0;
831 else
832 bit = (1 << pa->pa_function);
833 break;
834 case BGE_APE_LOCK_GRC:
835 if (pa->pa_function == 0)
836 bit = BGE_APE_LOCK_GRANT_DRIVER0;
837 else
838 bit = (1 << pa->pa_function);
839 break;
840 case BGE_APE_LOCK_MEM:
841 if (pa->pa_function == 0)
842 bit = BGE_APE_LOCK_GRANT_DRIVER0;
843 else
844 bit = (1 << pa->pa_function);
845 break;
846 case BGE_APE_LOCK_PHY0:
847 case BGE_APE_LOCK_PHY1:
848 case BGE_APE_LOCK_PHY2:
849 case BGE_APE_LOCK_PHY3:
850 bit = BGE_APE_LOCK_GRANT_DRIVER0;
851 break;
852 default:
853 return;
854 }
855
856 /* Write and flush for consecutive bge_ape_lock() */
857 APE_WRITE_4_FLUSH(sc, gnt + off, bit);
858 }
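
/*
 * Usage pattern (sketch): callers bracket accesses to APE-shared
 * resources with a matching lock/unlock pair, and simply skip the
 * access if the lock cannot be obtained:
 *
 *	if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) == 0) {
 *		... touch APE shared memory ...
 *		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
 *	}
 *
 * bge_ape_send_event() below follows this shape.
 */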
859
860 /*
861 * Send an event to the APE firmware.
862 */
863 void
864 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
865 {
866 uint32_t apedata;
867 int i;
868
869 /* NCSI does not support APE events. */
870 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
871 return;
872
873 /* Wait up to 1ms for APE to service previous event. */
874 for (i = 10; i > 0; i--) {
875 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
876 break;
877 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
878 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
879 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
880 BGE_APE_EVENT_STATUS_EVENT_PENDING);
881 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
882 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
883 break;
884 }
885 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
886 DELAY(100);
887 }
888 if (i == 0) {
889 printf("%s: APE event 0x%08x send timed out\n",
890 device_xname(sc->bge_dev), event);
891 }
892 }
893
894 void
895 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
896 {
897 uint32_t apedata, event;
898
899 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
900 return;
901
902 switch (kind) {
903 case BGE_RESET_START:
904 /* If this is the first load, clear the load counter. */
905 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
906 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
907 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
908 else {
909 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
910 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
911 }
912 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
913 BGE_APE_HOST_SEG_SIG_MAGIC);
914 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
915 BGE_APE_HOST_SEG_LEN_MAGIC);
916
917 /* Add some version info if bge(4) supports it. */
918 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
919 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
920 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
921 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
922 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
923 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
924 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
925 BGE_APE_HOST_DRVR_STATE_START);
926 event = BGE_APE_EVENT_STATUS_STATE_START;
927 break;
928 case BGE_RESET_SHUTDOWN:
929 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
930 BGE_APE_HOST_DRVR_STATE_UNLOAD);
931 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
932 break;
933 case BGE_RESET_SUSPEND:
934 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
935 break;
936 default:
937 return;
938 }
939
940 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
941 BGE_APE_EVENT_STATUS_STATE_CHNGE);
942 }
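
/*
 * The reset and stop paths are expected to call this with
 * BGE_RESET_START when (re)initialising the chip, BGE_RESET_SHUTDOWN
 * when the interface is going away for good, and BGE_RESET_SUSPEND
 * when it is only being paused, so the APE firmware can track the host
 * driver's state.
 */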
943
944 static uint8_t
945 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
946 {
947 uint32_t access, byte = 0;
948 int i;
949
950 /* Lock. */
951 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
952 for (i = 0; i < 8000; i++) {
953 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
954 break;
955 DELAY(20);
956 }
957 if (i == 8000)
958 return 1;
959
960 /* Enable access. */
961 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
962 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
963
964 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
965 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
966 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
967 DELAY(10);
968 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
969 DELAY(10);
970 break;
971 }
972 }
973
974 if (i == BGE_TIMEOUT * 10) {
975 aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
976 return 1;
977 }
978
979 /* Get result. */
980 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
981
982 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
983
984 /* Disable access. */
985 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
986
987 /* Unlock. */
988 CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
989
990 return 0;
991 }
992
993 /*
994 * Read a sequence of bytes from NVRAM.
995 */
996 static int
997 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
998 {
999 int error = 0, i;
1000 uint8_t byte = 0;
1001
1002 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1003 return 1;
1004
1005 for (i = 0; i < cnt; i++) {
1006 error = bge_nvram_getbyte(sc, off + i, &byte);
1007 if (error)
1008 break;
1009 *(dest + i) = byte;
1010 }
1011
1012 return error ? 1 : 0;
1013 }
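
/*
 * Example (sketch): on BCM5906 parts the factory MAC address lives in
 * this NVRAM rather than in a serial EEPROM, so the address-probing
 * code can fetch it with something like
 *
 *	uint8_t ea[ETHER_ADDR_LEN];
 *	if (bge_read_nvram(sc, ea, mac_offset, ETHER_ADDR_LEN) == 0)
 *		... use ea ...
 *
 * where "mac_offset" is a stand-in for the device-specific offset of
 * the MAC address in NVRAM (see bge_get_eaddr_nvram()).
 */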
1014
1015 /*
1016 * Read a byte of data stored in the EEPROM at address 'addr.' The
1017 * BCM570x supports both the traditional bitbang interface and an
1018 * auto access interface for reading the EEPROM. We use the auto
1019 * access method.
1020 */
1021 static uint8_t
1022 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
1023 {
1024 int i;
1025 uint32_t byte = 0;
1026
1027 /*
1028 * Enable use of auto EEPROM access so we can avoid
1029 * having to use the bitbang method.
1030 */
1031 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
1032
1033 /* Reset the EEPROM, load the clock period. */
1034 CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR,
1035 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
1036 DELAY(20);
1037
1038 /* Issue the read EEPROM command. */
1039 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
1040
1041 /* Wait for completion */
1042 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
1043 DELAY(10);
1044 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
1045 break;
1046 }
1047
1048 if (i == BGE_TIMEOUT * 10) {
1049 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
1050 return 1;
1051 }
1052
1053 /* Get result. */
1054 byte = CSR_READ_4(sc, BGE_EE_DATA);
1055
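	/*
	 * The EE_DATA register holds a 32-bit word; the shift below
	 * picks out the byte for 'addr' (e.g. addr % 4 == 2 selects
	 * bits 23:16).
	 */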
1056 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
1057
1058 return 0;
1059 }
1060
1061 /*
1062 * Read a sequence of bytes from the EEPROM.
1063 */
1064 static int
1065 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
1066 {
1067 int error = 0, i;
1068 uint8_t byte = 0;
1069 char *dest = destv;
1070
1071 for (i = 0; i < cnt; i++) {
1072 error = bge_eeprom_getbyte(sc, off + i, &byte);
1073 if (error)
1074 break;
1075 *(dest + i) = byte;
1076 }
1077
1078 return error ? 1 : 0;
1079 }
1080
1081 static int
1082 bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
1083 {
1084 struct bge_softc * const sc = device_private(dev);
1085 uint32_t data;
1086 uint32_t autopoll;
1087 int rv = 0;
1088 int i;
1089
1090 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1091 return -1;
1092
1093 /* Reading with autopolling on may trigger PCI errors */
1094 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1095 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1096 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1097 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1098 DELAY(80);
1099 }
1100
1101 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
1102 BGE_MIPHY(phy) | BGE_MIREG(reg));
1103
1104 for (i = 0; i < BGE_TIMEOUT; i++) {
1105 delay(10);
1106 data = CSR_READ_4(sc, BGE_MI_COMM);
1107 if (!(data & BGE_MICOMM_BUSY)) {
1108 DELAY(5);
1109 data = CSR_READ_4(sc, BGE_MI_COMM);
1110 break;
1111 }
1112 }
1113
1114 if (i == BGE_TIMEOUT) {
1115 aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
1116 rv = ETIMEDOUT;
1117 } else if ((data & BGE_MICOMM_READFAIL) != 0) {
1118 /* XXX This error occurs on some devices while attaching. */
1119 aprint_debug_dev(sc->bge_dev, "PHY read I/O error\n");
1120 rv = EIO;
1121 } else
1122 *val = data & BGE_MICOMM_DATA;
1123
1124 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1125 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1126 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1127 DELAY(80);
1128 }
1129
1130 bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1131
1132 return rv;
1133 }
1134
1135 static int
1136 bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
1137 {
1138 struct bge_softc * const sc = device_private(dev);
1139 uint32_t data, autopoll;
1140 int rv = 0;
1141 int i;
1142
1143 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
1144 (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL))
1145 return 0;
1146
1147 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1148 return -1;
1149
1150 /* Writing with autopolling on may trigger PCI errors */
1151 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1152 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1153 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1154 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1155 DELAY(80);
1156 }
1157
1158 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
1159 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
1160
1161 for (i = 0; i < BGE_TIMEOUT; i++) {
1162 delay(10);
1163 data = CSR_READ_4(sc, BGE_MI_COMM);
1164 if (!(data & BGE_MICOMM_BUSY)) {
1165 delay(5);
1166 data = CSR_READ_4(sc, BGE_MI_COMM);
1167 break;
1168 }
1169 }
1170
1171 if (i == BGE_TIMEOUT) {
1172 aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
1173 rv = ETIMEDOUT;
1174 } else if ((data & BGE_MICOMM_READFAIL) != 0) {
1175 aprint_error_dev(sc->bge_dev, "PHY write I/O error\n");
1176 rv = EIO;
1177 }
1178
1179 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1180 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1181 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1182 delay(80);
1183 }
1184
1185 bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1186
1187 return rv;
1188 }
1189
1190 static void
1191 bge_miibus_statchg(struct ifnet *ifp)
1192 {
1193 struct bge_softc * const sc = ifp->if_softc;
1194 struct mii_data *mii = &sc->bge_mii;
1195 uint32_t mac_mode, rx_mode, tx_mode;
1196
1197 /*
1198 * Get flow control negotiation result.
1199 */
1200 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1201 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
1202 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1203
1204 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
1205 mii->mii_media_status & IFM_ACTIVE &&
1206 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1207 BGE_STS_SETBIT(sc, BGE_STS_LINK);
1208 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
1209 (!(mii->mii_media_status & IFM_ACTIVE) ||
1210 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
1211 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
1212
1213 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
1214 return;
1215
1216 /* Set the port mode (MII/GMII) to match the link speed. */
1217 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1218 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1219 tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1220 rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1221 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1222 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1223 mac_mode |= BGE_PORTMODE_GMII;
1224 else
1225 mac_mode |= BGE_PORTMODE_MII;
1226
1227 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1228 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1229 if ((mii->mii_media_active & IFM_FDX) != 0) {
1230 if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
1231 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1232 if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
1233 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1234 } else
1235 mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1236
1237 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
1238 DELAY(40);
1239 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1240 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1241 }
1242
1243 /*
1244 * Update rx threshold levels to values in a particular slot
1245 * of the interrupt-mitigation table bge_rx_threshes.
1246 */
1247 static void
1248 bge_set_thresh(struct ifnet *ifp, int lvl)
1249 {
1250 struct bge_softc * const sc = ifp->if_softc;
1251 int s;
1252
1253 /*
1254 * For now, just save the new Rx-intr thresholds and record
1255 * that a threshold update is pending. Updating the hardware
1256 * registers here (even at splhigh()) is observed to
1257 * occasionally cause glitches where Rx-interrupts are not
1258 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
1259 */
1260 s = splnet();
1261 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
1262 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
1263 sc->bge_pending_rxintr_change = 1;
1264 splx(s);
1265 }
1266
1267
1268 /*
1269 * Update Rx thresholds of all bge devices
1270 */
1271 static void
1272 bge_update_all_threshes(int lvl)
1273 {
1274 const char * const namebuf = "bge";
1275 const size_t namelen = strlen(namebuf);
1276 struct ifnet *ifp;
1277
1278 if (lvl < 0)
1279 lvl = 0;
1280 else if (lvl >= NBGE_RX_THRESH)
1281 lvl = NBGE_RX_THRESH - 1;
1282
1283 /*
1284 * Now search all the interfaces for this name/number
1285 */
1286 int s = pserialize_read_enter();
1287 IFNET_READER_FOREACH(ifp) {
1288 if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
1289 continue;
1290 /* We got a match: update if doing auto-threshold-tuning */
1291 if (bge_auto_thresh)
1292 bge_set_thresh(ifp, lvl);
1293 }
1294 pserialize_read_exit(s);
1295 }
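
/*
 * The sysctl handler (bge_sysctl_verify() on the node recorded in
 * bge_rxthresh_nodenum) is expected to funnel user-requested levels
 * through bge_update_all_threshes(), so a single knob retunes every
 * bge instance at once.
 */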
1296
1297 /*
1298 * Handle events that have triggered interrupts.
1299 */
1300 static void
1301 bge_handle_events(struct bge_softc *sc)
1302 {
1303
1304 return;
1305 }
1306
1307 /*
1308 * Memory management for jumbo frames.
1309 */
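
/*
 * Layout sketch: bge_alloc_jumbo_mem() below grabs one contiguous
 * DMA-able block of BGE_JMEM bytes and carves it into BGE_JSLOTS
 * buffers of BGE_JLEN bytes each, each large enough to hold one jumbo
 * frame.  Free slots sit on bge_jfree_listhead; slots attached to an
 * mbuf via MEXTADD() move to bge_jinuse_listhead and are returned by
 * the bge_jfree() external-storage callback.
 */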
1310
1311 static int
1312 bge_alloc_jumbo_mem(struct bge_softc *sc)
1313 {
1314 char *ptr, *kva;
1315 bus_dma_segment_t seg;
1316 int i, rseg, state, error;
1317 struct bge_jpool_entry *entry;
1318
1319 state = error = 0;
1320
1321 /* Grab a big chunk o' storage. */
1322 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
1323 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1324 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
1325 return ENOBUFS;
1326 }
1327
1328 state = 1;
1329 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
1330 BUS_DMA_NOWAIT)) {
1331 aprint_error_dev(sc->bge_dev,
1332 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
1333 error = ENOBUFS;
1334 goto out;
1335 }
1336
1337 state = 2;
1338 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
1339 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
1340 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
1341 error = ENOBUFS;
1342 goto out;
1343 }
1344
1345 state = 3;
1346 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
1347 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
1348 aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
1349 error = ENOBUFS;
1350 goto out;
1351 }
1352
1353 state = 4;
1354 sc->bge_cdata.bge_jumbo_buf = (void *)kva;
1355 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));
1356
1357 SLIST_INIT(&sc->bge_jfree_listhead);
1358 SLIST_INIT(&sc->bge_jinuse_listhead);
1359
1360 /*
1361 * Now divide it up into 9K pieces and save the addresses
1362 * in an array.
1363 */
1364 ptr = sc->bge_cdata.bge_jumbo_buf;
1365 for (i = 0; i < BGE_JSLOTS; i++) {
1366 sc->bge_cdata.bge_jslots[i] = ptr;
1367 ptr += BGE_JLEN;
1368 entry = malloc(sizeof(struct bge_jpool_entry),
1369 M_DEVBUF, M_WAITOK);
1370 entry->slot = i;
1371 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
1372 entry, jpool_entries);
1373 }
1374 out:
1375 if (error != 0) {
1376 switch (state) {
1377 case 4:
1378 bus_dmamap_unload(sc->bge_dmatag,
1379 sc->bge_cdata.bge_rx_jumbo_map);
1380 /* FALLTHROUGH */
1381 case 3:
1382 bus_dmamap_destroy(sc->bge_dmatag,
1383 sc->bge_cdata.bge_rx_jumbo_map);
1384 /* FALLTHROUGH */
1385 case 2:
1386 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
1387 /* FALLTHROUGH */
1388 case 1:
1389 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
1390 break;
1391 default:
1392 break;
1393 }
1394 }
1395
1396 return error;
1397 }
1398
1399 /*
1400 * Allocate a jumbo buffer.
1401 */
1402 static void *
1403 bge_jalloc(struct bge_softc *sc)
1404 {
1405 struct bge_jpool_entry *entry;
1406
1407 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
1408
1409 if (entry == NULL) {
1410 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
1411 return NULL;
1412 }
1413
1414 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
1415 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
1416 return sc->bge_cdata.bge_jslots[entry->slot];
1417 }
1418
1419 /*
1420 * Release a jumbo buffer.
1421 */
1422 static void
1423 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
1424 {
1425 struct bge_jpool_entry *entry;
1426 struct bge_softc * const sc = arg;
1427 int i, s;
1428
1429 if (sc == NULL)
1430 panic("bge_jfree: can't find softc pointer!");
1431
1432 /* calculate the slot this buffer belongs to */
1433
1434 i = ((char *)buf
1435 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
1436
1437 if ((i < 0) || (i >= BGE_JSLOTS))
1438 panic("bge_jfree: asked to free buffer that we don't manage!");
1439
1440 s = splvm();
1441 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
1442 if (entry == NULL)
1443 panic("bge_jfree: buffer not in use!");
1444 entry->slot = i;
1445 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
1446 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
1447
1448 if (__predict_true(m != NULL))
1449 pool_cache_put(mb_cache, m);
1450 splx(s);
1451 }
1452
1453
1454 /*
1455 * Initialize a standard receive ring descriptor.
1456 */
1457 static int
1458 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
1459 bus_dmamap_t dmamap)
1460 {
1461 struct mbuf *m_new = NULL;
1462 struct bge_rx_bd *r;
1463 int error;
1464
1465 if (dmamap == NULL)
1466 dmamap = sc->bge_cdata.bge_rx_std_map[i];
1467
1468 if (dmamap == NULL) {
1469 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
1470 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
1471 if (error != 0)
1472 return error;
1473 }
1474
1475 sc->bge_cdata.bge_rx_std_map[i] = dmamap;
1476
1477 if (m == NULL) {
1478 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1479 if (m_new == NULL)
1480 return ENOBUFS;
1481
1482 MCLGET(m_new, M_DONTWAIT);
1483 if (!(m_new->m_flags & M_EXT)) {
1484 m_freem(m_new);
1485 return ENOBUFS;
1486 }
1487 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1488
1489 } else {
1490 m_new = m;
1491 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1492 m_new->m_data = m_new->m_ext.ext_buf;
1493 }
1494 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
1495 m_adj(m_new, ETHER_ALIGN);
1496 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
1497 BUS_DMA_READ | BUS_DMA_NOWAIT)) {
1498 m_freem(m_new);
1499 return ENOBUFS;
1500 }
1501 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
1502 BUS_DMASYNC_PREREAD);
1503
1504 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
1505 r = &sc->bge_rdata->bge_rx_std_ring[i];
1506 BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
1507 r->bge_flags = BGE_RXBDFLAG_END;
1508 r->bge_len = m_new->m_len;
1509 r->bge_idx = i;
1510
1511 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1512 offsetof(struct bge_ring_data, bge_rx_std_ring) +
1513 i * sizeof (struct bge_rx_bd),
1514 sizeof (struct bge_rx_bd),
1515 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1516
1517 return 0;
1518 }
1519
1520 /*
1521 * Initialize a jumbo receive ring descriptor. This allocates
1522 * a jumbo buffer from the pool managed internally by the driver.
1523 */
1524 static int
1525 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
1526 {
1527 struct mbuf *m_new = NULL;
1528 struct bge_rx_bd *r;
1529 void *buf = NULL;
1530
1531 if (m == NULL) {
1532
1533 /* Allocate the mbuf. */
1534 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1535 if (m_new == NULL)
1536 return ENOBUFS;
1537
1538 /* Allocate the jumbo buffer */
1539 buf = bge_jalloc(sc);
1540 if (buf == NULL) {
1541 m_freem(m_new);
1542 aprint_error_dev(sc->bge_dev,
1543 "jumbo allocation failed -- packet dropped!\n");
1544 return ENOBUFS;
1545 }
1546
1547 /* Attach the buffer to the mbuf. */
1548 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1549 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
1550 bge_jfree, sc);
1551 m_new->m_flags |= M_EXT_RW;
1552 } else {
1553 m_new = m;
1554 buf = m_new->m_data = m_new->m_ext.ext_buf;
1555 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
1556 }
1557 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
1558 m_adj(m_new, ETHER_ALIGN);
1559 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
1560 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
1561 BGE_JLEN, BUS_DMASYNC_PREREAD);
1562 /* Set up the descriptor. */
1563 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
1564 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
1565 BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
1566 r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING;
1567 r->bge_len = m_new->m_len;
1568 r->bge_idx = i;
1569
1570 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1571 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1572 i * sizeof (struct bge_rx_bd),
1573 sizeof (struct bge_rx_bd),
1574 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1575
1576 return 0;
1577 }
1578
1579 /*
1580 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1581 * that's 1MB of memory, which is a lot. For now, we fill only the first
1582 * 256 ring entries and hope that our CPU is fast enough to keep up with
1583 * the NIC.
1584 */
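/*
 * (In other words: 512 descriptors * 2KB clusters = 1MB of cluster
 * memory if the ring were filled completely; stopping at BGE_SSLOTS
 * (256) entries halves that.)
 */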
1585 static int
1586 bge_init_rx_ring_std(struct bge_softc *sc)
1587 {
1588 int i;
1589
1590 if (sc->bge_flags & BGEF_RXRING_VALID)
1591 return 0;
1592
1593 for (i = 0; i < BGE_SSLOTS; i++) {
1594 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
1595 return ENOBUFS;
1596 }
1597
1598 sc->bge_std = i - 1;
1599 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1600
1601 sc->bge_flags |= BGEF_RXRING_VALID;
1602
1603 return 0;
1604 }
1605
1606 static void
1607 bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
1608 {
1609 int i;
1610
1611 if (!(sc->bge_flags & BGEF_RXRING_VALID))
1612 return;
1613
1614 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1615 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1616 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1617 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1618 if (disable) {
1619 bus_dmamap_destroy(sc->bge_dmatag,
1620 sc->bge_cdata.bge_rx_std_map[i]);
1621 sc->bge_cdata.bge_rx_std_map[i] = NULL;
1622 }
1623 }
1624 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
1625 sizeof(struct bge_rx_bd));
1626 }
1627
1628 sc->bge_flags &= ~BGEF_RXRING_VALID;
1629 }
1630
1631 static int
1632 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1633 {
1634 int i;
1635 volatile struct bge_rcb *rcb;
1636
1637 if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
1638 return 0;
1639
1640 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1641 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1642 return ENOBUFS;
1643 }
1644
1645 sc->bge_jumbo = i - 1;
1646 sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;
1647
1648 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1649 rcb->bge_maxlen_flags = 0;
1650 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1651
1652 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1653
1654 return 0;
1655 }
1656
1657 static void
1658 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1659 {
1660 int i;
1661
1662 if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
1663 return;
1664
1665 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1666 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1667 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1668 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1669 }
1670 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1671 sizeof(struct bge_rx_bd));
1672 }
1673
1674 sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID;
1675 }
1676
1677 static void
1678 bge_free_tx_ring(struct bge_softc *sc, bool disable)
1679 {
1680 int i;
1681 struct txdmamap_pool_entry *dma;
1682
1683 if (!(sc->bge_flags & BGEF_TXRING_VALID))
1684 return;
1685
1686 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1687 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1688 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1689 sc->bge_cdata.bge_tx_chain[i] = NULL;
1690 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1691 link);
1692 sc->txdma[i] = 0;
1693 }
1694 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1695 sizeof(struct bge_tx_bd));
1696 }
1697
1698 if (disable) {
1699 while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1700 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1701 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1702 if (sc->bge_dma64) {
1703 bus_dmamap_destroy(sc->bge_dmatag32,
1704 dma->dmamap32);
1705 }
1706 free(dma, M_DEVBUF);
1707 }
1708 SLIST_INIT(&sc->txdma_list);
1709 }
1710
1711 sc->bge_flags &= ~BGEF_TXRING_VALID;
1712 }
1713
1714 static int
1715 bge_init_tx_ring(struct bge_softc *sc)
1716 {
1717 struct ifnet * const ifp = &sc->ethercom.ec_if;
1718 int i;
1719 bus_dmamap_t dmamap, dmamap32;
1720 bus_size_t maxsegsz;
1721 struct txdmamap_pool_entry *dma;
1722
1723 if (sc->bge_flags & BGEF_TXRING_VALID)
1724 return 0;
1725
1726 sc->bge_txcnt = 0;
1727 sc->bge_tx_saved_considx = 0;
1728
1729 /* Initialize transmit producer index for host-memory send ring. */
1730 sc->bge_tx_prodidx = 0;
1731 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1732 /* 5700 b2 errata */
1733 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1734 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1735
1736 /* NIC-memory send ring not used; initialize to zero. */
1737 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1738 /* 5700 b2 errata */
1739 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1740 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1741
1742 /* Limit DMA segment size for some chips */
1743 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) &&
1744 (ifp->if_mtu <= ETHERMTU))
1745 maxsegsz = 2048;
1746 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1747 maxsegsz = 4096;
1748 else
1749 maxsegsz = ETHER_MAX_LEN_JUMBO;
1750
1751 if (SLIST_FIRST(&sc->txdma_list) != NULL)
1752 goto alloc_done;
1753
1754 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1755 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
1756 BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1757 &dmamap))
1758 return ENOBUFS;
1759 if (dmamap == NULL)
1760 panic("dmamap NULL in bge_init_tx_ring");
1761 if (sc->bge_dma64) {
1762 if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX,
1763 BGE_NTXSEG, maxsegsz, 0,
1764 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1765 &dmamap32)) {
1766 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1767 return ENOBUFS;
1768 }
1769 if (dmamap32 == NULL)
1770 panic("dmamap32 NULL in bge_init_tx_ring");
1771 } else
1772 dmamap32 = dmamap;
1773 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1774 if (dma == NULL) {
1775 aprint_error_dev(sc->bge_dev,
1776 "can't alloc txdmamap_pool_entry\n");
1777 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1778 if (sc->bge_dma64)
1779 bus_dmamap_destroy(sc->bge_dmatag32, dmamap32);
1780 return ENOMEM;
1781 }
1782 dma->dmamap = dmamap;
1783 dma->dmamap32 = dmamap32;
1784 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1785 }
1786 alloc_done:
1787 sc->bge_flags |= BGEF_TXRING_VALID;
1788
1789 return 0;
1790 }
1791
1792 static void
1793 bge_setmulti(struct bge_softc *sc)
1794 {
1795 struct ethercom * const ec = &sc->ethercom;
1796 struct ifnet * const ifp = &ec->ec_if;
1797 struct ether_multi *enm;
1798 struct ether_multistep step;
1799 uint32_t hashes[4] = { 0, 0, 0, 0 };
1800 uint32_t h;
1801 int i;
1802
1803 if (ifp->if_flags & IFF_PROMISC)
1804 goto allmulti;
1805
1806 /* Now program new ones. */
1807 ETHER_LOCK(ec);
1808 ETHER_FIRST_MULTI(step, ec, enm);
1809 while (enm != NULL) {
1810 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1811 /*
1812 * We must listen to a range of multicast addresses.
1813 * For now, just accept all multicasts, rather than
1814 * trying to set only those filter bits needed to match
1815 * the range. (At this time, the only use of address
1816 * ranges is for IP multicast routing, for which the
1817 * range is big enough to require all bits set.)
1818 */
1819 ETHER_UNLOCK(ec);
1820 goto allmulti;
1821 }
1822
1823 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1824
1825 /* Just want the 7 least-significant bits. */
1826 h &= 0x7f;
1827
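		/*
		 * Bits 6:5 of the hash select one of the four 32-bit
		 * BGE_MAR registers and bits 4:0 select the bit within it;
		 * e.g. h = 0x5b sets bit 27 of hashes[2].
		 */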
1828 hashes[(h & 0x60) >> 5] |= 1U << (h & 0x1F);
1829 ETHER_NEXT_MULTI(step, enm);
1830 }
1831 ETHER_UNLOCK(ec);
1832
1833 ifp->if_flags &= ~IFF_ALLMULTI;
1834 goto setit;
1835
1836 allmulti:
1837 ifp->if_flags |= IFF_ALLMULTI;
1838 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1839
1840 setit:
1841 for (i = 0; i < 4; i++)
1842 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1843 }
1844
1845 static void
1846 bge_sig_pre_reset(struct bge_softc *sc, int type)
1847 {
1848
1849 /*
1850 	 * Some chips don't like this, so only do it if ASF is enabled.
1851 */
1852 if (sc->bge_asf_mode)
1853 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1854
1855 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1856 switch (type) {
1857 case BGE_RESET_START:
1858 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1859 BGE_FW_DRV_STATE_START);
1860 break;
1861 case BGE_RESET_SHUTDOWN:
1862 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1863 BGE_FW_DRV_STATE_UNLOAD);
1864 break;
1865 case BGE_RESET_SUSPEND:
1866 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1867 BGE_FW_DRV_STATE_SUSPEND);
1868 break;
1869 }
1870 }
1871
1872 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1873 bge_ape_driver_state_change(sc, type);
1874 }
1875
1876 static void
1877 bge_sig_post_reset(struct bge_softc *sc, int type)
1878 {
1879
1880 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1881 switch (type) {
1882 case BGE_RESET_START:
1883 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1884 BGE_FW_DRV_STATE_START_DONE);
1885 /* START DONE */
1886 break;
1887 case BGE_RESET_SHUTDOWN:
1888 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1889 BGE_FW_DRV_STATE_UNLOAD_DONE);
1890 break;
1891 }
1892 }
1893
1894 if (type == BGE_RESET_SHUTDOWN)
1895 bge_ape_driver_state_change(sc, type);
1896 }
1897
1898 static void
1899 bge_sig_legacy(struct bge_softc *sc, int type)
1900 {
1901
1902 if (sc->bge_asf_mode) {
1903 switch (type) {
1904 case BGE_RESET_START:
1905 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1906 BGE_FW_DRV_STATE_START);
1907 break;
1908 case BGE_RESET_SHUTDOWN:
1909 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1910 BGE_FW_DRV_STATE_UNLOAD);
1911 break;
1912 }
1913 }
1914 }
1915
1916 static void
1917 bge_wait_for_event_ack(struct bge_softc *sc)
1918 {
1919 int i;
1920
1921 /* wait up to 2500usec */
1922 for (i = 0; i < 250; i++) {
1923 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1924 BGE_RX_CPU_DRV_EVENT))
1925 break;
1926 DELAY(10);
1927 }
1928 }
1929
1930 static void
1931 bge_stop_fw(struct bge_softc *sc)
1932 {
1933
1934 if (sc->bge_asf_mode) {
1935 bge_wait_for_event_ack(sc);
1936
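		/*
		 * Post the PAUSE command in the firmware command mailbox,
		 * raise the RX CPU driver-event bit so the firmware notices
		 * it, then wait for the event to be acknowledged.
		 */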
1937 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1938 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
1939 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1940
1941 bge_wait_for_event_ack(sc);
1942 }
1943 }
1944
1945 static int
1946 bge_poll_fw(struct bge_softc *sc)
1947 {
1948 uint32_t val;
1949 int i;
1950
1951 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1952 for (i = 0; i < BGE_TIMEOUT; i++) {
1953 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
1954 if (val & BGE_VCPU_STATUS_INIT_DONE)
1955 break;
1956 DELAY(100);
1957 }
1958 if (i >= BGE_TIMEOUT) {
1959 aprint_error_dev(sc->bge_dev, "reset timed out\n");
1960 return -1;
1961 }
1962 } else {
1963 /*
1964 * Poll the value location we just wrote until
1965 * we see the 1's complement of the magic number.
1966 * This indicates that the firmware initialization
1967 * is complete.
1968 * XXX 1000ms for Flash and 10000ms for SEEPROM.
1969 */
1970 for (i = 0; i < BGE_TIMEOUT; i++) {
1971 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
1972 if (val == ~BGE_SRAM_FW_MB_MAGIC)
1973 break;
1974 DELAY(10);
1975 }
1976
1977 if ((i >= BGE_TIMEOUT)
1978 && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) {
1979 aprint_error_dev(sc->bge_dev,
1980 "firmware handshake timed out, val = %x\n", val);
1981 return -1;
1982 }
1983 }
1984
1985 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
1986 /* tg3 says we have to wait extra time */
1987 delay(10 * 1000);
1988 }
1989
1990 return 0;
1991 }
1992
1993 int
1994 bge_phy_addr(struct bge_softc *sc)
1995 {
1996 struct pci_attach_args *pa = &(sc->bge_pa);
1997 int phy_addr = 1;
1998
1999 /*
2000 * PHY address mapping for various devices.
2001 *
2002 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2003 * ---------+-------+-------+-------+-------+
2004 * BCM57XX | 1 | X | X | X |
2005 * BCM5704 | 1 | X | 1 | X |
2006 * BCM5717 | 1 | 8 | 2 | 9 |
2007 * BCM5719 | 1 | 8 | 2 | 9 |
2008 * BCM5720 | 1 | 8 | 2 | 9 |
2009 *
2010 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2011 * ---------+-------+-------+-------+-------+
2012 * BCM57XX | X | X | X | X |
2013 * BCM5704 | X | X | X | X |
2014 * BCM5717 | X | X | X | X |
2015 * BCM5719 | 3 | 10 | 4 | 11 |
2016 * BCM5720 | X | X | X | X |
2017 *
2018 * Other addresses may respond but they are not
2019 * IEEE compliant PHYs and should be ignored.
2020 */
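	/*
	 * For example, per the table above a BCM5719 SerDes PHY on PCI
	 * function 2 ends up at address 2 + 8 = 10.
	 */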
2021 switch (BGE_ASICREV(sc->bge_chipid)) {
2022 case BGE_ASICREV_BCM5717:
2023 case BGE_ASICREV_BCM5719:
2024 case BGE_ASICREV_BCM5720:
2025 phy_addr = pa->pa_function;
2026 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
2027 phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
2028 BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
2029 } else {
2030 phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2031 BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
2032 }
2033 }
2034
2035 return phy_addr;
2036 }
2037
2038 /*
2039 * Do endian, PCI and DMA initialization. Also check the on-board ROM
2040 * self-test results.
2041 */
2042 static int
2043 bge_chipinit(struct bge_softc *sc)
2044 {
2045 uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg;
2046 int i;
2047
2048 /* Set endianness before we access any non-PCI registers. */
2049 misc_ctl = BGE_INIT;
2050 if (sc->bge_flags & BGEF_TAGGED_STATUS)
2051 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
2052 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
2053 misc_ctl);
2054
2055 /*
2056 * Clear the MAC statistics block in the NIC's
2057 * internal memory.
2058 */
2059 for (i = BGE_STATS_BLOCK;
2060 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
2061 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
2062
2063 for (i = BGE_STATUS_BLOCK;
2064 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
2065 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
2066
2067 /* 5717 workaround from tg3 */
2068 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2069 /* Save */
2070 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2071
2072 		/* Temporarily modify MODE_CTL to control TLP */
2073 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2074 CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);
2075
2076 /* Control TLP */
2077 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2078 BGE_TLP_PHYCTL1);
2079 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
2080 reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);
2081
2082 /* Restore */
2083 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2084 }
2085
2086 if (BGE_IS_57765_FAMILY(sc)) {
2087 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
2088 /* Save */
2089 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2090
2091 			/* Temporarily modify MODE_CTL to control TLP */
2092 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2093 CSR_WRITE_4(sc, BGE_MODE_CTL,
2094 reg | BGE_MODECTL_PCIE_TLPADDR1);
2095
2096 /* Control TLP */
2097 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2098 BGE_TLP_PHYCTL5);
2099 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
2100 reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);
2101
2102 /* Restore */
2103 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2104 }
2105 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
2106 /*
2107 			 * For the 57766 and non-Ax versions of the 57765, the
2108 			 * bootcode needs to set up the PCIe Fast Training
2109 			 * Sequence (FTS) value to prevent transmit hangs.
2110 */
2111 reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
2112 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
2113 reg | BGE_CPMU_PADRNG_CTL_RDIV2);
2114
2115 /* Save */
2116 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2117
2118 			/* Temporarily modify MODE_CTL to control TLP */
2119 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2120 CSR_WRITE_4(sc, BGE_MODE_CTL,
2121 reg | BGE_MODECTL_PCIE_TLPADDR0);
2122
2123 /* Control TLP */
2124 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2125 BGE_TLP_FTSMAX);
2126 reg &= ~BGE_TLP_FTSMAX_MSK;
2127 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
2128 reg | BGE_TLP_FTSMAX_VAL);
2129
2130 /* Restore */
2131 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2132 }
2133
2134 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
2135 reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
2136 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
2137 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
2138 }
2139
2140 /* Set up the PCI DMA control register. */
2141 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
2142 if (sc->bge_flags & BGEF_PCIE) {
2143 /* Read watermark not used, 128 bytes for write. */
2144 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
2145 device_xname(sc->bge_dev)));
2146 if (sc->bge_mps >= 256)
2147 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2148 else
2149 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2150 } else if (sc->bge_flags & BGEF_PCIX) {
2151 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n",
2152 device_xname(sc->bge_dev)));
2153 /* PCI-X bus */
2154 if (BGE_IS_5714_FAMILY(sc)) {
2155 /* 256 bytes for read and write. */
2156 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
2157 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
2158
2159 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
2160 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2161 else
2162 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
2163 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
2164 /*
2165 * In the BCM5703, the DMA read watermark should
2166 * be set to less than or equal to the maximum
2167 * memory read byte count of the PCI-X command
2168 * register.
2169 */
2170 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
2171 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2172 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2173 /* 1536 bytes for read, 384 bytes for write. */
2174 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2175 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2176 } else {
2177 /* 384 bytes for read and write. */
2178 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
2179 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
2180 (0x0F);
2181 }
2182
2183 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2184 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2185 uint32_t tmp;
2186
2187 /* Set ONEDMA_ATONCE for hardware workaround. */
2188 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
2189 if (tmp == 6 || tmp == 7)
2190 dma_rw_ctl |=
2191 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2192
2193 /* Set PCI-X DMA write workaround. */
2194 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
2195 }
2196 } else {
2197 /* Conventional PCI bus: 256 bytes for read and write. */
2198 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
2199 device_xname(sc->bge_dev)));
2200 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2201 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2202
2203 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
2204 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
2205 dma_rw_ctl |= 0x0F;
2206 }
2207
2208 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2209 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
2210 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
2211 BGE_PCIDMARWCTL_ASRT_ALL_BE;
2212
2213 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2214 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2215 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
2216
2217 if (BGE_IS_57765_PLUS(sc)) {
2218 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
2219 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
2220 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
2221
2222 /*
2223 * Enable HW workaround for controllers that misinterpret
2224 * a status tag update and leave interrupts permanently
2225 * disabled.
2226 */
2227 if (!BGE_IS_57765_FAMILY(sc) &&
2228 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2229 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
2230 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
2231 }
2232
2233 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
2234 dma_rw_ctl);
2235
2236 /*
2237 * Set up general mode register.
2238 */
2239 mode_ctl = BGE_DMA_SWAP_OPTIONS;
2240 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2241 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2242 /* Retain Host-2-BMC settings written by APE firmware. */
2243 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
2244 (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
2245 BGE_MODECTL_WORDSWAP_B2HRX_DATA |
2246 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
2247 }
2248 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
2249 BGE_MODECTL_TX_NO_PHDR_CSUM;
2250
2251 /*
2252 	 * The BCM5701 B5 has a bug causing data corruption when using
2253 * 64-bit DMA reads, which can be terminated early and then
2254 * completed later as 32-bit accesses, in combination with
2255 * certain bridges.
2256 */
2257 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2258 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
2259 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
2260
2261 /*
2262 * Tell the firmware the driver is running
2263 */
2264 if (sc->bge_asf_mode & ASF_STACKUP)
2265 mode_ctl |= BGE_MODECTL_STACKUP;
2266
2267 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2268
2269 /*
2270 * Disable memory write invalidate. Apparently it is not supported
2271 * properly by these devices.
2272 */
2273 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
2274 PCI_COMMAND_INVALIDATE_ENABLE);
2275
2276 #ifdef __brokenalpha__
2277 /*
2278 	 * Must ensure that we do not cross an 8K (byte) boundary
2279 * for DMA reads. Our highest limit is 1K bytes. This is a
2280 * restriction on some ALPHA platforms with early revision
2281 * 21174 PCI chipsets, such as the AlphaPC 164lx
2282 */
2283 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
2284 #endif
2285
2286 /* Set the timer prescaler (always 66MHz) */
2287 CSR_WRITE_4_FLUSH(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
2288
2289 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2290 DELAY(40); /* XXX */
2291
2292 /* Put PHY into ready state */
2293 BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
2294 DELAY(40);
2295 }
2296
2297 return 0;
2298 }
2299
2300 static int
2301 bge_blockinit(struct bge_softc *sc)
2302 {
2303 volatile struct bge_rcb *rcb;
2304 bus_size_t rcb_addr;
2305 struct ifnet * const ifp = &sc->ethercom.ec_if;
2306 bge_hostaddr taddr;
2307 uint32_t dmactl, rdmareg, mimode, val;
2308 int i, limit;
2309
2310 /*
2311 * Initialize the memory window pointer register so that
2312 * we can access the first 32K of internal NIC RAM. This will
2313 * allow us to set up the TX send ring RCBs and the RX return
2314 * ring RCBs, plus other things which live in NIC memory.
2315 */
2316 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
2317
2318 if (!BGE_IS_5705_PLUS(sc)) {
2319 /* 57XX step 33 */
2320 /* Configure mbuf memory pool */
2321 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
2322
2323 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2324 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
2325 else
2326 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
2327
2328 /* 57XX step 34 */
2329 /* Configure DMA resource pool */
2330 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
2331 BGE_DMA_DESCRIPTORS);
2332 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
2333 }
2334
2335 /* 5718 step 11, 57XX step 35 */
2336 /*
2337 	 * Configure mbuf pool watermarks. New Broadcom docs strongly
2338 * recommend these.
2339 */
2340 if (BGE_IS_5717_PLUS(sc)) {
2341 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2342 if (ifp->if_mtu > ETHERMTU) {
2343 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
2344 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
2345 } else {
2346 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
2347 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
2348 }
2349 } else if (BGE_IS_5705_PLUS(sc)) {
2350 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2351
2352 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2353 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
2354 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
2355 } else {
2356 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2357 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2358 }
2359 } else {
2360 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
2361 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
2362 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2363 }
2364
2365 /* 57XX step 36 */
2366 /* Configure DMA resource watermarks */
2367 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2368 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
2369
2370 /* 5718 step 13, 57XX step 38 */
2371 /* Enable buffer manager */
2372 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN;
2373 /*
2374 	 * Change the arbitration algorithm for TXMBUF read requests to
2375 	 * round-robin instead of priority-based for the BCM5719. When
2376 * TXFIFO is almost empty, RDMA will hold its request until
2377 * TXFIFO is not almost empty.
2378 */
2379 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2380 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
2381 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2382 sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2383 sc->bge_chipid == BGE_CHIPID_BCM5720_A0)
2384 val |= BGE_BMANMODE_LOMBUF_ATTN;
2385 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
2386
2387 /* 57XX step 39 */
2388 /* Poll for buffer manager start indication */
2389 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2390 DELAY(10);
2391 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2392 break;
2393 }
2394
2395 if (i == BGE_TIMEOUT * 2) {
2396 aprint_error_dev(sc->bge_dev,
2397 "buffer manager failed to start\n");
2398 return ENXIO;
2399 }
2400
2401 /* 57XX step 40 */
2402 /* Enable flow-through queues */
2403 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2404 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2405
2406 /* Wait until queue initialization is complete */
2407 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2408 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2409 break;
2410 DELAY(10);
2411 }
2412
2413 if (i == BGE_TIMEOUT * 2) {
2414 aprint_error_dev(sc->bge_dev,
2415 "flow-through queue init failed\n");
2416 return ENXIO;
2417 }
2418
2419 /*
2420 * Summary of rings supported by the controller:
2421 *
2422 * Standard Receive Producer Ring
2423 * - This ring is used to feed receive buffers for "standard"
2424 * sized frames (typically 1536 bytes) to the controller.
2425 *
2426 * Jumbo Receive Producer Ring
2427 * - This ring is used to feed receive buffers for jumbo sized
2428 * frames (i.e. anything bigger than the "standard" frames)
2429 * to the controller.
2430 *
2431 * Mini Receive Producer Ring
2432 * - This ring is used to feed receive buffers for "mini"
2433 * sized frames to the controller.
2434 	 * - This feature requires external memory for the controller
2435 	 *   but was never used in a production system. It should
2436 	 *   always be disabled.
2437 *
2438 * Receive Return Ring
2439 * - After the controller has placed an incoming frame into a
2440 * receive buffer that buffer is moved into a receive return
2441 	 *   ring. The driver is then responsible for passing the
2442 * buffer up to the stack. Many versions of the controller
2443 * support multiple RR rings.
2444 *
2445 * Send Ring
2446 * - This ring is used for outgoing frames. Many versions of
2447 * the controller support multiple send rings.
2448 */
2449
2450 /* 5718 step 15, 57XX step 41 */
2451 /* Initialize the standard RX ring control block */
2452 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
2453 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
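	/*
	 * BGE_HOSTADDR stores the 64-bit bus address of the host ring as
	 * the 32-bit hi/lo words the RCB expects.
	 */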
2454 /* 5718 step 16 */
2455 if (BGE_IS_57765_PLUS(sc)) {
2456 /*
2457 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2458 * Bits 15-2 : Maximum RX frame size
2459 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2460 * Bit 0 : Reserved
2461 */
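		/*
		 * BGE_RCB_MAXLEN_FLAGS() presumably packs its first
		 * argument into bits 31-16 and its second into bits 15-0,
		 * matching the layout described above.
		 */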
2462 rcb->bge_maxlen_flags =
2463 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
2464 } else if (BGE_IS_5705_PLUS(sc)) {
2465 /*
2466 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2467 * Bits 15-2 : Reserved (should be 0)
2468 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2469 * Bit 0 : Reserved
2470 */
2471 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2472 } else {
2473 /*
2474 * Ring size is always XXX entries
2475 * Bits 31-16: Maximum RX frame size
2476 * Bits 15-2 : Reserved (should be 0)
2477 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2478 * Bit 0 : Reserved
2479 */
2480 rcb->bge_maxlen_flags =
2481 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2482 }
2483 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2484 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2485 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2486 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2487 else
2488 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2489 /* Write the standard receive producer ring control block. */
2490 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2491 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2492 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2493 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2494
2495 /* Reset the standard receive producer ring producer index. */
2496 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2497
2498 /* 57XX step 42 */
2499 /*
2500 * Initialize the jumbo RX ring control block
2501 * We set the 'ring disabled' bit in the flags
2502 * field until we're actually ready to start
2503 * using this ring (i.e. once we set the MTU
2504 * high enough to require it).
2505 */
2506 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2507 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2508 BGE_HOSTADDR(rcb->bge_hostaddr,
2509 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2510 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2511 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2512 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2513 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2514 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2515 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2516 else
2517 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2518 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2519 rcb->bge_hostaddr.bge_addr_hi);
2520 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2521 rcb->bge_hostaddr.bge_addr_lo);
2522 /* Program the jumbo receive producer ring RCB parameters. */
2523 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2524 rcb->bge_maxlen_flags);
2525 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2526 /* Reset the jumbo receive producer ring producer index. */
2527 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2528 }
2529
2530 /* 57XX step 43 */
2531 /* Disable the mini receive producer ring RCB. */
2532 if (BGE_IS_5700_FAMILY(sc)) {
2533 /* Set up dummy disabled mini ring RCB */
2534 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2535 rcb->bge_maxlen_flags =
2536 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2537 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2538 rcb->bge_maxlen_flags);
2539 /* Reset the mini receive producer ring producer index. */
2540 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2541
2542 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2543 offsetof(struct bge_ring_data, bge_info),
2544 sizeof (struct bge_gib),
2545 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2546 }
2547
2548 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2549 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2550 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2551 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2552 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2553 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2554 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2555 }
2556 /* 5718 step 14, 57XX step 44 */
2557 /*
2558 * The BD ring replenish thresholds control how often the
2559 	 * hardware fetches new BDs from the producer rings in host
2560 	 * memory. Setting the value too low on a busy system can
2561 	 * starve the hardware and reduce the throughput.
2562 *
2563 * Set the BD ring replenish thresholds. The recommended
2564 * values are 1/8th the number of descriptors allocated to
2565 * each ring, but since we try to avoid filling the entire
2566 * ring we set these to the minimal value of 8. This needs to
2567 * be done on several of the supported chip revisions anyway,
2568 * to work around HW bugs.
2569 */
2570 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2571 if (BGE_IS_JUMBO_CAPABLE(sc))
2572 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2573
2574 /* 5718 step 18 */
2575 if (BGE_IS_5717_PLUS(sc)) {
2576 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2577 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2578 }
2579
2580 /* 57XX step 45 */
2581 /*
2582 * Disable all send rings by setting the 'ring disabled' bit
2583 * in the flags field of all the TX send ring control blocks,
2584 * located in NIC memory.
2585 */
2586 if (BGE_IS_5700_FAMILY(sc)) {
2587 /* 5700 to 5704 had 16 send rings. */
2588 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2589 } else if (BGE_IS_5717_PLUS(sc)) {
2590 limit = BGE_TX_RINGS_5717_MAX;
2591 } else if (BGE_IS_57765_FAMILY(sc) ||
2592 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2593 limit = BGE_TX_RINGS_57765_MAX;
2594 } else
2595 limit = 1;
2596 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2597 for (i = 0; i < limit; i++) {
2598 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2599 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2600 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2601 rcb_addr += sizeof(struct bge_rcb);
2602 }
2603
2604 /* 57XX step 46 and 47 */
2605 /* Configure send ring RCB 0 (we use only the first ring) */
2606 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2607 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2608 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2609 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2610 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2611 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2612 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2613 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2614 else
2615 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2616 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2617 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2618 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2619
2620 /* 57XX step 48 */
2621 /*
2622 * Disable all receive return rings by setting the
2623 	 * 'ring disabled' bit in the flags field of all the receive
2624 * return ring control blocks, located in NIC memory.
2625 */
2626 if (BGE_IS_5717_PLUS(sc)) {
2627 /* Should be 17, use 16 until we get an SRAM map. */
2628 limit = 16;
2629 } else if (BGE_IS_5700_FAMILY(sc))
2630 limit = BGE_RX_RINGS_MAX;
2631 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2632 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2633 BGE_IS_57765_FAMILY(sc))
2634 limit = 4;
2635 else
2636 limit = 1;
2637 /* Disable all receive return rings */
2638 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2639 for (i = 0; i < limit; i++) {
2640 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2641 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2642 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2643 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2644 BGE_RCB_FLAG_RING_DISABLED));
2645 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2646 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2647 (i * (sizeof(uint64_t))), 0);
2648 rcb_addr += sizeof(struct bge_rcb);
2649 }
2650
2651 /* 57XX step 49 */
2652 /*
2653 * Set up receive return ring 0. Note that the NIC address
2654 * for RX return rings is 0x0. The return rings live entirely
2655 * within the host, so the nicaddr field in the RCB isn't used.
2656 */
2657 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2658 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2659 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2660 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2661 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2662 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2663 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2664
2665 /* 5718 step 24, 57XX step 53 */
2666 /* Set random backoff seed for TX */
2667 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2668 (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2669 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2670 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
2671 BGE_TX_BACKOFF_SEED_MASK);
2672
2673 /* 5718 step 26, 57XX step 55 */
2674 /* Set inter-packet gap */
2675 val = 0x2620;
2676 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2677 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2678 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2679 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2680 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2681
2682 /* 5718 step 27, 57XX step 56 */
2683 /*
2684 * Specify which ring to use for packets that don't match
2685 * any RX rules.
2686 */
2687 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2688
2689 /* 5718 step 28, 57XX step 57 */
2690 /*
2691 * Configure number of RX lists. One interrupt distribution
2692 * list, sixteen active lists, one bad frames class.
2693 */
2694 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2695
2696 /* 5718 step 29, 57XX step 58 */
2697 	/* Initialize RX list placement stats mask. */
2698 if (BGE_IS_575X_PLUS(sc)) {
2699 val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK);
2700 val &= ~BGE_RXLPSTATCONTROL_DACK_FIX;
2701 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val);
2702 } else
2703 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2704
2705 /* 5718 step 30, 57XX step 59 */
2706 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2707
2708 /* 5718 step 33, 57XX step 62 */
2709 /* Disable host coalescing until we get it set up */
2710 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2711
2712 /* 5718 step 34, 57XX step 63 */
2713 /* Poll to make sure it's shut down. */
2714 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2715 DELAY(10);
2716 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2717 break;
2718 }
2719
2720 if (i == BGE_TIMEOUT * 2) {
2721 aprint_error_dev(sc->bge_dev,
2722 "host coalescing engine failed to idle\n");
2723 return ENXIO;
2724 }
2725
2726 /* 5718 step 35, 36, 37 */
2727 /* Set up host coalescing defaults */
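	/*
	 * The ticks and max-BD values bound interrupt coalescing: the
	 * controller raises an interrupt once either the tick timer
	 * expires or the BD count threshold is reached, whichever
	 * happens first.
	 */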
2728 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2729 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2730 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2731 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2732 if (!(BGE_IS_5705_PLUS(sc))) {
2733 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2734 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2735 }
2736 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2737 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2738
2739 /* Set up address of statistics block */
2740 if (BGE_IS_5700_FAMILY(sc)) {
2741 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2742 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2743 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2744 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2745 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2746 }
2747
2748 /* 5718 step 38 */
2749 /* Set up address of status block */
2750 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2751 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2752 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2753 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2754 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2755 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2756
2757 /* Set up status block size. */
2758 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2759 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2760 val = BGE_STATBLKSZ_FULL;
2761 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2762 } else {
2763 val = BGE_STATBLKSZ_32BYTE;
2764 bzero(&sc->bge_rdata->bge_status_block, 32);
2765 }
2766
2767 /* 5718 step 39, 57XX step 73 */
2768 /* Turn on host coalescing state machine */
2769 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2770
2771 /* 5718 step 40, 57XX step 74 */
2772 /* Turn on RX BD completion state machine and enable attentions */
2773 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2774 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2775
2776 /* 5718 step 41, 57XX step 75 */
2777 /* Turn on RX list placement state machine */
2778 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2779
2780 /* 57XX step 76 */
2781 /* Turn on RX list selector state machine. */
2782 if (!(BGE_IS_5705_PLUS(sc)))
2783 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2784
2785 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2786 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2787 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2788 BGE_MACMODE_FRMHDR_DMA_ENB;
2789
2790 if (sc->bge_flags & BGEF_FIBER_TBI)
2791 val |= BGE_PORTMODE_TBI;
2792 else if (sc->bge_flags & BGEF_FIBER_MII)
2793 val |= BGE_PORTMODE_GMII;
2794 else
2795 val |= BGE_PORTMODE_MII;
2796
2797 /* 5718 step 42 and 43, 57XX step 77 and 78 */
2798 /* Allow APE to send/receive frames. */
2799 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2800 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2801
2802 /* Turn on DMA, clear stats */
2803 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
2804 /* 5718 step 44 */
2805 DELAY(40);
2806
2807 /* 5718 step 45, 57XX step 79 */
2808 /* Set misc. local control, enable interrupts on attentions */
2809 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2810 if (BGE_IS_5717_PLUS(sc)) {
2811 CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
2812 /* 5718 step 46 */
2813 DELAY(100);
2814 }
2815
2816 /* 57XX step 81 */
2817 /* Turn on DMA completion state machine */
2818 if (!(BGE_IS_5705_PLUS(sc)))
2819 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2820
2821 /* 5718 step 47, 57XX step 82 */
2822 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2823
2824 /* 5718 step 48 */
2825 /* Enable host coalescing bug fix. */
2826 if (BGE_IS_5755_PLUS(sc))
2827 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2828
2829 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2830 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2831
2832 /* Turn on write DMA state machine */
2833 CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
2834 /* 5718 step 49 */
2835 DELAY(40);
2836
2837 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2838
2839 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2840 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2841
2842 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2843 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2844 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2845 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2846 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2847 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2848
2849 if (sc->bge_flags & BGEF_PCIE)
2850 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2851 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
2852 if (ifp->if_mtu <= ETHERMTU)
2853 val |= BGE_RDMAMODE_JMB_2K_MMRR;
2854 }
2855 if (sc->bge_flags & BGEF_TSO) {
2856 val |= BGE_RDMAMODE_TSO4_ENABLE;
2857 if (BGE_IS_5717_PLUS(sc))
2858 val |= BGE_RDMAMODE_TSO6_ENABLE;
2859 }
2860
2861 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2862 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2863 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2864 BGE_RDMAMODE_H2BNC_VLAN_DET;
2865 /*
2866 * Allow multiple outstanding read requests from
2867 * non-LSO read DMA engine.
2868 */
2869 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2870 }
2871
2872 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2873 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2874 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2875 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2876 BGE_IS_57765_PLUS(sc)) {
2877 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2878 rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2879 else
2880 rdmareg = BGE_RDMA_RSRVCTRL;
2881 dmactl = CSR_READ_4(sc, rdmareg);
2882 /*
2883 		 * Adjust the TX margin to prevent TX data corruption and
2884 * fix internal FIFO overflow.
2885 */
2886 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2887 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2888 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2889 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2890 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2891 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2892 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2893 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2894 }
2895 /*
2896 * Enable fix for read DMA FIFO overruns.
2897 * The fix is to limit the number of RX BDs
2898 * the hardware would fetch at a time.
2899 */
2900 CSR_WRITE_4(sc, rdmareg, dmactl |
2901 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2902 }
2903
2904 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2905 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2906 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2907 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2908 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2909 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2910 /*
2911 * Allow 4KB burst length reads for non-LSO frames.
2912 * Enable 512B burst length reads for buffer descriptors.
2913 */
2914 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2915 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2916 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2917 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2918 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2919 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2920 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2921 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2922 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2923 }
2924 /* Turn on read DMA state machine */
2925 CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
2926 /* 5718 step 52 */
2927 delay(40);
2928
2929 if (sc->bge_flags & BGEF_RDMA_BUG) {
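		/*
		 * Scan the per-channel RDMA length registers (two 16-bit
		 * counts per 32-bit word); if any count exceeds the
		 * standard frame length, enable the TX length workaround
		 * bit for this chip.
		 */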
2930 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2931 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2932 if ((val & 0xFFFF) > BGE_FRAMELEN)
2933 break;
2934 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
2935 break;
2936 }
2937 if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2938 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2939 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2940 val |= BGE_RDMA_TX_LENGTH_WA_5719;
2941 else
2942 val |= BGE_RDMA_TX_LENGTH_WA_5720;
2943 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2944 }
2945 }
2946
2947 /* 5718 step 56, 57XX step 84 */
2948 /* Turn on RX data completion state machine */
2949 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2950
2951 /* Turn on RX data and RX BD initiator state machine */
2952 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2953
2954 /* 57XX step 85 */
2955 /* Turn on Mbuf cluster free state machine */
2956 if (!BGE_IS_5705_PLUS(sc))
2957 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2958
2959 /* 5718 step 57, 57XX step 86 */
2960 /* Turn on send data completion state machine */
2961 val = BGE_SDCMODE_ENABLE;
2962 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2963 val |= BGE_SDCMODE_CDELAY;
2964 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2965
2966 /* 5718 step 58 */
2967 /* Turn on send BD completion state machine */
2968 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2969
2970 /* 57XX step 88 */
2971 /* Turn on RX BD initiator state machine */
2972 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2973
2974 /* 5718 step 60, 57XX step 90 */
2975 /* Turn on send data initiator state machine */
2976 if (sc->bge_flags & BGEF_TSO) {
2977 /* XXX: magic value from Linux driver */
2978 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2979 BGE_SDIMODE_HW_LSO_PRE_DMA);
2980 } else
2981 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2982
2983 /* 5718 step 61, 57XX step 91 */
2984 /* Turn on send BD initiator state machine */
2985 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2986
2987 /* 5718 step 62, 57XX step 92 */
2988 /* Turn on send BD selector state machine */
2989 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2990
2991 /* 5718 step 31, 57XX step 60 */
2992 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2993 /* 5718 step 32, 57XX step 61 */
2994 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2995 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2996
2997 /* ack/clear link change events */
2998 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2999 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3000 BGE_MACSTAT_LINK_CHANGED);
3001 CSR_WRITE_4(sc, BGE_MI_STS, 0);
3002
3003 /*
3004 * Enable attention when the link has changed state for
3005 * devices that use auto polling.
3006 */
3007 if (sc->bge_flags & BGEF_FIBER_TBI) {
3008 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
3009 } else {
3010 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
3011 mimode = BGE_MIMODE_500KHZ_CONST;
3012 else
3013 mimode = BGE_MIMODE_BASE;
3014 /* 5718 step 68. 5718 step 69 (optionally). */
3015 if (BGE_IS_5700_FAMILY(sc) ||
3016 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
3017 mimode |= BGE_MIMODE_AUTOPOLL;
3018 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
3019 }
3020 mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
3021 CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
3022 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
3023 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3024 BGE_EVTENB_MI_INTERRUPT);
3025 }
3026
3027 /*
3028 * Clear any pending link state attention.
3029 * Otherwise some link state change events may be lost until attention
3030 * is cleared by bge_intr() -> bge_link_upd() sequence.
3031 * It's not necessary on newer BCM chips - perhaps enabling link
3032 * state change attentions implies clearing pending attention.
3033 */
3034 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3035 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3036 BGE_MACSTAT_LINK_CHANGED);
3037
3038 /* Enable link state change attentions. */
3039 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
3040
3041 return 0;
3042 }
3043
3044 static const struct bge_revision *
3045 bge_lookup_rev(uint32_t chipid)
3046 {
3047 const struct bge_revision *br;
3048
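	/*
	 * Look for an exact chip ID match first, then fall back to the
	 * ASIC major revision.
	 */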
3049 for (br = bge_revisions; br->br_name != NULL; br++) {
3050 if (br->br_chipid == chipid)
3051 return br;
3052 }
3053
3054 for (br = bge_majorrevs; br->br_name != NULL; br++) {
3055 if (br->br_chipid == BGE_ASICREV(chipid))
3056 return br;
3057 }
3058
3059 return NULL;
3060 }
3061
3062 static const struct bge_product *
3063 bge_lookup(const struct pci_attach_args *pa)
3064 {
3065 const struct bge_product *bp;
3066
3067 for (bp = bge_products; bp->bp_name != NULL; bp++) {
3068 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
3069 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
3070 return bp;
3071 }
3072
3073 return NULL;
3074 }
3075
3076 static uint32_t
3077 bge_chipid(const struct pci_attach_args *pa)
3078 {
3079 uint32_t id;
3080
3081 id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
3082 >> BGE_PCIMISCCTL_ASICREV_SHIFT;
3083
3084 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
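		/*
		 * Newer devices don't encode the ASIC revision in
		 * BGE_PCI_MISC_CTL; the real chip ID has to be read from
		 * a product-family specific config register instead.
		 */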
3085 switch (PCI_PRODUCT(pa->pa_id)) {
3086 case PCI_PRODUCT_BROADCOM_BCM5717:
3087 case PCI_PRODUCT_BROADCOM_BCM5718:
3088 case PCI_PRODUCT_BROADCOM_BCM5719:
3089 case PCI_PRODUCT_BROADCOM_BCM5720:
3090 case PCI_PRODUCT_BROADCOM_BCM5725:
3091 case PCI_PRODUCT_BROADCOM_BCM5727:
3092 case PCI_PRODUCT_BROADCOM_BCM5762:
3093 case PCI_PRODUCT_BROADCOM_BCM57764:
3094 case PCI_PRODUCT_BROADCOM_BCM57767:
3095 case PCI_PRODUCT_BROADCOM_BCM57787:
3096 id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3097 BGE_PCI_GEN2_PRODID_ASICREV);
3098 break;
3099 case PCI_PRODUCT_BROADCOM_BCM57761:
3100 case PCI_PRODUCT_BROADCOM_BCM57762:
3101 case PCI_PRODUCT_BROADCOM_BCM57765:
3102 case PCI_PRODUCT_BROADCOM_BCM57766:
3103 case PCI_PRODUCT_BROADCOM_BCM57781:
3104 case PCI_PRODUCT_BROADCOM_BCM57782:
3105 case PCI_PRODUCT_BROADCOM_BCM57785:
3106 case PCI_PRODUCT_BROADCOM_BCM57786:
3107 case PCI_PRODUCT_BROADCOM_BCM57791:
3108 case PCI_PRODUCT_BROADCOM_BCM57795:
3109 id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3110 BGE_PCI_GEN15_PRODID_ASICREV);
3111 break;
3112 default:
3113 id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3114 BGE_PCI_PRODID_ASICREV);
3115 break;
3116 }
3117 }
3118
3119 return id;
3120 }
3121
3122 /*
3123 * Return true if MSI can be used with this device.
3124 */
3125 static int
3126 bge_can_use_msi(struct bge_softc *sc)
3127 {
3128 int can_use_msi = 0;
3129
3130 switch (BGE_ASICREV(sc->bge_chipid)) {
3131 case BGE_ASICREV_BCM5714_A0:
3132 case BGE_ASICREV_BCM5714:
3133 /*
3134 * Apparently, MSI doesn't work when these chips are
3135 * configured in single-port mode.
3136 */
3137 break;
3138 case BGE_ASICREV_BCM5750:
3139 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
3140 BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
3141 can_use_msi = 1;
3142 break;
3143 default:
3144 if (BGE_IS_575X_PLUS(sc))
3145 can_use_msi = 1;
3146 }
3147 return can_use_msi;
3148 }
3149
3150 /*
3151 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
3152 * against our list and return its name if we find a match. Note
3153 * that since the Broadcom controller contains VPD support, we
3154 * can get the device name string from the controller itself instead
3155 * of the compiled-in string. This is a little slow, but it guarantees
3156 * we'll always announce the right product name.
3157 */
3158 static int
3159 bge_probe(device_t parent, cfdata_t match, void *aux)
3160 {
3161 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
3162
3163 if (bge_lookup(pa) != NULL)
3164 return 1;
3165
3166 return 0;
3167 }
3168
3169 static void
3170 bge_attach(device_t parent, device_t self, void *aux)
3171 {
3172 struct bge_softc * const sc = device_private(self);
3173 struct pci_attach_args * const pa = aux;
3174 prop_dictionary_t dict;
3175 const struct bge_product *bp;
3176 const struct bge_revision *br;
3177 pci_chipset_tag_t pc;
3178 const char *intrstr = NULL;
3179 uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5;
3180 uint32_t command;
3181 struct ifnet *ifp;
3182 struct mii_data * const mii = &sc->bge_mii;
3183 uint32_t misccfg, mimode, macmode;
3184 void * kva;
3185 u_char eaddr[ETHER_ADDR_LEN];
3186 pcireg_t memtype, subid, reg;
3187 bus_addr_t memaddr;
3188 uint32_t pm_ctl;
3189 bool no_seeprom;
3190 int capmask, trys;
3191 int mii_flags;
3192 int map_flags;
3193 char intrbuf[PCI_INTRSTR_LEN];
3194
3195 bp = bge_lookup(pa);
3196 KASSERT(bp != NULL);
3197
3198 sc->sc_pc = pa->pa_pc;
3199 sc->sc_pcitag = pa->pa_tag;
3200 sc->bge_dev = self;
3201
3202 sc->bge_pa = *pa;
3203 pc = sc->sc_pc;
3204 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
3205
3206 aprint_naive(": Ethernet controller\n");
3207 aprint_normal(": %s Ethernet\n", bp->bp_name);
3208
3209 /*
3210 * Map control/status registers.
3211 */
3212 DPRINTFN(5, ("Map control/status regs\n"));
3213 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
3214 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
3215 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
3216 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
3217
3218 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
3219 aprint_error_dev(sc->bge_dev,
3220 "failed to enable memory mapping!\n");
3221 return;
3222 }
3223
3224 DPRINTFN(5, ("pci_mem_find\n"));
3225 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
3226 switch (memtype) {
3227 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3228 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3229 #if 0
3230 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
3231 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
3232 &memaddr, &sc->bge_bsize) == 0)
3233 break;
3234 #else
3235 /*
3236 		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720
3237 		 * based systems get an NMI on boot (PR#48451). This problem
3238 		 * might not be a driver bug but a bug in our common PCI code.
3239 		 * Until we find the real cause, ignore the prefetchable bit.
3240 */
3241 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0,
3242 memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) {
3243 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3244 if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize,
3245 map_flags, &sc->bge_bhandle) == 0) {
3246 sc->bge_btag = pa->pa_memt;
3247 break;
3248 }
3249 }
3250 #endif
3251 /* FALLTHROUGH */
3252 default:
3253 aprint_error_dev(sc->bge_dev, "can't find mem space\n");
3254 return;
3255 }
3256
3257 /* Save various chip information. */
3258 sc->bge_chipid = bge_chipid(pa);
3259 sc->bge_phy_addr = bge_phy_addr(sc);
3260
3261 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
3262 &sc->bge_pciecap, NULL) != 0) {
3263 /* PCIe */
3264 sc->bge_flags |= BGEF_PCIE;
3265 /* Extract supported maximum payload size. */
3266 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3267 sc->bge_pciecap + PCIE_DCAP);
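		/*
		 * PCIE_DCAP_MAX_PAYLOAD encodes the size as a power of two:
		 * 0 = 128 bytes, 1 = 256, 2 = 512, and so on.
		 */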
3268 sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD);
3269 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3270 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3271 sc->bge_expmrq = 2048;
3272 else
3273 sc->bge_expmrq = 4096;
3274 bge_set_max_readrq(sc);
3275 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) {
3276 /* PCIe without PCIe cap */
3277 sc->bge_flags |= BGEF_PCIE;
3278 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
3279 BGE_PCISTATE_PCI_BUSMODE) == 0) {
3280 /* PCI-X */
3281 sc->bge_flags |= BGEF_PCIX;
3282 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
3283 &sc->bge_pcixcap, NULL) == 0)
3284 aprint_error_dev(sc->bge_dev,
3285 "unable to find PCIX capability\n");
3286 }
3287
3288 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) {
3289 /*
3290 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
3291 * can clobber the chip's PCI config-space power control
3292 * registers, leaving the card in D3 powersave state. We do
3293 * not have memory-mapped registers in this state, so force
3294 * device into D0 state before starting initialization.
3295 */
3296 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
3297 pm_ctl &= ~(PCI_PWR_D0 | PCI_PWR_D1 | PCI_PWR_D2 | PCI_PWR_D3);
3298 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
3299 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
3300 DELAY(1000); /* 27 usec is allegedly sufficient */
3301 }
3302
3303 /* Save chipset family. */
3304 switch (BGE_ASICREV(sc->bge_chipid)) {
3305 case BGE_ASICREV_BCM5717:
3306 case BGE_ASICREV_BCM5719:
3307 case BGE_ASICREV_BCM5720:
3308 sc->bge_flags |= BGEF_5717_PLUS;
3309 /* FALLTHROUGH */
3310 case BGE_ASICREV_BCM5762:
3311 case BGE_ASICREV_BCM57765:
3312 case BGE_ASICREV_BCM57766:
3313 if (!BGE_IS_5717_PLUS(sc))
3314 sc->bge_flags |= BGEF_57765_FAMILY;
3315 sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS |
3316 BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE;
3317 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3318 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
3319 /*
3320 			 * Enable the workaround for the DMA engine's
3321 			 * miscalculation of available TXMBUF space.
3322 */
3323 sc->bge_flags |= BGEF_RDMA_BUG;
3324
3325 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
3326 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) {
3327 /* Jumbo frame on BCM5719 A0 does not work. */
3328 sc->bge_flags &= ~BGEF_JUMBO_CAPABLE;
3329 }
3330 }
3331 break;
3332 case BGE_ASICREV_BCM5755:
3333 case BGE_ASICREV_BCM5761:
3334 case BGE_ASICREV_BCM5784:
3335 case BGE_ASICREV_BCM5785:
3336 case BGE_ASICREV_BCM5787:
3337 case BGE_ASICREV_BCM57780:
3338 sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS;
3339 break;
3340 case BGE_ASICREV_BCM5700:
3341 case BGE_ASICREV_BCM5701:
3342 case BGE_ASICREV_BCM5703:
3343 case BGE_ASICREV_BCM5704:
3344 sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE;
3345 break;
3346 case BGE_ASICREV_BCM5714_A0:
3347 case BGE_ASICREV_BCM5780:
3348 case BGE_ASICREV_BCM5714:
3349 sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE;
3350 /* FALLTHROUGH */
3351 case BGE_ASICREV_BCM5750:
3352 case BGE_ASICREV_BCM5752:
3353 case BGE_ASICREV_BCM5906:
3354 sc->bge_flags |= BGEF_575X_PLUS;
3355 /* FALLTHROUGH */
3356 case BGE_ASICREV_BCM5705:
3357 sc->bge_flags |= BGEF_5705_PLUS;
3358 break;
3359 }
3360
3361 /* Identify chips with APE processor. */
3362 switch (BGE_ASICREV(sc->bge_chipid)) {
3363 case BGE_ASICREV_BCM5717:
3364 case BGE_ASICREV_BCM5719:
3365 case BGE_ASICREV_BCM5720:
3366 case BGE_ASICREV_BCM5761:
3367 case BGE_ASICREV_BCM5762:
3368 sc->bge_flags |= BGEF_APE;
3369 break;
3370 }
3371
3372 /*
3373	 * The 40-bit DMA bug applies to the 5714/5715 controllers and is
3374	 * not actually a MAC controller bug but an issue with the embedded
3375	 * PCIe to PCI-X bridge in the device. Use the 40-bit DMA workaround.
3376 */
3377 if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0))
3378 sc->bge_flags |= BGEF_40BIT_BUG;
3379
3380 /* Chips with APE need BAR2 access for APE registers/memory. */
3381 if ((sc->bge_flags & BGEF_APE) != 0) {
3382 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
3383 #if 0
3384 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
3385 &sc->bge_apetag, &sc->bge_apehandle, NULL,
3386 &sc->bge_apesize)) {
3387 aprint_error_dev(sc->bge_dev,
3388 "couldn't map BAR2 memory\n");
3389 return;
3390 }
3391 #else
3392 /*
3393		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720 based
3394		 * systems get an NMI on boot (PR#48451). This problem might not be
3395		 * a bug in the driver but in our common PCI code. Until we
3396		 * find the real cause, ignore the prefetchable bit.
3397 */
3398 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2,
3399 memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) {
3400 aprint_error_dev(sc->bge_dev,
3401 "couldn't map BAR2 memory\n");
3402 return;
3403 }
3404
3405 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3406 if (bus_space_map(pa->pa_memt, memaddr,
3407 sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) {
3408 aprint_error_dev(sc->bge_dev,
3409 "couldn't map BAR2 memory\n");
3410 return;
3411 }
3412 sc->bge_apetag = pa->pa_memt;
3413 #endif
3414
3415 /* Enable APE register/memory access by host driver. */
3416 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
3417 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3418 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3419 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3420 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
3421
3422 bge_ape_lock_init(sc);
3423 bge_ape_read_fw_ver(sc);
3424 }
3425
3426	/* Identify the chips that use a CPMU. */
3427 if (BGE_IS_5717_PLUS(sc) ||
3428 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3429 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3430 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
3431 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
3432 sc->bge_flags |= BGEF_CPMU_PRESENT;
3433
3434 /*
3435 * When using the BCM5701 in PCI-X mode, data corruption has
3436 * been observed in the first few bytes of some received packets.
3437 * Aligning the packet buffer in memory eliminates the corruption.
3438 * Unfortunately, this misaligns the packet payloads. On platforms
3439 * which do not support unaligned accesses, we will realign the
3440 * payloads by copying the received packets.
3441 */
3442 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
3443 sc->bge_flags & BGEF_PCIX)
3444 sc->bge_flags |= BGEF_RX_ALIGNBUG;
3445
3446 if (BGE_IS_5700_FAMILY(sc))
3447 sc->bge_flags |= BGEF_JUMBO_CAPABLE;
3448
3449 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
3450 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
3451
3452 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3453 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3454 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
3455 sc->bge_flags |= BGEF_IS_5788;
3456
3457 /*
3458	 * Some controllers seem to require special firmware to use TSO.
3459	 * That firmware is not available to FreeBSD, and Linux claims
3460	 * that the TSO performed by the firmware is slower than hardware
3461	 * based TSO. Moreover, the firmware-based TSO has a known bug:
3462	 * it cannot handle TSO when the Ethernet header plus the IP/TCP
3463	 * header is greater than 80 bytes. A workaround for that bug
3464	 * exists, but it seems more expensive than simply not using
3465	 * TSO at all. Some hardware also has the TSO bug, so limit
3466	 * TSO to the controllers that are not affected by these issues
3467	 * (e.g. 5755 or newer).
3468 */
3469 if (BGE_IS_5755_PLUS(sc)) {
3470 /*
3471		 * BCM5754 and BCM5787 share the same ASIC ID, so an
3472		 * explicit device ID check is required.
3473 */
3474 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
3475 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
3476 sc->bge_flags |= BGEF_TSO;
3477 /* TSO on BCM5719 A0 does not work. */
3478 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
3479 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0))
3480 sc->bge_flags &= ~BGEF_TSO;
3481 }
3482
3483 capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */
3484 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
3485 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3486 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3487 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
3488 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
3489 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
3490 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
3491 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
3492 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
3493 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
3494 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
3495 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
3496 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
3497 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
3498 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3499 /* These chips are 10/100 only. */
3500 capmask &= ~BMSR_EXTSTAT;
3501 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3502 }
3503
3504 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3505 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3506 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
3507 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
3508 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3509
3510 /* Set various PHY bug flags. */
3511 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3512 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3513 sc->bge_phy_flags |= BGEPHYF_CRC_BUG;
3514 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
3515 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
3516 sc->bge_phy_flags |= BGEPHYF_ADC_BUG;
3517 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3518 sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG;
3519 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3520 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
3521 PCI_VENDOR(subid) == PCI_VENDOR_DELL)
3522 sc->bge_phy_flags |= BGEPHYF_NO_3LED;
3523 if (BGE_IS_5705_PLUS(sc) &&
3524 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
3525 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3526 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
3527 !BGE_IS_57765_PLUS(sc)) {
3528 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
3529 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3530 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3531 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
3532 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
3533 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
3534 sc->bge_phy_flags |= BGEPHYF_JITTER_BUG;
3535 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
3536 sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM;
3537 } else
3538 sc->bge_phy_flags |= BGEPHYF_BER_BUG;
3539 }
3540
3541 /*
3542 * SEEPROM check.
3543	 * First check whether the firmware knows we do not have a SEEPROM.
3544 */
3545 if (prop_dictionary_get_bool(device_properties(self),
3546 "without-seeprom", &no_seeprom) && no_seeprom)
3547 sc->bge_flags |= BGEF_NO_EEPROM;
3548
3549 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
3550 sc->bge_flags |= BGEF_NO_EEPROM;
3551
3552 /* Now check the 'ROM failed' bit on the RX CPU */
3553 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
3554 sc->bge_flags |= BGEF_NO_EEPROM;
3555
3556 sc->bge_asf_mode = 0;
3557 /* No ASF if APE present. */
3558 if ((sc->bge_flags & BGEF_APE) == 0) {
3559 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3560 BGE_SRAM_DATA_SIG_MAGIC)) {
3561 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
3562 BGE_HWCFG_ASF) {
3563 sc->bge_asf_mode |= ASF_ENABLE;
3564 sc->bge_asf_mode |= ASF_STACKUP;
3565 if (BGE_IS_575X_PLUS(sc))
3566 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3567 }
3568 }
3569 }
3570
3571 int counts[PCI_INTR_TYPE_SIZE] = {
3572 [PCI_INTR_TYPE_INTX] = 1,
3573 [PCI_INTR_TYPE_MSI] = 1,
3574 [PCI_INTR_TYPE_MSIX] = 1,
3575 };
3576 int max_type = PCI_INTR_TYPE_MSIX;
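
	/*
	 * Ask for a single vector of each interrupt type; pci_intr_alloc()
	 * is expected to pick the best type available, trying MSI-X first,
	 * then MSI, then INTx, capped at max_type.
	 */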
3577
3578 if (!bge_can_use_msi(sc)) {
3579 /* MSI broken, allow only INTx */
3580 max_type = PCI_INTR_TYPE_INTX;
3581 }
3582
3583 if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) {
3584 aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n");
3585 return;
3586 }
3587
3588 DPRINTFN(5, ("pci_intr_string\n"));
3589 intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf,
3590 sizeof(intrbuf));
3591 DPRINTFN(5, ("pci_intr_establish\n"));
3592 sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0],
3593 IPL_NET, bge_intr, sc, device_xname(sc->bge_dev));
3594 if (sc->bge_intrhand == NULL) {
3595 pci_intr_release(pc, sc->bge_pihp, 1);
3596 sc->bge_pihp = NULL;
3597
3598 aprint_error_dev(self, "couldn't establish interrupt");
3599 if (intrstr != NULL)
3600 aprint_error(" at %s", intrstr);
3601 aprint_error("\n");
3602 return;
3603 }
3604 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
3605
3606 switch (pci_intr_type(pc, sc->bge_pihp[0])) {
3607 case PCI_INTR_TYPE_MSIX:
3608 case PCI_INTR_TYPE_MSI:
3609 KASSERT(bge_can_use_msi(sc));
3610 sc->bge_flags |= BGEF_MSI;
3611 break;
3612 default:
3613 /* nothing to do */
3614 break;
3615 }
3616
3617 /*
3618	 * All controllers except the BCM5700 support tagged status, but
3619	 * we use tagged status only in the MSI case on BCM5717; otherwise
3620	 * MSI on BCM5717 does not work.
3621 */
3622 if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI)
3623 sc->bge_flags |= BGEF_TAGGED_STATUS;
3624
3625 /*
3626	 * Reset the NVRAM before bge_reset(). This is required to acquire the
3627	 * NVRAM lock in bge_reset().
3628 */
3629 CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR,
3630 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
3631 delay(1000);
3632 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
3633
3634 bge_stop_fw(sc);
3635 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
3636 if (bge_reset(sc))
3637 aprint_error_dev(sc->bge_dev, "chip reset failed\n");
3638
3639 /*
3640 * Read the hardware config word in the first 32k of NIC internal
3641 * memory, or fall back to the config word in the EEPROM.
3642 * Note: on some BCM5700 cards, this value appears to be unset.
3643 */
3644 hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0;
3645 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3646 BGE_SRAM_DATA_SIG_MAGIC) {
3647 uint32_t tmp;
3648
3649 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3650 tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >>
3651 BGE_SRAM_DATA_VER_SHIFT;
3652 if ((0 < tmp) && (tmp < 0x100))
3653 hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2);
3654 if (sc->bge_flags & BGEF_PCIE)
3655 hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3);
3656 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
3657 hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4);
3658 if (BGE_IS_5717_PLUS(sc))
3659 hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5);
3660 } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) {
3661 bge_read_eeprom(sc, (void *)&hwcfg,
3662 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
3663 hwcfg = be32toh(hwcfg);
3664 }
3665 aprint_normal_dev(sc->bge_dev,
3666 "HW config %08x, %08x, %08x, %08x %08x\n",
3667 hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5);
3668
3669 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
3670 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
3671
3672 if (bge_chipinit(sc)) {
3673 aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
3674 bge_release_resources(sc);
3675 return;
3676 }
3677
3678 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3679 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL,
3680 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUTEN1);
3681 DELAY(100);
3682 }
3683
3684 /* Set MI_MODE */
3685 mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
3686 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
3687 mimode |= BGE_MIMODE_500KHZ_CONST;
3688 else
3689 mimode |= BGE_MIMODE_BASE;
3690 CSR_WRITE_4_FLUSH(sc, BGE_MI_MODE, mimode);
3691 DELAY(80);
3692
3693 /*
3694 * Get station address from the EEPROM.
3695 */
3696 if (bge_get_eaddr(sc, eaddr)) {
3697 aprint_error_dev(sc->bge_dev,
3698 "failed to read station address\n");
3699 bge_release_resources(sc);
3700 return;
3701 }
3702
3703 br = bge_lookup_rev(sc->bge_chipid);
3704
3705 if (br == NULL) {
3706 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
3707 sc->bge_chipid);
3708 } else {
3709 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
3710 br->br_name, sc->bge_chipid);
3711 }
3712 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
3713
3714 /* Allocate the general information block and ring buffers. */
3715 if (pci_dma64_available(pa)) {
3716 sc->bge_dmatag = pa->pa_dmat64;
3717 sc->bge_dmatag32 = pa->pa_dmat;
3718 sc->bge_dma64 = true;
3719 } else {
3720 sc->bge_dmatag = pa->pa_dmat;
3721 sc->bge_dmatag32 = pa->pa_dmat;
3722 sc->bge_dma64 = false;
3723 }
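
	/*
	 * Note: bge_dmatag32 is kept alongside the (possibly 64-bit)
	 * bge_dmatag so that transmit mappings can fall back to 32-bit
	 * addressing when needed (see the is_dma32 handling in bge_txeof()).
	 */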
3724
3725 /* 40bit DMA workaround */
3726 if (sizeof(bus_addr_t) > 4) {
3727 if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) {
3728 bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */
3729
3730 if (bus_dmatag_subregion(olddmatag, 0,
3731 (bus_addr_t)__MASK(40),
3732 &(sc->bge_dmatag), BUS_DMA_NOWAIT) != 0) {
3733 aprint_error_dev(self,
3734 "WARNING: failed to restrict dma range,"
3735 " falling back to parent bus dma range\n");
3736 sc->bge_dmatag = olddmatag;
3737 }
3738 }
3739 }
3740 SLIST_INIT(&sc->txdma_list);
3741 DPRINTFN(5, ("bus_dmamem_alloc\n"));
3742 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
3743 PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
3744 &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) {
3745 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
3746 return;
3747 }
3748 DPRINTFN(5, ("bus_dmamem_map\n"));
3749 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
3750 sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva,
3751 BUS_DMA_NOWAIT)) {
3752 aprint_error_dev(sc->bge_dev,
3753 "can't map DMA buffers (%zu bytes)\n",
3754 sizeof(struct bge_ring_data));
3755 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3756 sc->bge_ring_rseg);
3757 return;
3758 }
3759 DPRINTFN(5, ("bus_dmamem_create\n"));
3760 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
3761 sizeof(struct bge_ring_data), 0,
3762 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
3763 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
3764 bus_dmamem_unmap(sc->bge_dmatag, kva,
3765 sizeof(struct bge_ring_data));
3766 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3767 sc->bge_ring_rseg);
3768 return;
3769 }
3770 DPRINTFN(5, ("bus_dmamem_load\n"));
3771 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
3772 sizeof(struct bge_ring_data), NULL,
3773 BUS_DMA_NOWAIT)) {
3774 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3775 bus_dmamem_unmap(sc->bge_dmatag, kva,
3776 sizeof(struct bge_ring_data));
3777 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3778 sc->bge_ring_rseg);
3779 return;
3780 }
3781
3782 DPRINTFN(5, ("bzero\n"));
3783 sc->bge_rdata = (struct bge_ring_data *)kva;
3784
3785 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
3786
3787 /* Try to allocate memory for jumbo buffers. */
3788 if (BGE_IS_JUMBO_CAPABLE(sc)) {
3789 if (bge_alloc_jumbo_mem(sc)) {
3790 aprint_error_dev(sc->bge_dev,
3791 "jumbo buffer allocation failed\n");
3792 } else
3793 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3794 }
3795
3796 /* Set default tuneable values. */
3797 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3798 sc->bge_rx_coal_ticks = 150;
3799 sc->bge_rx_max_coal_bds = 64;
3800 sc->bge_tx_coal_ticks = 300;
3801 sc->bge_tx_max_coal_bds = 400;
3802 if (BGE_IS_5705_PLUS(sc)) {
3803 sc->bge_tx_coal_ticks = (12 * 5);
3804 sc->bge_tx_max_coal_bds = (12 * 5);
3805 aprint_verbose_dev(sc->bge_dev,
3806 "setting short Tx thresholds\n");
3807 }
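
	/*
	 * The host coalescing engine raises an interrupt when either the
	 * coalescing tick timer expires or the max-BD threshold is reached,
	 * whichever happens first, so these defaults trade interrupt rate
	 * against latency.
	 */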
3808
3809 if (BGE_IS_5717_PLUS(sc))
3810 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3811 else if (BGE_IS_5705_PLUS(sc))
3812 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3813 else
3814 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3815
3816 /* Set up ifnet structure */
3817 ifp = &sc->ethercom.ec_if;
3818 ifp->if_softc = sc;
3819 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3820 ifp->if_ioctl = bge_ioctl;
3821 ifp->if_stop = bge_stop;
3822 ifp->if_start = bge_start;
3823 ifp->if_init = bge_init;
3824 ifp->if_watchdog = bge_watchdog;
3825 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
3826 IFQ_SET_READY(&ifp->if_snd);
3827 DPRINTFN(5, ("strcpy if_xname\n"));
3828 strcpy(ifp->if_xname, device_xname(sc->bge_dev));
3829
3830 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3831 sc->ethercom.ec_if.if_capabilities |=
3832 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
3833 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */
3834 sc->ethercom.ec_if.if_capabilities |=
3835 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3836 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
3837 #endif
3838 sc->ethercom.ec_capabilities |=
3839 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
3840 sc->ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3841
3842 if (sc->bge_flags & BGEF_TSO)
3843 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
3844
3845 /*
3846 * Do MII setup.
3847 */
3848 DPRINTFN(5, ("mii setup\n"));
3849 mii->mii_ifp = ifp;
3850 mii->mii_readreg = bge_miibus_readreg;
3851 mii->mii_writereg = bge_miibus_writereg;
3852 mii->mii_statchg = bge_miibus_statchg;
3853
3854 /*
3855	 * Figure out what sort of media we have by checking the hardware
3856	 * config word. Note: on some BCM5700 cards, this value appears to be
3857	 * unset. If that's the case, we have to rely on identifying the NIC
3858	 * by its PCI subsystem ID, as we do below for the SysKonnect
3859	 * SK-9D41, which is a 1000baseSX card.
3860 */
3861 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3862 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3863 if (BGE_IS_5705_PLUS(sc)) {
3864 sc->bge_flags |= BGEF_FIBER_MII;
3865 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3866 } else
3867 sc->bge_flags |= BGEF_FIBER_TBI;
3868 }
3869
3870 /* Set bge_phy_flags before prop_dictionary_set_uint32() */
3871 if (BGE_IS_JUMBO_CAPABLE(sc))
3872 sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE;
3873
3874 /* set phyflags and chipid before mii_attach() */
3875 dict = device_properties(self);
3876 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags);
3877 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
3878
3879 macmode = CSR_READ_4(sc, BGE_MAC_MODE);
3880 macmode &= ~BGE_MACMODE_PORTMODE;
3881 /* Initialize ifmedia structures. */
3882 if (sc->bge_flags & BGEF_FIBER_TBI) {
3883 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE,
3884 macmode | BGE_PORTMODE_TBI);
3885 DELAY(40);
3886
3887 sc->ethercom.ec_ifmedia = &sc->bge_ifmedia;
3888 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3889 bge_ifmedia_sts);
3890 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
3891 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX |IFM_FDX,
3892 0, NULL);
3893 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3894 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3895 /* Pretend the user requested this setting */
3896 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3897 } else {
3898 uint16_t phyreg;
3899 int rv;
3900 /*
3901		 * Do transceiver setup and tell the firmware the
3902		 * driver is down so we can try to get access to
3903		 * probe the PHY if ASF is running. Retry a couple of
3904		 * times if we get a conflict with the ASF firmware
3905		 * accessing the PHY.
3906 */
3907 if (sc->bge_flags & BGEF_FIBER_MII)
3908 macmode |= BGE_PORTMODE_GMII;
3909 else
3910 macmode |= BGE_PORTMODE_MII;
3911 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, macmode);
3912 DELAY(40);
3913
3921 trys = 0;
3922 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3923 sc->ethercom.ec_mii = mii;
3924 ifmedia_init(&mii->mii_media, 0, bge_ifmedia_upd,
3925 bge_ifmedia_sts);
3926 mii_flags = MIIF_DOPAUSE;
3927 if (sc->bge_flags & BGEF_FIBER_MII)
3928 mii_flags |= MIIF_HAVEFIBER;
3929 again:
3930 bge_asf_driver_up(sc);
3931 rv = bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
3932 MII_BMCR, &phyreg);
3933 if ((rv != 0) || ((phyreg & BMCR_PDOWN) != 0)) {
3934 int i;
3935
3936 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
3937 MII_BMCR, BMCR_RESET);
3938 /* Wait up to 500ms for it to complete. */
3939 for (i = 0; i < 500; i++) {
3940 bge_miibus_readreg(sc->bge_dev,
3941 sc->bge_phy_addr, MII_BMCR, &phyreg);
3942 if ((phyreg & BMCR_RESET) == 0)
3943 break;
3944 DELAY(1000);
3945 }
3946 }
3947
3948 mii_attach(sc->bge_dev, mii, capmask, sc->bge_phy_addr,
3949 MII_OFFSET_ANY, mii_flags);
3950
3951 if (LIST_EMPTY(&mii->mii_phys) && (trys++ < 4))
3952 goto again;
3953
3954 if (LIST_EMPTY(&mii->mii_phys)) {
3955 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
3956 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
3957 0, NULL);
3958 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
3959 } else
3960 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
3961
3962 /*
3963 * Now tell the firmware we are going up after probing the PHY
3964 */
3965 if (sc->bge_asf_mode & ASF_STACKUP)
3966 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3967 }
3968
3969 /*
3970 * Call MI attach routine.
3971 */
3972 DPRINTFN(5, ("if_attach\n"));
3973 if_attach(ifp);
3974 if_deferred_start_init(ifp, NULL);
3975 DPRINTFN(5, ("ether_ifattach\n"));
3976 ether_ifattach(ifp, eaddr);
3977 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
3978 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
3979 RND_TYPE_NET, RND_FLAG_DEFAULT);
3980 #ifdef BGE_EVENT_COUNTERS
3981 /*
3982 * Attach event counters.
3983 */
3984 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
3985 NULL, device_xname(sc->bge_dev), "intr");
3986 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR,
3987 NULL, device_xname(sc->bge_dev), "intr_spurious");
3988 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR,
3989 NULL, device_xname(sc->bge_dev), "intr_spurious2");
3990 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
3991 NULL, device_xname(sc->bge_dev), "tx_xoff");
3992 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
3993 NULL, device_xname(sc->bge_dev), "tx_xon");
3994 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
3995 NULL, device_xname(sc->bge_dev), "rx_xoff");
3996 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
3997 NULL, device_xname(sc->bge_dev), "rx_xon");
3998 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
3999 NULL, device_xname(sc->bge_dev), "rx_macctl");
4000 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
4001 NULL, device_xname(sc->bge_dev), "xoffentered");
4002 #endif /* BGE_EVENT_COUNTERS */
4003 DPRINTFN(5, ("callout_init\n"));
4004 callout_init(&sc->bge_timeout, 0);
4005 callout_setfunc(&sc->bge_timeout, bge_tick, sc);
4006
4007 if (pmf_device_register(self, NULL, NULL))
4008 pmf_class_network_register(self, ifp);
4009 else
4010 aprint_error_dev(self, "couldn't establish power handler\n");
4011
4012 bge_sysctl_init(sc);
4013
4014 #ifdef BGE_DEBUG
4015 bge_debug_info(sc);
4016 #endif
4017 }
4018
4019 /*
4020 * Stop all chip I/O so that the kernel's probe routines don't
4021 * get confused by errant DMAs when rebooting.
4022 */
4023 static int
4024 bge_detach(device_t self, int flags __unused)
4025 {
4026 struct bge_softc * const sc = device_private(self);
4027 struct ifnet * const ifp = &sc->ethercom.ec_if;
4028 int s;
4029
4030 s = splnet();
4031 /* Stop the interface. Callouts are stopped in it. */
4032 bge_stop(ifp, 1);
4033 splx(s);
4034
4035 mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
4036
4037 ether_ifdetach(ifp);
4038 if_detach(ifp);
4039
4040 /* Delete all remaining media. */
4041 ifmedia_fini(&sc->bge_mii.mii_media);
4042
4043 bge_release_resources(sc);
4044
4045 return 0;
4046 }
4047
4048 static void
4049 bge_release_resources(struct bge_softc *sc)
4050 {
4051
4052 /* Detach sysctl */
4053 if (sc->bge_log != NULL)
4054 sysctl_teardown(&sc->bge_log);
4055
4056 #ifdef BGE_EVENT_COUNTERS
4057 /* Detach event counters. */
4058 evcnt_detach(&sc->bge_ev_intr);
4059 evcnt_detach(&sc->bge_ev_intr_spurious);
4060 evcnt_detach(&sc->bge_ev_intr_spurious2);
4061 evcnt_detach(&sc->bge_ev_tx_xoff);
4062 evcnt_detach(&sc->bge_ev_tx_xon);
4063 evcnt_detach(&sc->bge_ev_rx_xoff);
4064 evcnt_detach(&sc->bge_ev_rx_xon);
4065 evcnt_detach(&sc->bge_ev_rx_macctl);
4066 evcnt_detach(&sc->bge_ev_xoffentered);
4067 #endif /* BGE_EVENT_COUNTERS */
4068
4069 /* Disestablish the interrupt handler */
4070 if (sc->bge_intrhand != NULL) {
4071 pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
4072 pci_intr_release(sc->sc_pc, sc->bge_pihp, 1);
4073 sc->bge_intrhand = NULL;
4074 }
4075
4076 if (sc->bge_dmatag != NULL) {
4077 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
4078 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
4079 bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata,
4080 sizeof(struct bge_ring_data));
4081 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
4082 sc->bge_ring_rseg);
4083 }
4084
4085 /* Unmap the device registers */
4086 if (sc->bge_bsize != 0) {
4087 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
4088 sc->bge_bsize = 0;
4089 }
4090
4091 /* Unmap the APE registers */
4092 if (sc->bge_apesize != 0) {
4093 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
4094 sc->bge_apesize);
4095 sc->bge_apesize = 0;
4096 }
4097 }
4098
4099 static int
4100 bge_reset(struct bge_softc *sc)
4101 {
4102 uint32_t cachesize, command;
4103 uint32_t reset, mac_mode, mac_mode_mask;
4104 pcireg_t devctl, reg;
4105 int i, val;
4106 void (*write_op)(struct bge_softc *, int, int);
4107
4108 /* Make mask for BGE_MAC_MODE register. */
4109 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
4110 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4111 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
4112	/* Save the mac_mode_mask bits of the BGE_MAC_MODE register in mac_mode. */
4113 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
4114
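	/*
	 * Select how the global reset will be posted to BGE_MISC_CFG:
	 * newer (575X and later) chips take it via a memory-window write
	 * (direct on PCIe, indirect otherwise), while older chips and the
	 * 5906 use an indirect register write.
	 */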
4115 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
4116 (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
4117 if (sc->bge_flags & BGEF_PCIE)
4118 write_op = bge_writemem_direct;
4119 else
4120 write_op = bge_writemem_ind;
4121 } else
4122 write_op = bge_writereg_ind;
4123
4124 /* 57XX step 4 */
4125 /* Acquire the NVM lock */
4126 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 &&
4127 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
4128 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
4129 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
4130 for (i = 0; i < 8000; i++) {
4131 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
4132 BGE_NVRAMSWARB_GNT1)
4133 break;
4134 DELAY(20);
4135 }
4136 if (i == 8000) {
4137			printf("%s: NVRAM lock timed out!\n",
4138 device_xname(sc->bge_dev));
4139 }
4140 }
4141
4142 /* Take APE lock when performing reset. */
4143 bge_ape_lock(sc, BGE_APE_LOCK_GRC);
4144
4145 /* 57XX step 3 */
4146 /* Save some important PCI state. */
4147 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
4148 /* 5718 reset step 3 */
4149 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
4150
4151 /* 5718 reset step 5, 57XX step 5b-5d */
4152 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
4153 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4154 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
4155
4156 /* XXX ???: Disable fastboot on controllers that support it. */
4157 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
4158 BGE_IS_5755_PLUS(sc))
4159 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
4160
4161 /* 5718 reset step 2, 57XX step 6 */
4162 /*
4163 * Write the magic number to SRAM at offset 0xB50.
4164 * When firmware finishes its initialization it will
4165 * write ~BGE_MAGIC_NUMBER to the same location.
4166 */
4167 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
4168
4169 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
4170 val = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
4171 val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
4172 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
4173 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val);
4174 }
4175
4176 /* 5718 reset step 6, 57XX step 7 */
4177 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
4178 /*
4179 * XXX: from FreeBSD/Linux; no documentation
4180 */
4181 if (sc->bge_flags & BGEF_PCIE) {
4182 if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) &&
4183 !BGE_IS_57765_PLUS(sc) &&
4184 (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
4185 (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
4186 /* PCI Express 1.0 system */
4187 CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
4188 BGE_PHY_PCIE_SCRAM_MODE);
4189 }
4190 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
4191 /*
4192 * Prevent PCI Express link training
4193 * during global reset.
4194 */
4195 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
4196 reset |= (1 << 29);
4197 }
4198 }
4199
4200 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
4201 i = CSR_READ_4(sc, BGE_VCPU_STATUS);
4202 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
4203 i | BGE_VCPU_STATUS_DRV_RESET);
4204 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
4205 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
4206 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
4207 }
4208
4209 /*
4210 * Set GPHY Power Down Override to leave GPHY
4211 * powered up in D0 uninitialized.
4212 */
4213 if (BGE_IS_5705_PLUS(sc) &&
4214 (sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
4215 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
4216
4217 /* Issue global reset */
4218 write_op(sc, BGE_MISC_CFG, reset);
4219
4220 /* 5718 reset step 7, 57XX step 8 */
4221 if (sc->bge_flags & BGEF_PCIE)
4222 delay(100*1000); /* too big */
4223 else
4224 delay(1000);
4225
4226 if (sc->bge_flags & BGEF_PCIE) {
4227 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
4228 DELAY(500000);
4229 /* XXX: Magic Numbers */
4230 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4231 BGE_PCI_UNKNOWN0);
4232 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4233 BGE_PCI_UNKNOWN0,
4234 reg | (1 << 15));
4235 }
4236 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4237 sc->bge_pciecap + PCIE_DCSR);
4238		/* Clear the no-snoop enable and disable relaxed ordering. */
4239 devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD |
4240 PCIE_DCSR_ENA_NO_SNOOP);
4241
4242 /* Set PCIE max payload size to 128 for older PCIe devices */
4243 if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
4244 devctl &= ~(0x00e0);
4245 /* Clear device status register. Write 1b to clear */
4246 devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED
4247 | PCIE_DCSR_NFED | PCIE_DCSR_CED;
4248 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4249 sc->bge_pciecap + PCIE_DCSR, devctl);
4250 bge_set_max_readrq(sc);
4251 }
4252
4253 /* From Linux: dummy read to flush PCI posted writes */
4254 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
4255
4256 /*
4257	 * Reset some of the PCI state that got zapped by the reset.
4258 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be
4259 * set, too.
4260 */
4261 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
4262 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4263 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
4264 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
4265 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
4266 (sc->bge_flags & BGEF_PCIX) != 0)
4267 val |= BGE_PCISTATE_RETRY_SAME_DMA;
4268 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4269 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
4270 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
4271 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
4272 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val);
4273 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
4274 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
4275
4276 /* 57xx step 11: disable PCI-X Relaxed Ordering. */
4277 if (sc->bge_flags & BGEF_PCIX) {
4278 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
4279 + PCIX_CMD);
4280 /* Set max memory read byte count to 2K */
4281 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
4282 reg &= ~PCIX_CMD_BYTECNT_MASK;
4283 reg |= PCIX_CMD_BCNT_2048;
4284 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){
4285 /*
4286 * For 5704, set max outstanding split transaction
4287 * field to 0 (0 means it supports 1 request)
4288 */
4289 reg &= ~(PCIX_CMD_SPLTRANS_MASK
4290 | PCIX_CMD_BYTECNT_MASK);
4291 reg |= PCIX_CMD_BCNT_2048;
4292 }
4293 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
4294 + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER);
4295 }
4296
4297 /* 5718 reset step 10, 57XX step 12 */
4298 /* Enable memory arbiter. */
4299 if (BGE_IS_5714_FAMILY(sc)) {
4300 val = CSR_READ_4(sc, BGE_MARB_MODE);
4301 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
4302 } else
4303 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4304
4305 /* XXX 5721, 5751 and 5752 */
4306 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
4307 /* Step 19: */
4308 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
4309 /* Step 20: */
4310 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
4311 }
4312
4313 /* 5718 reset step 12, 57XX step 15 and 16 */
4314 /* Fix up byte swapping */
4315 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
4316
4317 /* 5718 reset step 13, 57XX step 17 */
4318 /* Poll until the firmware initialization is complete */
4319 bge_poll_fw(sc);
4320
4321 /* 57XX step 21 */
4322 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
4323 pcireg_t msidata;
4324
4325 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4326 BGE_PCI_MSI_DATA);
4327 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
4328 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
4329 msidata);
4330 }
4331
4332 /* 57XX step 18 */
4333 /* Write mac mode. */
4334 val = CSR_READ_4(sc, BGE_MAC_MODE);
4335 /* Restore mac_mode_mask's bits using mac_mode */
4336 val = (val & ~mac_mode_mask) | mac_mode;
4337 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
4338 DELAY(40);
4339
4340 bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
4341
4342 /*
4343 * The 5704 in TBI mode apparently needs some special
4344	 * adjustment to ensure the SERDES drive level is set
4345 * to 1.2V.
4346 */
4347 if (sc->bge_flags & BGEF_FIBER_TBI &&
4348 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4349 uint32_t serdescfg;
4350
4351 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
4352 serdescfg = (serdescfg & ~0xFFF) | 0x880;
4353 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
4354 }
4355
4356 if (sc->bge_flags & BGEF_PCIE &&
4357 !BGE_IS_57765_PLUS(sc) &&
4358 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
4359 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
4360 uint32_t v;
4361
4362 /* Enable PCI Express bug fix */
4363 v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
4364 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
4365 v | BGE_TLP_DATA_FIFO_PROTECT);
4366 }
4367
4368 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
4369 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
4370 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
4371
4372 return 0;
4373 }
4374
4375 /*
4376 * Frame reception handling. This is called if there's a frame
4377 * on the receive return list.
4378 *
4379 * Note: we have to be able to handle two possibilities here:
4380 * 1) the frame is from the jumbo receive ring
4381 * 2) the frame is from the standard receive ring
4382 */
4383
4384 static void
4385 bge_rxeof(struct bge_softc *sc)
4386 {
4387 struct ifnet * const ifp = &sc->ethercom.ec_if;
4388 uint16_t rx_prod, rx_cons;
4389 int stdcnt = 0, jumbocnt = 0;
4390 bus_dmamap_t dmamap;
4391 bus_addr_t offset, toff;
4392 bus_size_t tlen;
4393 int tosync;
4394
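	/*
	 * Sync the status block first so that we read an up-to-date RX
	 * return ring producer index.
	 */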
4395 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4396 offsetof(struct bge_ring_data, bge_status_block),
4397 sizeof (struct bge_status_block),
4398 BUS_DMASYNC_POSTREAD);
4399
4400 rx_cons = sc->bge_rx_saved_considx;
4401 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
4402
4403 /* Nothing to do */
4404 if (rx_cons == rx_prod)
4405 return;
4406
4407 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
4408 tosync = rx_prod - rx_cons;
4409
4410 if (tosync != 0)
4411 rnd_add_uint32(&sc->rnd_source, tosync);
4412
4413 toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
4414
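	/*
	 * The return ring is circular; if the producer index has wrapped
	 * below the consumer, sync the tail of the ring here and the
	 * leading part in the second sync below.
	 */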
4415 if (tosync < 0) {
4416 tlen = (sc->bge_return_ring_cnt - rx_cons) *
4417 sizeof (struct bge_rx_bd);
4418 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4419 toff, tlen, BUS_DMASYNC_POSTREAD);
4420 tosync = -tosync;
4421 }
4422
4423 if (tosync != 0) {
4424 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4425 offset, tosync * sizeof (struct bge_rx_bd),
4426 BUS_DMASYNC_POSTREAD);
4427 }
4428
4429 while (rx_cons != rx_prod) {
4430 struct bge_rx_bd *cur_rx;
4431 uint32_t rxidx;
4432 struct mbuf *m = NULL;
4433
4434 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
4435
4436 rxidx = cur_rx->bge_idx;
4437 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
4438
4439 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
4440 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4441 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
4442 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
4443 jumbocnt++;
4444 bus_dmamap_sync(sc->bge_dmatag,
4445 sc->bge_cdata.bge_rx_jumbo_map,
4446 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
4447 BGE_JLEN, BUS_DMASYNC_POSTREAD);
4448 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4449 if_statinc(ifp, if_ierrors);
4450 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
4451 continue;
4452 }
4453 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
4454 NULL)== ENOBUFS) {
4455 if_statinc(ifp, if_ierrors);
4456 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
4457 continue;
4458 }
4459 } else {
4460 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4461 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
4462
4463 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
4464 stdcnt++;
4465 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
4466 sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
4467 if (dmamap == NULL) {
4468 if_statinc(ifp, if_ierrors);
4469 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
4470 continue;
4471 }
4472 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
4473 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
4474 bus_dmamap_unload(sc->bge_dmatag, dmamap);
4475 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4476 if_statinc(ifp, if_ierrors);
4477 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
4478 continue;
4479 }
4480 if (bge_newbuf_std(sc, sc->bge_std,
4481 NULL, dmamap) == ENOBUFS) {
4482 if_statinc(ifp, if_ierrors);
4483 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
4484 continue;
4485 }
4486 }
4487
4488 #ifndef __NO_STRICT_ALIGNMENT
4489 /*
4490 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
4491 * the Rx buffer has the layer-2 header unaligned.
4492 * If our CPU requires alignment, re-align by copying.
4493 */
4494 if (sc->bge_flags & BGEF_RX_ALIGNBUG) {
4495 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
4496 cur_rx->bge_len);
4497 m->m_data += ETHER_ALIGN;
4498 }
4499 #endif
4500
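		/* The chip includes the FCS in bge_len; strip it here. */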
4501 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
4502 m_set_rcvif(m, ifp);
4503
4504 bge_rxcsum(sc, cur_rx, m);
4505
4506 /*
4507		 * If we received a packet with a vlan tag, attach the tag
4508		 * to the mbuf so the vlan layer can handle it.
4509 */
4510 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)
4511 vlan_set_tag(m, cur_rx->bge_vlan_tag);
4512
4513 if_percpuq_enqueue(ifp->if_percpuq, m);
4514 }
4515
4516 sc->bge_rx_saved_considx = rx_cons;
4517 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
4518 if (stdcnt)
4519 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
4520 if (jumbocnt)
4521 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
4522 }
4523
4524 static void
4525 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
4526 {
4527
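	/*
	 * 57765-plus chips report checksum results with explicit OK/NOK
	 * flags (and an IPv6 indication); older chips return the computed
	 * IP checksum, which is expected to be 0xffff when the header is
	 * good.
	 */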
4528 if (BGE_IS_57765_PLUS(sc)) {
4529 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4530 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
4531 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
4532 if ((cur_rx->bge_error_flag &
4533 BGE_RXERRFLAG_IP_CSUM_NOK) != 0)
4534 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
4535 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4536 m->m_pkthdr.csum_data =
4537 cur_rx->bge_tcp_udp_csum;
4538 m->m_pkthdr.csum_flags |=
4539 (M_CSUM_TCPv4 | M_CSUM_UDPv4 |M_CSUM_DATA);
4540 }
4541 }
4542 } else {
4543 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
4544 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
4545 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
4546 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
4547 /*
4548 * Rx transport checksum-offload may also
4549 * have bugs with packets which, when transmitted,
4550 * were `runts' requiring padding.
4551 */
4552 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4553 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
4554 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
4555 m->m_pkthdr.csum_data =
4556 cur_rx->bge_tcp_udp_csum;
4557 m->m_pkthdr.csum_flags |=
4558 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_DATA);
4559 }
4560 }
4561 }
4562
4563 static void
4564 bge_txeof(struct bge_softc *sc)
4565 {
4566 struct ifnet * const ifp = &sc->ethercom.ec_if;
4567 struct bge_tx_bd *cur_tx = NULL;
4568 struct txdmamap_pool_entry *dma;
4569 bus_addr_t offset, toff;
4570 bus_size_t tlen;
4571 int tosync;
4572 struct mbuf *m;
4573
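	/*
	 * Sync the status block so we see the current TX consumer index.
	 */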
4574 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4575 offsetof(struct bge_ring_data, bge_status_block),
4576 sizeof (struct bge_status_block),
4577 BUS_DMASYNC_POSTREAD);
4578
4579 offset = offsetof(struct bge_ring_data, bge_tx_ring);
4580 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
4581 sc->bge_tx_saved_considx;
4582
4583 if (tosync != 0)
4584 rnd_add_uint32(&sc->rnd_source, tosync);
4585
4586 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
4587
4588 if (tosync < 0) {
4589 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
4590 sizeof (struct bge_tx_bd);
4591 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4592 toff, tlen, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4593 tosync = -tosync;
4594 }
4595
4596 if (tosync != 0) {
4597 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4598 offset, tosync * sizeof (struct bge_tx_bd),
4599 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4600 }
4601
4602 /*
4603 * Go through our tx ring and free mbufs for those
4604 * frames that have been sent.
4605 */
4606 while (sc->bge_tx_saved_considx !=
4607 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
4608 uint32_t idx = sc->bge_tx_saved_considx;
4609 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
4610 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4611 if_statinc(ifp, if_opackets);
4612 m = sc->bge_cdata.bge_tx_chain[idx];
4613 if (m != NULL) {
4614 sc->bge_cdata.bge_tx_chain[idx] = NULL;
4615 dma = sc->txdma[idx];
4616 if (dma->is_dma32) {
4617 bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32,
4618 0, dma->dmamap32->dm_mapsize,
4619 BUS_DMASYNC_POSTWRITE);
4620 bus_dmamap_unload(
4621 sc->bge_dmatag32, dma->dmamap32);
4622 } else {
4623 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap,
4624 0, dma->dmamap->dm_mapsize,
4625 BUS_DMASYNC_POSTWRITE);
4626 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
4627 }
4628 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
4629 sc->txdma[idx] = NULL;
4630
4631 m_freem(m);
4632 }
4633 sc->bge_txcnt--;
4634 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4635 ifp->if_timer = 0;
4636 }
4637
4638 if (cur_tx != NULL)
4639 ifp->if_flags &= ~IFF_OACTIVE;
4640 }
4641
4642 static int
4643 bge_intr(void *xsc)
4644 {
4645 struct bge_softc * const sc = xsc;
4646 struct ifnet * const ifp = &sc->ethercom.ec_if;
4647 uint32_t pcistate, statusword, statustag;
4648 uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE;
4649
4650
4651 /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */
4652 if (BGE_IS_5717_PLUS(sc))
4653 intrmask = 0;
4654
4655 /*
4656	 * It is possible for the interrupt to arrive before
4657	 * the status block has been updated.
4658 * Reading the PCI State register will confirm whether the
4659 * interrupt is ours and will flush the status block.
4660 */
4661 pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE);
4662
4663 /* read status word from status block */
4664 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4665 offsetof(struct bge_ring_data, bge_status_block),
4666 sizeof (struct bge_status_block),
4667 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4668 statusword = sc->bge_rdata->bge_status_block.bge_status;
4669 statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
4670
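	/*
	 * With tagged status the chip bumps the tag each time it posts a
	 * new status block, so seeing the previous tag again (with no
	 * interrupt pending in PCISTATE) means the interrupt was not ours;
	 * the tag is written back to the mailbox below to acknowledge
	 * everything up to that update.
	 */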
4671 if (sc->bge_flags & BGEF_TAGGED_STATUS) {
4672 if (sc->bge_lasttag == statustag &&
4673 (~pcistate & intrmask)) {
4674 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious);
4675 return 0;
4676 }
4677 sc->bge_lasttag = statustag;
4678 } else {
4679 if (!(statusword & BGE_STATFLAG_UPDATED) &&
4680 !(~pcistate & intrmask)) {
4681 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2);
4682 return 0;
4683 }
4684 statustag = 0;
4685 }
4686 /* Ack interrupt and stop others from occurring. */
4687 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
4688 BGE_EVCNT_INCR(sc->bge_ev_intr);
4689
4690 /* clear status word */
4691 sc->bge_rdata->bge_status_block.bge_status = 0;
4692
4693 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4694 offsetof(struct bge_ring_data, bge_status_block),
4695 sizeof (struct bge_status_block),
4696 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4697
4698 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4699 statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
4700 BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
4701 bge_link_upd(sc);
4702
4703 if (ifp->if_flags & IFF_RUNNING) {
4704 /* Check RX return ring producer/consumer */
4705 bge_rxeof(sc);
4706
4707 /* Check TX ring producer/consumer */
4708 bge_txeof(sc);
4709 }
4710
4711 if (sc->bge_pending_rxintr_change) {
4712 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
4713 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
4714
4715 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
4716 DELAY(10);
4717 (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4718
4719 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
4720 DELAY(10);
4721 (void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4722
4723 sc->bge_pending_rxintr_change = 0;
4724 }
4725 bge_handle_events(sc);
4726
4727 /* Re-enable interrupts. */
4728 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag);
4729
4730 if (ifp->if_flags & IFF_RUNNING)
4731 if_schedule_deferred_start(ifp);
4732
4733 return 1;
4734 }
4735
4736 static void
4737 bge_asf_driver_up(struct bge_softc *sc)
4738 {
4739 if (sc->bge_asf_mode & ASF_STACKUP) {
4740		/* Send an ASF heartbeat approx. every 2 seconds. */
4741 if (sc->bge_asf_count)
4742 sc->bge_asf_count --;
4743 else {
4744 sc->bge_asf_count = 2;
4745
4746 bge_wait_for_event_ack(sc);
4747
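			/*
			 * Post a DRV_ALIVE command (command, length and
			 * data mailboxes in SRAM), then raise the RX CPU
			 * driver event so the firmware notices it.
			 */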
4748 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4749 BGE_FW_CMD_DRV_ALIVE3);
4750 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4751 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4752 BGE_FW_HB_TIMEOUT_SEC);
4753 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
4754 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4755 BGE_RX_CPU_DRV_EVENT);
4756 }
4757 }
4758 }
4759
4760 static void
4761 bge_tick(void *xsc)
4762 {
4763 struct bge_softc * const sc = xsc;
4764 struct mii_data * const mii = &sc->bge_mii;
4765 int s;
4766
4767 s = splnet();
4768
4769 if (BGE_IS_5705_PLUS(sc))
4770 bge_stats_update_regs(sc);
4771 else
4772 bge_stats_update(sc);
4773
4774 if (sc->bge_flags & BGEF_FIBER_TBI) {
4775 /*
4776		 * Since auto-polling can't be used in TBI mode, we poll the
4777		 * link status manually. Here we register a pending link event
4778		 * and trigger an interrupt.
4779 */
4780 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4781 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4782 } else {
4783 /*
4784		 * Do not touch the PHY if we have link up. Doing so could break
4785		 * IPMI/ASF mode or produce extra input errors
4786		 * (extra input errors were reported for bcm5701 & bcm5704).
4787 */
4788 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
4789 mii_tick(mii);
4790 }
4791
4792 bge_asf_driver_up(sc);
4793
4794 if (!sc->bge_detaching)
4795 callout_schedule(&sc->bge_timeout, hz);
4796
4797 splx(s);
4798 }
4799
4800 static void
4801 bge_stats_update_regs(struct bge_softc *sc)
4802 {
4803 struct ifnet *const ifp = &sc->ethercom.ec_if;
4804
4805 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
4806
4807 if_statadd_ref(nsr, if_collisions,
4808 CSR_READ_4(sc, BGE_MAC_STATS +
4809 offsetof(struct bge_mac_stats_regs, etherStatsCollisions)));
4810
4811 /*
4812 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
4813 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
4814	 * (silicon bug). There's no reliable workaround, so just
4815	 * ignore the counter.
4816 */
4817 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
4818 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4819 sc->bge_chipid != BGE_CHIPID_BCM5720_A0) {
4820 if_statadd_ref(nsr, if_ierrors,
4821 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
4822 }
4823 if_statadd_ref(nsr, if_ierrors,
4824 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS));
4825 if_statadd_ref(nsr, if_ierrors,
4826 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS));
4827
4828 IF_STAT_PUTREF(ifp);
4829
4830 if (sc->bge_flags & BGEF_RDMA_BUG) {
4831 uint32_t val, ucast, mcast, bcast;
4832
4833 ucast = CSR_READ_4(sc, BGE_MAC_STATS +
4834 offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
4835 mcast = CSR_READ_4(sc, BGE_MAC_STATS +
4836 offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
4837 bcast = CSR_READ_4(sc, BGE_MAC_STATS +
4838 offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
4839
4840 /*
4841		 * If the controller has transmitted more than BGE_NUM_RDMA_CHANNELS
4842		 * frames, it's safe to disable the workaround for the DMA engine's
4843		 * miscalculation of TXMBUF space.
4844 */
4845 if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
4846 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
4847 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
4848 val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
4849 else
4850 val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
4851 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
4852 sc->bge_flags &= ~BGEF_RDMA_BUG;
4853 }
4854 }
4855 }
4856
4857 static void
4858 bge_stats_update(struct bge_softc *sc)
4859 {
4860 struct ifnet * const ifp = &sc->ethercom.ec_if;
4861 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4862
4863 #define READ_STAT(sc, stats, stat) \
4864 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4865
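	/*
	 * Each statistic in the legacy statistics block is a 64-bit counter
	 * kept in NIC memory; only the low 32 bits are read here, and the
	 * collision count is accumulated as a delta against the previously
	 * seen value.
	 */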
4866 uint64_t collisions =
4867 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
4868 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
4869 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
4870 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo));
4871
4872 if_statadd(ifp, if_collisions, collisions - sc->bge_if_collisions);
4873 sc->bge_if_collisions = collisions;
4874
4875
4876 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
4877 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
4878 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
4879 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
4880 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
4881 READ_STAT(sc, stats,
4882 xoffPauseFramesReceived.bge_addr_lo));
4883 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
4884 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
4885 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
4886 READ_STAT(sc, stats,
4887 macControlFramesReceived.bge_addr_lo));
4888 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
4889 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
4890
4891 #undef READ_STAT
4892
4893 #ifdef notdef
4894 ifp->if_collisions +=
4895 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
4896 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
4897 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
4898 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
4899 ifp->if_collisions;
4900 #endif
4901 }
4902
4903 /*
4904 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4905 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4906 * but when such padded frames employ the bge IP/TCP checksum offload,
4907 * the hardware checksum assist gives incorrect results (possibly
4908 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4909 * If we pad such runts with zeros, the onboard checksum comes out correct.
4910 */
4911 static inline int
4912 bge_cksum_pad(struct mbuf *pkt)
4913 {
4914 struct mbuf *last = NULL;
4915 int padlen;
4916
4917 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
4918
4919	/* If there's only the packet-header mbuf and we can pad there, use it. */
4920 if (pkt->m_pkthdr.len == pkt->m_len &&
4921 M_TRAILINGSPACE(pkt) >= padlen) {
4922 last = pkt;
4923 } else {
4924 /*
4925 * Walk packet chain to find last mbuf. We will either
4926 * pad there, or append a new mbuf and pad it
4927 * (thus perhaps avoiding the bcm5700 dma-min bug).
4928 */
4929 for (last = pkt; last->m_next != NULL; last = last->m_next) {
4930 continue; /* do nothing */
4931 }
4932
4933 /* `last' now points to last in chain. */
4934 if (M_TRAILINGSPACE(last) < padlen) {
4935 /* Allocate new empty mbuf, pad it. Compact later. */
4936 struct mbuf *n;
4937 MGET(n, M_DONTWAIT, MT_DATA);
4938 if (n == NULL)
4939 return ENOBUFS;
4940 n->m_len = 0;
4941 last->m_next = n;
4942 last = n;
4943 }
4944 }
4945
4946 KDASSERT(!M_READONLY(last));
4947 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
4948
4949 /* Now zero the pad area, to avoid the bge cksum-assist bug */
4950 memset(mtod(last, char *) + last->m_len, 0, padlen);
4951 last->m_len += padlen;
4952 pkt->m_pkthdr.len += padlen;
4953 return 0;
4954 }
4955
4956 /*
4957 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
4958 */
4959 static inline int
4960 bge_compact_dma_runt(struct mbuf *pkt)
4961 {
4962 struct mbuf *m, *prev;
4963 int totlen;
4964
4965 prev = NULL;
4966 totlen = 0;
4967
4968 for (m = pkt; m != NULL; prev = m, m = m->m_next) {
4969 int mlen = m->m_len;
4970 int shortfall = 8 - mlen ;
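		/* Bytes needed to bring this mbuf up to the 8-byte DMA minimum. */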
4971
4972 totlen += mlen;
4973 if (mlen == 0)
4974 continue;
4975 if (mlen >= 8)
4976 continue;
4977
4978 /*
4979		 * If we get here, the mbuf data is too small for the DMA engine.
4980		 * Try to fix it by shuffling data to prev or next in the chain.
4981 * If that fails, do a compacting deep-copy of the whole chain.
4982 */
4983
4984		/* Internal fragment. If it fits in prev, copy it there. */
4985 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
4986 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
4987 prev->m_len += mlen;
4988 m->m_len = 0;
4989 /* XXX stitch chain */
4990 prev->m_next = m_free(m);
4991 m = prev;
4992 continue;
4993 } else if (m->m_next != NULL &&
4994 M_TRAILINGSPACE(m) >= shortfall &&
4995 m->m_next->m_len >= (8 + shortfall)) {
4996			/* m is writable and the next mbuf has enough data; pull up. */
4997
4998 memcpy(m->m_data + m->m_len, m->m_next->m_data,
4999 shortfall);
5000 m->m_len += shortfall;
5001 m->m_next->m_len -= shortfall;
5002 m->m_next->m_data += shortfall;
5003 } else if (m->m_next == NULL || 1) {
5004 /*
5005 * Got a runt at the very end of the packet.
5006			 * Borrow data from the tail of the preceding mbuf and
5007 * update its length in-place. (The original data is
5008 * still valid, so we can do this even if prev is not
5009 * writable.)
5010 */
5011
5012 /*
5013 * If we'd make prev a runt, just move all of its data.
5014 */
5015 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
5016 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
5017
5018 if ((prev->m_len - shortfall) < 8)
5019 shortfall = prev->m_len;
5020
5021 #ifdef notyet /* just do the safe slow thing for now */
5022 if (!M_READONLY(m)) {
5023				if (M_LEADINGSPACE(m) < shortfall) {
5024 void *m_dat;
5025 m_dat = M_BUFADDR(m);
5026 memmove(m_dat, mtod(m, void*),
5027 m->m_len);
5028 m->m_data = m_dat;
5029 }
5030 } else
5031 #endif /* just do the safe slow thing */
5032 {
5033 struct mbuf * n = NULL;
5034 int newprevlen = prev->m_len - shortfall;
5035
5036 MGET(n, M_NOWAIT, MT_DATA);
5037 if (n == NULL)
5038 return ENOBUFS;
5039 KASSERT(m->m_len + shortfall < MLEN
5040 /*,
5041 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
5042
5043 /* first copy the data we're stealing from prev */
5044 memcpy(n->m_data, prev->m_data + newprevlen,
5045 shortfall);
5046
5047 /* update prev->m_len accordingly */
5048 prev->m_len -= shortfall;
5049
5050 /* copy data from runt m */
5051 memcpy(n->m_data + shortfall, m->m_data,
5052 m->m_len);
5053
5054 /* n holds what we stole from prev, plus m */
5055 n->m_len = shortfall + m->m_len;
5056
5057 /* stitch n into chain and free m */
5058 n->m_next = m->m_next;
5059 prev->m_next = n;
5060 /* KASSERT(m->m_next == NULL); */
5061 m->m_next = NULL;
5062 m_free(m);
5063 m = n; /* for continuing loop */
5064 }
5065 }
5066 }
5067 return 0;
5068 }
5069
5070 /*
5071 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
5072 * pointers to descriptors.
5073 */
5074 static int
5075 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
5076 {
5077 struct ifnet * const ifp = &sc->ethercom.ec_if;
5078 struct bge_tx_bd *f, *prev_f;
5079 uint32_t frag, cur;
5080 uint16_t csum_flags = 0;
5081 uint16_t txbd_tso_flags = 0;
5082 struct txdmamap_pool_entry *dma;
5083 bus_dmamap_t dmamap;
5084 bus_dma_tag_t dmatag;
5085 int i = 0;
5086 int use_tso, maxsegsize, error;
5087 bool have_vtag;
5088 uint16_t vtag;
5089 bool remap;
5090
5091 if (m_head->m_pkthdr.csum_flags) {
5092 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
5093 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
5094 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4 |M_CSUM_UDPv4))
5095 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
5096 }
5097
5098 /*
5099 * If we were asked to do an outboard checksum, and the NIC
5100 * has the bug where it sometimes adds in the Ethernet padding,
5101 * explicitly pad with zeros so the cksum will be correct either way.
5102 	 * (For now, do this for all chip versions, until newer ones
5103 	 * are confirmed not to require the workaround.)
5104 */
5105 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
5106 #ifdef notyet
5107 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
5108 #endif
5109 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
5110 goto check_dma_bug;
5111
5112 if (bge_cksum_pad(m_head) != 0)
5113 return ENOBUFS;
5114
5115 check_dma_bug:
5116 	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
5117 goto doit;
5118
5119 /*
5120 * bcm5700 Revision B silicon cannot handle DMA descriptors with
5121 * less than eight bytes. If we encounter a teeny mbuf
5122 * at the end of a chain, we can pad. Otherwise, copy.
5123 */
5124 if (bge_compact_dma_runt(m_head) != 0)
5125 return ENOBUFS;
5126
5127 doit:
5128 dma = SLIST_FIRST(&sc->txdma_list);
5129 if (dma == NULL) {
5130 ifp->if_flags |= IFF_OACTIVE;
5131 return ENOBUFS;
5132 }
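	/*
	 * Start with the default (possibly 64-bit) DMA tag and map; the TSO
	 * path below may reload with the 32-bit tag if the mapping turns out
	 * to cross a 4GB boundary.
	 */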
5133 dmamap = dma->dmamap;
5134 dmatag = sc->bge_dmatag;
5135 dma->is_dma32 = false;
5136
5137 /*
5138 * Set up any necessary TSO state before we start packing...
5139 */
5140 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5141 if (!use_tso) {
5142 maxsegsize = 0;
5143 } else { /* TSO setup */
5144 unsigned mss;
5145 struct ether_header *eh;
5146 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
5147 unsigned bge_hlen;
5148 struct mbuf * m0 = m_head;
5149 struct ip *ip;
5150 struct tcphdr *th;
5151 int iphl, hlen;
5152
5153 /*
5154 * XXX It would be nice if the mbuf pkthdr had offset
5155 * fields for the protocol headers.
5156 */
5157
5158 eh = mtod(m0, struct ether_header *);
5159 switch (htons(eh->ether_type)) {
5160 case ETHERTYPE_IP:
5161 offset = ETHER_HDR_LEN;
5162 break;
5163
5164 case ETHERTYPE_VLAN:
5165 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5166 break;
5167
5168 default:
5169 /*
5170 * Don't support this protocol or encapsulation.
5171 */
5172 return ENOBUFS;
5173 }
5174
5175 /*
5176 * TCP/IP headers are in the first mbuf; we can do
5177 * this the easy way.
5178 */
5179 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5180 hlen = iphl + offset;
5181 if (__predict_false(m0->m_len <
5182 (hlen + sizeof(struct tcphdr)))) {
5183
5184 aprint_error_dev(sc->bge_dev,
5185 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd,"
5186 			    " not handled yet\n",
5187 			    m0->m_len, hlen + sizeof(struct tcphdr));
5188 #ifdef NOTYET
5189 /*
5190 * XXX jonathan (at) NetBSD.org: untested.
5191 * how to force this branch to be taken?
5192 */
5193 BGE_EVCNT_INCR(sc->bge_ev_txtsopain);
5194
5195 m_copydata(m0, offset, sizeof(ip), &ip);
5196 m_copydata(m0, hlen, sizeof(th), &th);
5197
5198 ip.ip_len = 0;
5199
5200 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
5201 sizeof(ip.ip_len), &ip.ip_len);
5202
5203 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5204 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5205
5206 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5207 sizeof(th.th_sum), &th.th_sum);
5208
5209 hlen += th.th_off << 2;
5210 iptcp_opt_words = hlen;
5211 #else
5212 /*
5213 * if_wm "hard" case not yet supported, can we not
5214 * mandate it out of existence?
5215 */
5216 (void) ip; (void)th; (void) ip_tcp_hlen;
5217
5218 return ENOBUFS;
5219 #endif
5220 } else {
5221 ip = (struct ip *) (mtod(m0, char *) + offset);
5222 th = (struct tcphdr *) (mtod(m0, char *) + hlen);
5223 ip_tcp_hlen = iphl + (th->th_off << 2);
5224
5225 /* Total IP/TCP options, in 32-bit words */
5226 iptcp_opt_words = (ip_tcp_hlen
5227 - sizeof(struct tcphdr)
5228 - sizeof(struct ip)) >> 2;
5229 }
5230 if (BGE_IS_575X_PLUS(sc)) {
5231 th->th_sum = 0;
5232 csum_flags = 0;
5233 } else {
5234 /*
5235 * XXX jonathan (at) NetBSD.org: 5705 untested.
5236 * Requires TSO firmware patch for 5701/5703/5704.
5237 */
5238 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5239 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5240 }
5241
5242 mss = m_head->m_pkthdr.segsz;
5243 txbd_tso_flags |=
5244 BGE_TXBDFLAG_CPU_PRE_DMA |
5245 BGE_TXBDFLAG_CPU_POST_DMA;
5246
5247 /*
5248 * Our NIC TSO-assist assumes TSO has standard, optionless
5249 * IPv4 and TCP headers, which total 40 bytes. By default,
5250 * the NIC copies 40 bytes of IP/TCP header from the
5251 * supplied header into the IP/TCP header portion of
5252 * each post-TSO-segment. If the supplied packet has IP or
5253 * TCP options, we need to tell the NIC to copy those extra
5254 * bytes into each post-TSO header, in addition to the normal
5255 * 40-byte IP/TCP header (and to leave space accordingly).
5256 * Unfortunately, the driver encoding of option length
5257 * varies across different ASIC families.
5258 */
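		/*
		 * Encoding used below: 5717 and newer scatter the header
		 * length (in 32-bit words) across txbd_tso_flags and the
		 * mss/length word; 5705 and newer put it in the upper bits
		 * of the mss/length word; older chips take the option-word
		 * count in txbd_tso_flags.
		 */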
5259 tcp_seg_flags = 0;
5260 bge_hlen = ip_tcp_hlen >> 2;
5261 if (BGE_IS_5717_PLUS(sc)) {
5262 tcp_seg_flags = (bge_hlen & 0x3) << 14;
5263 txbd_tso_flags |=
5264 ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2);
5265 } else if (BGE_IS_5705_PLUS(sc)) {
5266 tcp_seg_flags = bge_hlen << 11;
5267 } else {
5268 /* XXX iptcp_opt_words or bge_hlen ? */
5269 txbd_tso_flags |= iptcp_opt_words << 12;
5270 }
5271 maxsegsize = mss | tcp_seg_flags;
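		/*
		 * Seed the IP header with the per-segment length (mss plus
		 * IP/TCP header bytes) and clear ip_sum, presumably so the
		 * controller can replicate and fix up the headers for each
		 * generated segment.
		 */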
5272 ip->ip_len = htons(mss + ip_tcp_hlen);
5273 ip->ip_sum = 0;
5274
5275 } /* TSO setup */
5276
5277 have_vtag = vlan_has_tag(m_head);
5278 if (have_vtag)
5279 vtag = vlan_get_tag(m_head);
5280
5281 /*
5282 * Start packing the mbufs in this chain into
5283 * the fragment pointers. Stop when we run out
5284 * of fragments or hit the end of the mbuf chain.
5285 */
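	/*
	 * If the initial load fails with EFBIG (too many segments for the
	 * map), defragment the chain once with m_defrag() and retry.
	 */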
5286 remap = true;
5287 load_again:
5288 error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT);
5289 if (__predict_false(error)) {
5290 if (error == EFBIG && remap) {
5291 struct mbuf *m;
5292 remap = false;
5293 m = m_defrag(m_head, M_NOWAIT);
5294 if (m != NULL) {
5295 KASSERT(m == m_head);
5296 goto load_again;
5297 }
5298 }
5299 return error;
5300 }
5301 /*
5302 * Sanity check: avoid coming within 16 descriptors
5303 * of the end of the ring.
5304 */
5305 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
5306 BGE_TSO_PRINTF(("%s: "
5307 " dmamap_load_mbuf too close to ring wrap\n",
5308 device_xname(sc->bge_dev)));
5309 goto fail_unload;
5310 }
5311
5312 	/* Iterate over DMA-map fragments. */
5313 f = prev_f = NULL;
5314 cur = frag = *txidx;
5315
5316 for (i = 0; i < dmamap->dm_nsegs; i++) {
5317 f = &sc->bge_rdata->bge_tx_ring[frag];
5318 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
5319 break;
5320
5321 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
5322 f->bge_len = dmamap->dm_segs[i].ds_len;
5323 if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
5324 (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
5325 ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
5326 (prev_f != NULL &&
5327 prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
5328 ) {
5329 /*
5330 			 * A watchdog timeout issue was observed with TSO;
5331 			 * limiting the DMA address space to 32 bits seems
5332 			 * to address it, so reload with the 32-bit tag.
5333 */
5334 bus_dmamap_unload(dmatag, dmamap);
5335 dmatag = sc->bge_dmatag32;
5336 dmamap = dma->dmamap32;
5337 dma->is_dma32 = true;
5338 remap = true;
5339 goto load_again;
5340 }
5341
5342 /*
5343 * For 5751 and follow-ons, for TSO we must turn
5344 * off checksum-assist flag in the tx-descr, and
5345 * supply the ASIC-revision-specific encoding
5346 * of TSO flags and segsize.
5347 */
5348 if (use_tso) {
5349 if (BGE_IS_575X_PLUS(sc) || i == 0) {
5350 f->bge_rsvd = maxsegsize;
5351 f->bge_flags = csum_flags | txbd_tso_flags;
5352 } else {
5353 f->bge_rsvd = 0;
5354 f->bge_flags =
5355 (csum_flags | txbd_tso_flags) & 0x0fff;
5356 }
5357 } else {
5358 f->bge_rsvd = 0;
5359 f->bge_flags = csum_flags;
5360 }
5361
5362 if (have_vtag) {
5363 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
5364 f->bge_vlan_tag = vtag;
5365 } else {
5366 f->bge_vlan_tag = 0;
5367 }
5368 prev_f = f;
5369 cur = frag;
5370 BGE_INC(frag, BGE_TX_RING_CNT);
5371 }
5372
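	/*
	 * If the loop bailed out early, a descriptor in the range was still
	 * owned by an earlier packet; back out the mapping.
	 */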
5373 if (i < dmamap->dm_nsegs) {
5374 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
5375 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
5376 goto fail_unload;
5377 }
5378
5379 bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize,
5380 BUS_DMASYNC_PREWRITE);
5381
5382 if (frag == sc->bge_tx_saved_considx) {
5383 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
5384 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
5385
5386 goto fail_unload;
5387 }
5388
5389 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
5390 sc->bge_cdata.bge_tx_chain[cur] = m_head;
5391 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
5392 sc->txdma[cur] = dma;
5393 sc->bge_txcnt += dmamap->dm_nsegs;
5394
5395 *txidx = frag;
5396
5397 return 0;
5398
5399 fail_unload:
5400 bus_dmamap_unload(dmatag, dmamap);
5401 ifp->if_flags |= IFF_OACTIVE;
5402
5403 return ENOBUFS;
5404 }
5405
5406 /*
5407 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5408 * to the mbuf data regions directly in the transmit descriptors.
5409 */
5410 static void
5411 bge_start(struct ifnet *ifp)
5412 {
5413 struct bge_softc * const sc = ifp->if_softc;
5414 struct mbuf *m_head = NULL;
5415 struct mbuf *m;
5416 uint32_t prodidx;
5417 int pkts = 0;
5418 int error;
5419
5420 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
5421 return;
5422
5423 prodidx = sc->bge_tx_prodidx;
5424
5425 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
5426 IFQ_POLL(&ifp->if_snd, m_head);
5427 if (m_head == NULL)
5428 break;
5429
5430 #if 0
5431 /*
5432 * XXX
5433 * safety overkill. If this is a fragmented packet chain
5434 * with delayed TCP/UDP checksums, then only encapsulate
5435 * it if we have enough descriptors to handle the entire
5436 * chain at once.
5437 * (paranoia -- may not actually be needed)
5438 */
5439 if (m_head->m_flags & M_FIRSTFRAG &&
5440 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
5441 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
5442 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
5443 ifp->if_flags |= IFF_OACTIVE;
5444 break;
5445 }
5446 }
5447 #endif
5448
5449 /*
5450 * Pack the data into the transmit ring. If we
5451 * don't have room, set the OACTIVE flag and wait
5452 * for the NIC to drain the ring.
5453 */
5454 error = bge_encap(sc, m_head, &prodidx);
5455 if (__predict_false(error)) {
5456 if (ifp->if_flags & IFF_OACTIVE) {
5457 /* just wait for the transmit ring to drain */
5458 break;
5459 }
5460 IFQ_DEQUEUE(&ifp->if_snd, m);
5461 KASSERT(m == m_head);
5462 m_freem(m_head);
5463 continue;
5464 }
5465
5466 /* now we are committed to transmit the packet */
5467 IFQ_DEQUEUE(&ifp->if_snd, m);
5468 KASSERT(m == m_head);
5469 pkts++;
5470
5471 /*
5472 * If there's a BPF listener, bounce a copy of this frame
5473 * to him.
5474 */
5475 bpf_mtap(ifp, m_head, BPF_D_OUT);
5476 }
5477 if (pkts == 0)
5478 return;
5479
5480 /* Transmit */
5481 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5482 /* 5700 b2 errata */
5483 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
5484 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5485
5486 sc->bge_tx_prodidx = prodidx;
5487
5488 /*
5489 * Set a timeout in case the chip goes out to lunch.
5490 */
5491 ifp->if_timer = 5;
5492 }
5493
5494 static int
5495 bge_init(struct ifnet *ifp)
5496 {
5497 struct bge_softc * const sc = ifp->if_softc;
5498 const uint16_t *m;
5499 uint32_t mode, reg;
5500 int s, error = 0;
5501
5502 s = splnet();
5503
5504 KASSERT(ifp == &sc->ethercom.ec_if);
5505
5506 /* Cancel pending I/O and flush buffers. */
5507 bge_stop(ifp, 0);
5508
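	/*
	 * Perform the firmware handshake around the reset: quiesce the
	 * bootcode/ASF firmware before resetting and signal it afterwards.
	 */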
5509 bge_stop_fw(sc);
5510 bge_sig_pre_reset(sc, BGE_RESET_START);
5511 bge_reset(sc);
5512 bge_sig_legacy(sc, BGE_RESET_START);
5513
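	/*
	 * On 5784 AX parts, take the CPMU out of link-aware/link-idle mode
	 * and slow the 10Mb, link-aware and host-access MAC clocks to
	 * 6.25MHz.
	 */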
5514 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
5515 reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
5516 reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE |
5517 BGE_CPMU_CTRL_LINK_IDLE_MODE);
5518 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
5519
5520 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
5521 reg &= ~BGE_CPMU_LSPD_10MB_CLK;
5522 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
5523 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
5524
5525 reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD);
5526 reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK;
5527 reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25;
5528 CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg);
5529
5530 reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC);
5531 reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK;
5532 reg |= BGE_CPMU_HST_ACC_MACCLK_6_25;
5533 CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg);
5534 }
5535
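	/*
	 * BCM57780: adjust the PCIe power-management thresholds and
	 * electrical-idle delay, clear any latched correctable AER errors,
	 * and disable the L1 PLL power-down.
	 */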
5536 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
5537 pcireg_t aercap;
5538
5539 reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH);
5540 reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK)
5541 | BGE_PCIE_PWRMNG_L1THRESH_4MS
5542 | BGE_PCIE_PWRMNG_EXTASPMTMR_EN;
5543 CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg);
5544
5545 reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY);
5546 reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK)
5547 | BGE_PCIE_EIDLE_DELAY_13CLK;
5548 CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg);
5549
5550 /* Clear correctable error */
5551 if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag,
5552 PCI_EXTCAP_AER, &aercap, NULL) != 0)
5553 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
5554 aercap + PCI_AER_COR_STATUS, 0xffffffff);
5555
5556 reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
5557 reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
5558 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
5559 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg);
5560 }
5561
5562 bge_sig_post_reset(sc, BGE_RESET_START);
5563
5564 bge_chipinit(sc);
5565
5566 /*
5567 * Init the various state machines, ring
5568 * control blocks and firmware.
5569 */
5570 error = bge_blockinit(sc);
5571 if (error != 0) {
5572 aprint_error_dev(sc->bge_dev, "initialization error %d\n",
5573 error);
5574 splx(s);
5575 return error;
5576 }
5577
5578 /* 5718 step 25, 57XX step 54 */
5579 /* Specify MTU. */
5580 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
5581 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
5582
5583 /* 5718 step 23 */
5584 /* Load our MAC address. */
5585 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
5586 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
5587 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI,
5588 ((uint32_t)htons(m[1]) << 16) | htons(m[2]));
5589
5590 /* Enable or disable promiscuous mode as needed. */
5591 if (ifp->if_flags & IFF_PROMISC)
5592 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5593 else
5594 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5595
5596 /* Program multicast filter. */
5597 bge_setmulti(sc);
5598
5599 /* Init RX ring. */
5600 bge_init_rx_ring_std(sc);
5601
5602 /*
5603 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5604 	 * memory to ensure that the chip has in fact read the first
5605 * entry of the ring.
5606 */
5607 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5608 uint32_t v, i;
5609 for (i = 0; i < 10; i++) {
5610 DELAY(20);
5611 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5612 if (v == (MCLBYTES - ETHER_ALIGN))
5613 break;
5614 }
5615 if (i == 10)
5616 aprint_error_dev(sc->bge_dev,
5617 "5705 A0 chip failed to load RX ring\n");
5618 }
5619
5620 /* Init jumbo RX ring. */
5621 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
5622 bge_init_rx_ring_jumbo(sc);
5623
5624 /* Init our RX return ring index */
5625 sc->bge_rx_saved_considx = 0;
5626
5627 /* Init TX ring. */
5628 bge_init_tx_ring(sc);
5629
5630 /* 5718 step 63, 57XX step 94 */
5631 /* Enable TX MAC state machine lockup fix. */
5632 mode = CSR_READ_4(sc, BGE_TX_MODE);
5633 if (BGE_IS_5755_PLUS(sc) ||
5634 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5635 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5636 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
5637 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
5638 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5639 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5640 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5641 }
5642
5643 /* Turn on transmitter */
5644 CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5645 /* 5718 step 64 */
5646 DELAY(100);
5647
5648 /* 5718 step 65, 57XX step 95 */
5649 /* Turn on receiver */
5650 mode = CSR_READ_4(sc, BGE_RX_MODE);
5651 if (BGE_IS_5755_PLUS(sc))
5652 mode |= BGE_RXMODE_IPV6_ENABLE;
5653 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
5654 mode |= BGE_RXMODE_IPV4_FRAG_FIX;
5655 CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
5656 /* 5718 step 66 */
5657 DELAY(10);
5658
5659 /* 5718 step 12, 57XX step 37 */
5660 /*
5661 	 * XXX Documents for the 5718 series and 577xx say the recommended
5662 	 * value is 1, but tg3 sets 1 only on the 57765 series.
5663 */
5664 if (BGE_IS_57765_PLUS(sc))
5665 reg = 1;
5666 else
5667 reg = 2;
5668 CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);
5669
5670 /* Tell firmware we're alive. */
5671 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5672
5673 /* Enable host interrupts. */
5674 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5675 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5676 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
5677
5678 if ((error = bge_ifmedia_upd(ifp)) != 0)
5679 goto out;
5680
5681 ifp->if_flags |= IFF_RUNNING;
5682 ifp->if_flags &= ~IFF_OACTIVE;
5683
5684 callout_schedule(&sc->bge_timeout, hz);
5685
5686 out:
5687 sc->bge_if_flags = ifp->if_flags;
5688 splx(s);
5689
5690 return error;
5691 }
5692
5693 /*
5694 * Set media options.
5695 */
5696 static int
5697 bge_ifmedia_upd(struct ifnet *ifp)
5698 {
5699 struct bge_softc * const sc = ifp->if_softc;
5700 struct mii_data * const mii = &sc->bge_mii;
5701 struct ifmedia * const ifm = &sc->bge_ifmedia;
5702 int rc;
5703
5704 /* If this is a 1000baseX NIC, enable the TBI port. */
5705 if (sc->bge_flags & BGEF_FIBER_TBI) {
5706 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5707 return EINVAL;
5708 switch (IFM_SUBTYPE(ifm->ifm_media)) {
5709 case IFM_AUTO:
5710 /*
5711 * The BCM5704 ASIC appears to have a special
5712 * mechanism for programming the autoneg
5713 * advertisement registers in TBI mode.
5714 */
5715 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
5716 uint32_t sgdig;
5717 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5718 if (sgdig & BGE_SGDIGSTS_DONE) {
5719 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5720 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5721 sgdig |= BGE_SGDIGCFG_AUTO |
5722 BGE_SGDIGCFG_PAUSE_CAP |
5723 BGE_SGDIGCFG_ASYM_PAUSE;
5724 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
5725 sgdig | BGE_SGDIGCFG_SEND);
5726 DELAY(5);
5727 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
5728 sgdig);
5729 }
5730 }
5731 break;
5732 case IFM_1000_SX:
5733 if ((ifm->ifm_media & IFM_FDX) != 0) {
5734 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE,
5735 BGE_MACMODE_HALF_DUPLEX);
5736 } else {
5737 BGE_SETBIT_FLUSH(sc, BGE_MAC_MODE,
5738 BGE_MACMODE_HALF_DUPLEX);
5739 }
5740 DELAY(40);
5741 break;
5742 default:
5743 return EINVAL;
5744 }
5745 /* XXX 802.3x flow control for 1000BASE-SX */
5746 return 0;
5747 }
5748
5749 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) &&
5750 (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) {
5751 uint32_t reg;
5752
5753 reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
5754 if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) {
5755 reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY;
5756 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
5757 }
5758 }
5759
5760 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
5761 if ((rc = mii_mediachg(mii)) == ENXIO)
5762 return 0;
5763
5764 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
5765 uint32_t reg;
5766
5767 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK);
5768 if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK)
5769 == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) {
5770 reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK;
5771 delay(40);
5772 CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg);
5773 }
5774 }
5775
5776 /*
5777 * Force an interrupt so that we will call bge_link_upd
5778 * if needed and clear any pending link state attention.
5779 	 * Without this we do not get any further interrupts for
5780 	 * link state changes, and thus will never bring the link UP
5781 	 * or be able to send in bge_start. The only other way to get
5782 	 * things working was to receive a packet and get an RX intr.
5783 */
5784 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
5785 sc->bge_flags & BGEF_IS_5788)
5786 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5787 else
5788 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5789
5790 return rc;
5791 }
5792
5793 /*
5794 * Report current media status.
5795 */
5796 static void
5797 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5798 {
5799 struct bge_softc * const sc = ifp->if_softc;
5800 struct mii_data * const mii = &sc->bge_mii;
5801
5802 if (sc->bge_flags & BGEF_FIBER_TBI) {
5803 ifmr->ifm_status = IFM_AVALID;
5804 ifmr->ifm_active = IFM_ETHER;
5805 if (CSR_READ_4(sc, BGE_MAC_STS) &
5806 BGE_MACSTAT_TBI_PCS_SYNCHED)
5807 ifmr->ifm_status |= IFM_ACTIVE;
5808 ifmr->ifm_active |= IFM_1000_SX;
5809 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5810 ifmr->ifm_active |= IFM_HDX;
5811 else
5812 ifmr->ifm_active |= IFM_FDX;
5813 return;
5814 }
5815
5816 mii_pollstat(mii);
5817 ifmr->ifm_status = mii->mii_media_status;
5818 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
5819 sc->bge_flowflags;
5820 }
5821
5822 static int
5823 bge_ifflags_cb(struct ethercom *ec)
5824 {
5825 struct ifnet * const ifp = &ec->ec_if;
5826 struct bge_softc * const sc = ifp->if_softc;
5827 u_short change = ifp->if_flags ^ sc->bge_if_flags;
5828
5829 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5830 return ENETRESET;
5831 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
5832 return 0;
5833
5834 if ((ifp->if_flags & IFF_PROMISC) == 0)
5835 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5836 else
5837 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5838
5839 bge_setmulti(sc);
5840
5841 sc->bge_if_flags = ifp->if_flags;
5842 return 0;
5843 }
5844
5845 static int
5846 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
5847 {
5848 struct bge_softc * const sc = ifp->if_softc;
5849 struct ifreq * const ifr = (struct ifreq *) data;
5850 int s, error = 0;
5851 struct mii_data *mii;
5852
5853 s = splnet();
5854
5855 switch (command) {
5856 case SIOCSIFMEDIA:
5857 /* XXX Flow control is not supported for 1000BASE-SX */
5858 if (sc->bge_flags & BGEF_FIBER_TBI) {
5859 ifr->ifr_media &= ~IFM_ETH_FMASK;
5860 sc->bge_flowflags = 0;
5861 }
5862
5863 /* Flow control requires full-duplex mode. */
5864 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
5865 (ifr->ifr_media & IFM_FDX) == 0) {
5866 ifr->ifr_media &= ~IFM_ETH_FMASK;
5867 }
5868 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
5869 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
5870 /* We can do both TXPAUSE and RXPAUSE. */
5871 ifr->ifr_media |=
5872 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
5873 }
5874 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
5875 }
5876
5877 if (sc->bge_flags & BGEF_FIBER_TBI) {
5878 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
5879 command);
5880 } else {
5881 mii = &sc->bge_mii;
5882 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
5883 command);
5884 }
5885 break;
5886 default:
5887 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
5888 break;
5889
5890 error = 0;
5891
5892 if (command != SIOCADDMULTI && command != SIOCDELMULTI)
5893 ;
5894 else if (ifp->if_flags & IFF_RUNNING)
5895 bge_setmulti(sc);
5896 break;
5897 }
5898
5899 splx(s);
5900
5901 return error;
5902 }
5903
5904 static void
5905 bge_watchdog(struct ifnet *ifp)
5906 {
5907 struct bge_softc * const sc = ifp->if_softc;
5908 uint32_t status;
5909
5910 /* If pause frames are active then don't reset the hardware. */
5911 if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
5912 status = CSR_READ_4(sc, BGE_RX_STS);
5913 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
5914 /*
5915 * If link partner has us in XOFF state then wait for
5916 * the condition to clear.
5917 */
5918 CSR_WRITE_4(sc, BGE_RX_STS, status);
5919 ifp->if_timer = 5;
5920 return;
5921 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
5922 (status & BGE_RXSTAT_RCVD_XON) != 0) {
5923 /*
5924 			 * We have seen both an XOFF and an XON from the
5925 			 * link partner; give traffic a chance to resume.
5926 */
5927 CSR_WRITE_4(sc, BGE_RX_STS, status);
5928 ifp->if_timer = 5;
5929 return;
5930 }
5931 /*
5932 * Any other condition is unexpected and the controller
5933 * should be reset.
5934 */
5935 }
5936
5937 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
5938
5939 ifp->if_flags &= ~IFF_RUNNING;
5940 bge_init(ifp);
5941
5942 if_statinc(ifp, if_oerrors);
5943 }
5944
5945 static void
5946 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
5947 {
5948 int i;
5949
5950 BGE_CLRBIT_FLUSH(sc, reg, bit);
5951
5952 for (i = 0; i < 1000; i++) {
5953 delay(100);
5954 if ((CSR_READ_4(sc, reg) & bit) == 0)
5955 return;
5956 }
5957
5958 /*
5959 	 * Only suppress the message for BGE_SRS_MODE; that block fails to
5960 	 * stop in some environments (and once just after boot?).
5961 */
5962 if (reg != BGE_SRS_MODE)
5963 aprint_error_dev(sc->bge_dev,
5964 "block failed to stop: reg 0x%lx, bit 0x%08x\n",
5965 (u_long)reg, bit);
5966 }
5967
5968 /*
5969 * Stop the adapter and free any mbufs allocated to the
5970 * RX and TX lists.
5971 */
5972 static void
5973 bge_stop(struct ifnet *ifp, int disable)
5974 {
5975 struct bge_softc * const sc = ifp->if_softc;
5976
5977 if (disable) {
5978 sc->bge_detaching = 1;
5979 callout_halt(&sc->bge_timeout, NULL);
5980 } else
5981 callout_stop(&sc->bge_timeout);
5982
5983 /* Disable host interrupts. */
5984 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5985 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
5986
5987 /*
5988 * Tell firmware we're shutting down.
5989 */
5990 bge_stop_fw(sc);
5991 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
5992
5993 /*
5994 * Disable all of the receiver blocks.
5995 */
5996 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5997 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5998 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5999 if (BGE_IS_5700_FAMILY(sc))
6000 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
6001 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
6002 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
6003 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
6004
6005 /*
6006 * Disable all of the transmit blocks.
6007 */
6008 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
6009 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
6010 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
6011 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
6012 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
6013 if (BGE_IS_5700_FAMILY(sc))
6014 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
6015 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
6016
6017 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB);
6018 delay(40);
6019
6020 bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
6021
6022 /*
6023 * Shut down all of the memory managers and related
6024 * state machines.
6025 */
6026 /* 5718 step 5a,5b */
6027 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
6028 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
6029 if (BGE_IS_5700_FAMILY(sc))
6030 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
6031
6032 /* 5718 step 5c,5d */
6033 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
6034 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
6035
6036 if (BGE_IS_5700_FAMILY(sc)) {
6037 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
6038 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
6039 }
6040
6041 bge_reset(sc);
6042 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
6043 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
6044
6045 /*
6046 * Keep the ASF firmware running if up.
6047 */
6048 if (sc->bge_asf_mode & ASF_STACKUP)
6049 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6050 else
6051 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6052
6053 /* Free the RX lists. */
6054 bge_free_rx_ring_std(sc, disable);
6055
6056 /* Free jumbo RX list. */
6057 if (BGE_IS_JUMBO_CAPABLE(sc))
6058 bge_free_rx_ring_jumbo(sc);
6059
6060 /* Free TX buffers. */
6061 bge_free_tx_ring(sc, disable);
6062
6063 /*
6064 * Isolate/power down the PHY.
6065 */
6066 if (!(sc->bge_flags & BGEF_FIBER_TBI))
6067 mii_down(&sc->bge_mii);
6068
6069 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
6070
6071 /* Clear MAC's link state (PHY may still have link UP). */
6072 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6073
6074 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6075 }
6076
6077 static void
6078 bge_link_upd(struct bge_softc *sc)
6079 {
6080 struct ifnet * const ifp = &sc->ethercom.ec_if;
6081 struct mii_data * const mii = &sc->bge_mii;
6082 uint32_t status;
6083 uint16_t phyval;
6084 int link;
6085
6086 /* Clear 'pending link event' flag */
6087 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
6088
6089 /*
6090 * Process link state changes.
6091 * Grrr. The link status word in the status block does
6092 * not work correctly on the BCM5700 rev AX and BX chips,
6093 * according to all available information. Hence, we have
6094 * to enable MII interrupts in order to properly obtain
6095 * async link changes. Unfortunately, this also means that
6096 * we have to read the MAC status register to detect link
6097 * changes, thereby adding an additional register access to
6098 * the interrupt handler.
6099 */
6100
6101 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
6102 status = CSR_READ_4(sc, BGE_MAC_STS);
6103 if (status & BGE_MACSTAT_MI_INTERRUPT) {
6104 mii_pollstat(mii);
6105
6106 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
6107 mii->mii_media_status & IFM_ACTIVE &&
6108 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
6109 BGE_STS_SETBIT(sc, BGE_STS_LINK);
6110 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
6111 (!(mii->mii_media_status & IFM_ACTIVE) ||
6112 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
6113 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6114
6115 /* Clear the interrupt */
6116 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
6117 BGE_EVTENB_MI_INTERRUPT);
6118 bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
6119 BRGPHY_MII_ISR, &phyval);
6120 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
6121 BRGPHY_MII_IMR, BRGPHY_INTRS);
6122 }
6123 return;
6124 }
6125
6126 if (sc->bge_flags & BGEF_FIBER_TBI) {
6127 status = CSR_READ_4(sc, BGE_MAC_STS);
6128 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
6129 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
6130 BGE_STS_SETBIT(sc, BGE_STS_LINK);
6131 if (BGE_ASICREV(sc->bge_chipid)
6132 == BGE_ASICREV_BCM5704) {
6133 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE,
6134 BGE_MACMODE_TBI_SEND_CFGS);
6135 DELAY(40);
6136 }
6137 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
6138 if_link_state_change(ifp, LINK_STATE_UP);
6139 }
6140 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
6141 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6142 if_link_state_change(ifp, LINK_STATE_DOWN);
6143 }
6144 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
6145 /*
6146 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
6147 		 * bit in the status word always set. Work around this bug by
6148 		 * reading the PHY link status directly.
6149 */
6150 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
6151 BGE_STS_LINK : 0;
6152
6153 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
6154 mii_pollstat(mii);
6155
6156 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
6157 mii->mii_media_status & IFM_ACTIVE &&
6158 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
6159 BGE_STS_SETBIT(sc, BGE_STS_LINK);
6160 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
6161 (!(mii->mii_media_status & IFM_ACTIVE) ||
6162 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
6163 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6164 }
6165 } else {
6166 /*
6167 * For controllers that call mii_tick, we have to poll
6168 * link status.
6169 */
6170 mii_pollstat(mii);
6171 }
6172
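	/*
	 * On 5784 AX the MAC clock rate (and hence the core-clock timer
	 * prescaler) can change; reprogram the prescaler to match the rate
	 * just read back from the CPMU.
	 */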
6173 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
6174 uint32_t reg, scale;
6175
6176 reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) &
6177 BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK;
6178 if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5)
6179 scale = 65;
6180 else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25)
6181 scale = 6;
6182 else
6183 scale = 12;
6184
6185 reg = CSR_READ_4(sc, BGE_MISC_CFG) &
6186 ~BGE_MISCCFG_TIMER_PRESCALER;
6187 reg |= scale << 1;
6188 CSR_WRITE_4(sc, BGE_MISC_CFG, reg);
6189 }
6190 /* Clear the attention */
6191 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
6192 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
6193 BGE_MACSTAT_LINK_CHANGED);
6194 }
6195
6196 static int
6197 bge_sysctl_verify(SYSCTLFN_ARGS)
6198 {
6199 int error, t;
6200 struct sysctlnode node;
6201
6202 node = *rnode;
6203 t = *(int*)rnode->sysctl_data;
6204 node.sysctl_data = &t;
6205 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6206 if (error || newp == NULL)
6207 return error;
6208
6209 #if 0
6210 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
6211 node.sysctl_num, rnode->sysctl_num));
6212 #endif
6213
6214 if (node.sysctl_num == bge_rxthresh_nodenum) {
6215 if (t < 0 || t >= NBGE_RX_THRESH)
6216 return EINVAL;
6217 bge_update_all_threshes(t);
6218 } else
6219 return EINVAL;
6220
6221 *(int*)rnode->sysctl_data = t;
6222
6223 return 0;
6224 }
6225
6226 /*
6227 * Set up sysctl(3) MIB, hw.bge.*.
6228 */
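/*
 * For example (assuming the default threshold table), "sysctl -w
 * hw.bge.rx_lvl=1" selects the second Rx interrupt-mitigation level and
 * applies it to all bge(4) instances via bge_update_all_threshes().
 */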
6229 static void
6230 bge_sysctl_init(struct bge_softc *sc)
6231 {
6232 int rc, bge_root_num;
6233 const struct sysctlnode *node;
6234
6235 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6236 0, CTLTYPE_NODE, "bge",
6237 SYSCTL_DESCR("BGE interface controls"),
6238 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
6239 goto out;
6240 }
6241
6242 bge_root_num = node->sysctl_num;
6243
6244 /* BGE Rx interrupt mitigation level */
6245 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6246 CTLFLAG_READWRITE,
6247 CTLTYPE_INT, "rx_lvl",
6248 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
6249 bge_sysctl_verify, 0,
6250 &bge_rx_thresh_lvl,
6251 0, CTL_HW, bge_root_num, CTL_CREATE,
6252 CTL_EOL)) != 0) {
6253 goto out;
6254 }
6255
6256 bge_rxthresh_nodenum = node->sysctl_num;
6257
6258 return;
6259
6260 out:
6261 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
6262 }
6263
6264 #ifdef BGE_DEBUG
6265 void
6266 bge_debug_info(struct bge_softc *sc)
6267 {
6268
6269 printf("Hardware Flags:\n");
6270 if (BGE_IS_57765_PLUS(sc))
6271 printf(" - 57765 Plus\n");
6272 if (BGE_IS_5717_PLUS(sc))
6273 printf(" - 5717 Plus\n");
6274 if (BGE_IS_5755_PLUS(sc))
6275 printf(" - 5755 Plus\n");
6276 if (BGE_IS_575X_PLUS(sc))
6277 printf(" - 575X Plus\n");
6278 if (BGE_IS_5705_PLUS(sc))
6279 printf(" - 5705 Plus\n");
6280 if (BGE_IS_5714_FAMILY(sc))
6281 printf(" - 5714 Family\n");
6282 if (BGE_IS_5700_FAMILY(sc))
6283 printf(" - 5700 Family\n");
6284 if (sc->bge_flags & BGEF_IS_5788)
6285 printf(" - 5788\n");
6286 if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
6287 printf(" - Supports Jumbo Frames\n");
6288 if (sc->bge_flags & BGEF_NO_EEPROM)
6289 printf(" - No EEPROM\n");
6290 if (sc->bge_flags & BGEF_PCIX)
6291 printf(" - PCI-X Bus\n");
6292 if (sc->bge_flags & BGEF_PCIE)
6293 printf(" - PCI Express Bus\n");
6294 if (sc->bge_flags & BGEF_RX_ALIGNBUG)
6295 printf(" - RX Alignment Bug\n");
6296 if (sc->bge_flags & BGEF_APE)
6297 printf(" - APE\n");
6298 if (sc->bge_flags & BGEF_CPMU_PRESENT)
6299 printf(" - CPMU\n");
6300 if (sc->bge_flags & BGEF_TSO)
6301 printf(" - TSO\n");
6302 if (sc->bge_flags & BGEF_TAGGED_STATUS)
6303 printf(" - TAGGED_STATUS\n");
6304
6305 /* PHY related */
6306 if (sc->bge_phy_flags & BGEPHYF_NO_3LED)
6307 printf(" - No 3 LEDs\n");
6308 if (sc->bge_phy_flags & BGEPHYF_CRC_BUG)
6309 printf(" - CRC bug\n");
6310 if (sc->bge_phy_flags & BGEPHYF_ADC_BUG)
6311 printf(" - ADC bug\n");
6312 if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG)
6313 printf(" - 5704 A0 bug\n");
6314 if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG)
6315 printf(" - jitter bug\n");
6316 if (sc->bge_phy_flags & BGEPHYF_BER_BUG)
6317 printf(" - BER bug\n");
6318 if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM)
6319 printf(" - adjust trim\n");
6320 if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED)
6321 printf(" - no wirespeed\n");
6322
6323 /* ASF related */
6324 if (sc->bge_asf_mode & ASF_ENABLE)
6325 printf(" - ASF enable\n");
6326 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE)
6327 printf(" - ASF new handshake\n");
6328 if (sc->bge_asf_mode & ASF_STACKUP)
6329 printf(" - ASF stackup\n");
6330 }
6331 #endif /* BGE_DEBUG */
6332
6333 static int
6334 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6335 {
6336 prop_dictionary_t dict;
6337 prop_data_t ea;
6338
6339 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0)
6340 return 1;
6341
6342 dict = device_properties(sc->bge_dev);
6343 ea = prop_dictionary_get(dict, "mac-address");
6344 if (ea != NULL) {
6345 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
6346 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
6347 memcpy(ether_addr, prop_data_value(ea), ETHER_ADDR_LEN);
6348 return 0;
6349 }
6350
6351 return 1;
6352 }
6353
6354 static int
6355 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6356 {
6357 uint32_t mac_addr;
6358
6359 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6360 if ((mac_addr >> 16) == 0x484b) {
6361 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6362 ether_addr[1] = (uint8_t)mac_addr;
6363 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6364 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6365 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6366 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6367 ether_addr[5] = (uint8_t)mac_addr;
6368 return 0;
6369 }
6370 return 1;
6371 }
6372
6373 static int
6374 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6375 {
6376 int mac_offset = BGE_EE_MAC_OFFSET;
6377
6378 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
6379 mac_offset = BGE_EE_MAC_OFFSET_5906;
6380
6381 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6382 ETHER_ADDR_LEN));
6383 }
6384
6385 static int
6386 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6387 {
6388
6389 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
6390 return 1;
6391
6392 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6393 ETHER_ADDR_LEN));
6394 }
6395
6396 static int
6397 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6398 {
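	/*
	 * Address sources tried in order: the "mac-address" device property,
	 * the chip's shared memory, NVRAM, then EEPROM.
	 */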
6399 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6400 /* NOTE: Order is critical */
6401 bge_get_eaddr_fw,
6402 bge_get_eaddr_mem,
6403 bge_get_eaddr_nvram,
6404 bge_get_eaddr_eeprom,
6405 NULL
6406 };
6407 const bge_eaddr_fcn_t *func;
6408
6409 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6410 if ((*func)(sc, eaddr) == 0)
6411 break;
6412 }
6413 return *func == NULL ? ENXIO : 0;
6414 }
6415