1 /* $NetBSD: if_bge.c,v 1.327 2019/02/20 17:00:20 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36 */
37
38 /*
39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40 *
41 * NetBSD version by:
42 *
43 * Frank van der Linden <fvdl (at) wasabisystems.com>
44 * Jason Thorpe <thorpej (at) wasabisystems.com>
45 * Jonathan Stone <jonathan (at) dsg.stanford.edu>
46 *
47 * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
48 * Senior Engineer, Wind River Systems
49 */
50
51 /*
52 * The Broadcom BCM5700 is based on technology originally developed by
53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57 * frames, highly configurable RX filtering, and 16 RX and TX queues
58 * (which, along with RX filter rules, can be used for QOS applications).
59 * Other features, such as TCP segmentation, may be available as part
60 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61 * firmware images can be stored in hardware and need not be compiled
62 * into the driver.
63 *
64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
66 *
67 * The BCM5701 is a single-chip solution incorporating both the BCM5700
68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
69 * does not support external SSRAM.
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.327 2019/02/20 17:00:20 msaitoh Exp $");
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/sockio.h>
88 #include <sys/mbuf.h>
89 #include <sys/malloc.h>
90 #include <sys/kernel.h>
91 #include <sys/device.h>
92 #include <sys/socket.h>
93 #include <sys/sysctl.h>
94
95 #include <net/if.h>
96 #include <net/if_dl.h>
97 #include <net/if_media.h>
98 #include <net/if_ether.h>
99
100 #include <sys/rndsource.h>
101
102 #ifdef INET
103 #include <netinet/in.h>
104 #include <netinet/in_systm.h>
105 #include <netinet/in_var.h>
106 #include <netinet/ip.h>
107 #endif
108
109 /* Headers for TCP Segmentation Offload (TSO) */
110 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */
111 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */
112 #include <netinet/ip.h> /* for struct ip */
113 #include <netinet/tcp.h> /* for struct tcphdr */
114
115
116 #include <net/bpf.h>
117
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
120 #include <dev/pci/pcidevs.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/miidevs.h>
125 #include <dev/mii/brgphyreg.h>
126
127 #include <dev/pci/if_bgereg.h>
128 #include <dev/pci/if_bgevar.h>
129
130 #include <prop/proplib.h>
131
132 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
133
134
135 /*
136 * Tunable thresholds for rx-side bge interrupt mitigation.
137 */
138
139 /*
140 * The pairs of values below were obtained from empirical measurement
141 * on bcm5700 rev B2; they are designed to give roughly 1 receive
142 * interrupt for every N packets received, where N is, approximately,
143 * the second value (rx_max_bds) in each pair. The values are chosen
144 * such that moving from one pair to the succeeding pair was observed
145 * to roughly halve interrupt rate under sustained input packet load.
146 * The values were empirically chosen to avoid overflowing internal
147 * limits on the bcm5700: increasing rx_ticks much beyond 600
148 * results in internal wrapping and higher interrupt rates.
149 * The limit of 46 frames was chosen to match NFS workloads.
150 *
151 * These values also work well on bcm5701, bcm5704C, and (less
152 * tested) bcm5703. On other chipsets (including the Altima chip
153 * family), the larger values may overflow internal chip limits,
154 * leading to increasing interrupt rates rather than lower interrupt
155 * rates.
156 *
157 * Applications using heavy interrupt mitigation (interrupting every
158 * 32 or 46 frames) in both directions may need to increase the TCP
159 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
160 * full link bandwidth, due to ACKs and window updates lingering
161 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
162 */
163 static const struct bge_load_rx_thresh {
164 int rx_ticks;
165 int rx_max_bds;
166 } bge_rx_threshes[] = {
167 { 16, 1 }, /* rx_max_bds = 1 disables interrupt mitigation */
168 { 32, 2 },
169 { 50, 4 },
170 { 100, 8 },
171 { 192, 16 },
172 { 416, 32 },
173 { 598, 46 }
174 };
175 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
176
177 /* XXX patchable; should be sysctl'able */
178 static int bge_auto_thresh = 1;
179 static int bge_rx_thresh_lvl;
180
181 static int bge_rxthresh_nodenum;
182
183 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
184
185 static uint32_t bge_chipid(const struct pci_attach_args *);
186 static int bge_can_use_msi(struct bge_softc *);
187 static int bge_probe(device_t, cfdata_t, void *);
188 static void bge_attach(device_t, device_t, void *);
189 static int bge_detach(device_t, int);
190 static void bge_release_resources(struct bge_softc *);
191
192 static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
193 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
194 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
195 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
196 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
197
198 static void bge_txeof(struct bge_softc *);
199 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
200 static void bge_rxeof(struct bge_softc *);
201
202 static void bge_asf_driver_up (struct bge_softc *);
203 static void bge_tick(void *);
204 static void bge_stats_update(struct bge_softc *);
205 static void bge_stats_update_regs(struct bge_softc *);
206 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
207
208 static int bge_intr(void *);
209 static void bge_start(struct ifnet *);
210 static int bge_ifflags_cb(struct ethercom *);
211 static int bge_ioctl(struct ifnet *, u_long, void *);
212 static int bge_init(struct ifnet *);
213 static void bge_stop(struct ifnet *, int);
214 static void bge_watchdog(struct ifnet *);
215 static int bge_ifmedia_upd(struct ifnet *);
216 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
217
218 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
219 static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);
220
221 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
222 static int bge_read_eeprom(struct bge_softc *, void *, int, int);
223 static void bge_setmulti(struct bge_softc *);
224
225 static void bge_handle_events(struct bge_softc *);
226 static int bge_alloc_jumbo_mem(struct bge_softc *);
227 #if 0 /* XXX */
228 static void bge_free_jumbo_mem(struct bge_softc *);
229 #endif
230 static void *bge_jalloc(struct bge_softc *);
231 static void bge_jfree(struct mbuf *, void *, size_t, void *);
232 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
233 bus_dmamap_t);
234 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
235 static int bge_init_rx_ring_std(struct bge_softc *);
236 static void bge_free_rx_ring_std(struct bge_softc *sc, bool);
237 static int bge_init_rx_ring_jumbo(struct bge_softc *);
238 static void bge_free_rx_ring_jumbo(struct bge_softc *);
239 static void bge_free_tx_ring(struct bge_softc *sc, bool);
240 static int bge_init_tx_ring(struct bge_softc *);
241
242 static int bge_chipinit(struct bge_softc *);
243 static int bge_blockinit(struct bge_softc *);
244 static int bge_phy_addr(struct bge_softc *);
245 static uint32_t bge_readmem_ind(struct bge_softc *, int);
246 static void bge_writemem_ind(struct bge_softc *, int, int);
247 static void bge_writembx(struct bge_softc *, int, int);
248 static void bge_writembx_flush(struct bge_softc *, int, int);
249 static void bge_writemem_direct(struct bge_softc *, int, int);
250 static void bge_writereg_ind(struct bge_softc *, int, int);
251 static void bge_set_max_readrq(struct bge_softc *);
252
253 static int bge_miibus_readreg(device_t, int, int, uint16_t *);
254 static int bge_miibus_writereg(device_t, int, int, uint16_t);
255 static void bge_miibus_statchg(struct ifnet *);
256
257 #define BGE_RESET_SHUTDOWN 0
258 #define BGE_RESET_START 1
259 #define BGE_RESET_SUSPEND 2
260 static void bge_sig_post_reset(struct bge_softc *, int);
261 static void bge_sig_legacy(struct bge_softc *, int);
262 static void bge_sig_pre_reset(struct bge_softc *, int);
263 static void bge_wait_for_event_ack(struct bge_softc *);
264 static void bge_stop_fw(struct bge_softc *);
265 static int bge_reset(struct bge_softc *);
266 static void bge_link_upd(struct bge_softc *);
267 static void bge_sysctl_init(struct bge_softc *);
268 static int bge_sysctl_verify(SYSCTLFN_PROTO);
269
270 static void bge_ape_lock_init(struct bge_softc *);
271 static void bge_ape_read_fw_ver(struct bge_softc *);
272 static int bge_ape_lock(struct bge_softc *, int);
273 static void bge_ape_unlock(struct bge_softc *, int);
274 static void bge_ape_send_event(struct bge_softc *, uint32_t);
275 static void bge_ape_driver_state_change(struct bge_softc *, int);
276
277 #ifdef BGE_DEBUG
278 #define DPRINTF(x) if (bgedebug) printf x
279 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x
280 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0)
281 int bgedebug = 0;
282 int bge_tso_debug = 0;
283 void bge_debug_info(struct bge_softc *);
284 #else
285 #define DPRINTF(x)
286 #define DPRINTFN(n,x)
287 #define BGE_TSO_PRINTF(x)
288 #endif
289
290 #ifdef BGE_EVENT_COUNTERS
291 #define BGE_EVCNT_INCR(ev) (ev).ev_count++
292 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val)
293 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val)
294 #else
295 #define BGE_EVCNT_INCR(ev) /* nothing */
296 #define BGE_EVCNT_ADD(ev, val) /* nothing */
297 #define BGE_EVCNT_UPD(ev, val) /* nothing */
298 #endif
299
300 #define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b
301 /*
302 * The BCM5700 documentation seems to indicate that the hardware still has the
303 * Alteon vendor ID burned into it, though it should always be overridden by
304 * the value in the EEPROM. We'll check for it anyway.
305 */
306 static const struct bge_product {
307 pci_vendor_id_t bp_vendor;
308 pci_product_id_t bp_product;
309 const char *bp_name;
310 } bge_products[] = {
311 { VIDDID(ALTEON, BCM5700), "Broadcom BCM5700 Gigabit" },
312 { VIDDID(ALTEON, BCM5701), "Broadcom BCM5701 Gigabit" },
313 { VIDDID(ALTIMA, AC1000), "Altima AC1000 Gigabit" },
314 { VIDDID(ALTIMA, AC1001), "Altima AC1001 Gigabit" },
315 { VIDDID(ALTIMA, AC1003), "Altima AC1003 Gigabit" },
316 { VIDDID(ALTIMA, AC9100), "Altima AC9100 Gigabit" },
317 { VIDDID(APPLE, BCM5701), "APPLE BCM5701 Gigabit" },
318 { VIDDID(BROADCOM, BCM5700), "Broadcom BCM5700 Gigabit" },
319 { VIDDID(BROADCOM, BCM5701), "Broadcom BCM5701 Gigabit" },
320 { VIDDID(BROADCOM, BCM5702), "Broadcom BCM5702 Gigabit" },
321 { VIDDID(BROADCOM, BCM5702FE), "Broadcom BCM5702FE Fast" },
322 { VIDDID(BROADCOM, BCM5702X), "Broadcom BCM5702X Gigabit" },
323 { VIDDID(BROADCOM, BCM5703), "Broadcom BCM5703 Gigabit" },
324 { VIDDID(BROADCOM, BCM5703X), "Broadcom BCM5703X Gigabit" },
325 { VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" },
326 { VIDDID(BROADCOM, BCM5704C), "Broadcom BCM5704C Dual Gigabit" },
327 { VIDDID(BROADCOM, BCM5704S), "Broadcom BCM5704S Dual Gigabit" },
328 { VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" },
329 { VIDDID(BROADCOM, BCM5705), "Broadcom BCM5705 Gigabit" },
330 { VIDDID(BROADCOM, BCM5705F), "Broadcom BCM5705F Gigabit" },
331 { VIDDID(BROADCOM, BCM5705K), "Broadcom BCM5705K Gigabit" },
332 { VIDDID(BROADCOM, BCM5705M), "Broadcom BCM5705M Gigabit" },
333 { VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" },
334 { VIDDID(BROADCOM, BCM5714), "Broadcom BCM5714 Gigabit" },
335 { VIDDID(BROADCOM, BCM5714S), "Broadcom BCM5714S Gigabit" },
336 { VIDDID(BROADCOM, BCM5715), "Broadcom BCM5715 Gigabit" },
337 { VIDDID(BROADCOM, BCM5715S), "Broadcom BCM5715S Gigabit" },
338 { VIDDID(BROADCOM, BCM5717), "Broadcom BCM5717 Gigabit" },
339 { VIDDID(BROADCOM, BCM5717C), "Broadcom BCM5717 Gigabit" },
340 { VIDDID(BROADCOM, BCM5718), "Broadcom BCM5718 Gigabit" },
341 { VIDDID(BROADCOM, BCM5719), "Broadcom BCM5719 Gigabit" },
342 { VIDDID(BROADCOM, BCM5720), "Broadcom BCM5720 Gigabit" },
343 { VIDDID(BROADCOM, BCM5721), "Broadcom BCM5721 Gigabit" },
344 { VIDDID(BROADCOM, BCM5722), "Broadcom BCM5722 Gigabit" },
345 { VIDDID(BROADCOM, BCM5723), "Broadcom BCM5723 Gigabit" },
346 { VIDDID(BROADCOM, BCM5725), "Broadcom BCM5725 Gigabit" },
347 { VIDDID(BROADCOM, BCM5727), "Broadcom BCM5727 Gigabit" },
348 { VIDDID(BROADCOM, BCM5750), "Broadcom BCM5750 Gigabit" },
349 { VIDDID(BROADCOM, BCM5751), "Broadcom BCM5751 Gigabit" },
350 { VIDDID(BROADCOM, BCM5751F), "Broadcom BCM5751F Gigabit" },
351 { VIDDID(BROADCOM, BCM5751M), "Broadcom BCM5751M Gigabit" },
352 { VIDDID(BROADCOM, BCM5752), "Broadcom BCM5752 Gigabit" },
353 { VIDDID(BROADCOM, BCM5752M), "Broadcom BCM5752M Gigabit" },
354 { VIDDID(BROADCOM, BCM5753), "Broadcom BCM5753 Gigabit" },
355 { VIDDID(BROADCOM, BCM5753F), "Broadcom BCM5753F Gigabit" },
356 { VIDDID(BROADCOM, BCM5753M), "Broadcom BCM5753M Gigabit" },
357 { VIDDID(BROADCOM, BCM5754), "Broadcom BCM5754 Gigabit" },
358 { VIDDID(BROADCOM, BCM5754M), "Broadcom BCM5754M Gigabit" },
359 { VIDDID(BROADCOM, BCM5755), "Broadcom BCM5755 Gigabit" },
360 { VIDDID(BROADCOM, BCM5755M), "Broadcom BCM5755M Gigabit" },
361 { VIDDID(BROADCOM, BCM5756), "Broadcom BCM5756 Gigabit" },
362 { VIDDID(BROADCOM, BCM5761), "Broadcom BCM5761 Gigabit" },
363 { VIDDID(BROADCOM, BCM5761E), "Broadcom BCM5761E Gigabit" },
364 { VIDDID(BROADCOM, BCM5761S), "Broadcom BCM5761S Gigabit" },
365 { VIDDID(BROADCOM, BCM5761SE), "Broadcom BCM5761SE Gigabit" },
366 { VIDDID(BROADCOM, BCM5762), "Broadcom BCM5762 Gigabit" },
367 { VIDDID(BROADCOM, BCM5764), "Broadcom BCM5764 Gigabit" },
368 { VIDDID(BROADCOM, BCM5780), "Broadcom BCM5780 Gigabit" },
369 { VIDDID(BROADCOM, BCM5780S), "Broadcom BCM5780S Gigabit" },
370 { VIDDID(BROADCOM, BCM5781), "Broadcom BCM5781 Gigabit" },
371 { VIDDID(BROADCOM, BCM5782), "Broadcom BCM5782 Gigabit" },
372 { VIDDID(BROADCOM, BCM5784M), "BCM5784M NetLink 1000baseT" },
373 { VIDDID(BROADCOM, BCM5785F), "BCM5785F NetLink 10/100" },
374 { VIDDID(BROADCOM, BCM5785G), "BCM5785G NetLink 1000baseT" },
375 { VIDDID(BROADCOM, BCM5786), "Broadcom BCM5786 Gigabit" },
376 { VIDDID(BROADCOM, BCM5787), "Broadcom BCM5787 Gigabit" },
377 { VIDDID(BROADCOM, BCM5787F), "Broadcom BCM5787F 10/100" },
378 { VIDDID(BROADCOM, BCM5787M), "Broadcom BCM5787M Gigabit" },
379 { VIDDID(BROADCOM, BCM5788), "Broadcom BCM5788 Gigabit" },
380 { VIDDID(BROADCOM, BCM5789), "Broadcom BCM5789 Gigabit" },
381 { VIDDID(BROADCOM, BCM5901), "Broadcom BCM5901 Fast" },
382 { VIDDID(BROADCOM, BCM5901A2), "Broadcom BCM5901A2 Fast" },
383 { VIDDID(BROADCOM, BCM5903M), "Broadcom BCM5903M Fast" },
384 { VIDDID(BROADCOM, BCM5906), "Broadcom BCM5906 Fast" },
385 { VIDDID(BROADCOM, BCM5906M), "Broadcom BCM5906M Fast" },
386 { VIDDID(BROADCOM, BCM57760), "Broadcom BCM57760 Gigabit" },
387 { VIDDID(BROADCOM, BCM57761), "Broadcom BCM57761 Gigabit" },
388 { VIDDID(BROADCOM, BCM57762), "Broadcom BCM57762 Gigabit" },
389 { VIDDID(BROADCOM, BCM57764), "Broadcom BCM57764 Gigabit" },
390 { VIDDID(BROADCOM, BCM57765), "Broadcom BCM57765 Gigabit" },
391 { VIDDID(BROADCOM, BCM57766), "Broadcom BCM57766 Gigabit" },
392 { VIDDID(BROADCOM, BCM57767), "Broadcom BCM57767 Gigabit" },
393 { VIDDID(BROADCOM, BCM57780), "Broadcom BCM57780 Gigabit" },
394 { VIDDID(BROADCOM, BCM57781), "Broadcom BCM57781 Gigabit" },
395 { VIDDID(BROADCOM, BCM57782), "Broadcom BCM57782 Gigabit" },
396 { VIDDID(BROADCOM, BCM57785), "Broadcom BCM57785 Gigabit" },
397 { VIDDID(BROADCOM, BCM57786), "Broadcom BCM57786 Gigabit" },
398 { VIDDID(BROADCOM, BCM57787), "Broadcom BCM57787 Gigabit" },
399 { VIDDID(BROADCOM, BCM57788), "Broadcom BCM57788 Gigabit" },
400 { VIDDID(BROADCOM, BCM57790), "Broadcom BCM57790 Gigabit" },
401 { VIDDID(BROADCOM, BCM57791), "Broadcom BCM57791 Gigabit" },
402 { VIDDID(BROADCOM, BCM57795), "Broadcom BCM57795 Gigabit" },
403 { VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" },
404 { VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" },
405 { VIDDID(3COM, 3C996), "3Com 3c996 Gigabit" },
406 { VIDDID(FUJITSU4, PW008GE4), "Fujitsu PW008GE4 Gigabit" },
407 { VIDDID(FUJITSU4, PW008GE5), "Fujitsu PW008GE5 Gigabit" },
408 { VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" },
409 { 0, 0, NULL },
410 };
411
412 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
413 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGEF_5700_FAMILY)
414 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGEF_5705_PLUS)
415 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGEF_5714_FAMILY)
416 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGEF_575X_PLUS)
417 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGEF_5755_PLUS)
418 #define BGE_IS_57765_FAMILY(sc) ((sc)->bge_flags & BGEF_57765_FAMILY)
419 #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGEF_57765_PLUS)
420 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGEF_5717_PLUS)
421
422 static const struct bge_revision {
423 uint32_t br_chipid;
424 const char *br_name;
425 } bge_revisions[] = {
426 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
427 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
428 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
429 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
430 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
431 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
432 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
433 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
434 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
435 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
436 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
437 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
438 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
439 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
440 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
441 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
442 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
443 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
444 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
445 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
446 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
447 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
448 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
449 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
450 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
451 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
452 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
453 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
454 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
455 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
456 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
457 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
458 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
459 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
460 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
461 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
462 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
463 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
464 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
465 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
466 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
467 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
468 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
469 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
470 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
471 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
472 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
473 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
474 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
475 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
476 { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
477 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
478 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
479 { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
480 { BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
481 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
482 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
483 { BGE_CHIPID_BCM5784_B0, "BCM5784 B0" },
484 /* 5754 and 5787 share the same ASIC ID */
485 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
486 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
487 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
488 { BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
489 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
490 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
491 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
492 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
493 { BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
494 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
495 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
496
497 { 0, NULL }
498 };
499
500 /*
501 * Some defaults for major revisions, so that newer steppings
502 * that we don't know about have a shot at working.
503 */
504 static const struct bge_revision bge_majorrevs[] = {
505 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
506 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
507 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
508 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
509 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
510 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
511 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
512 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
513 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
514 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
515 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
516 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
517 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
518 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
519 /* 5754 and 5787 share the same ASIC ID */
520 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
521 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
522 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
523 { BGE_ASICREV_BCM57766, "unknown BCM57766" },
524 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
525 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
526 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
527 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
528 { BGE_ASICREV_BCM5762, "unknown BCM5762" },
529
530 { 0, NULL }
531 };
532
533 static int bge_allow_asf = 1;
534
535 CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
536 bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
537
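/*
 * Indirect access to the chip's internal memory: program the PCI
 * memory-window base register in configuration space, access the
 * data register, then restore the window base to zero.
 */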
538 static uint32_t
539 bge_readmem_ind(struct bge_softc *sc, int off)
540 {
541 pcireg_t val;
542
543 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
544 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
545 return 0;
546
547 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
548 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
549 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
550 return val;
551 }
552
553 static void
554 bge_writemem_ind(struct bge_softc *sc, int off, int val)
555 {
556
557 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
558 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
559 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
560 }
561
562 /*
563 * PCI Express only
564 */
565 static void
566 bge_set_max_readrq(struct bge_softc *sc)
567 {
568 pcireg_t val;
569
570 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
571 + PCIE_DCSR);
572 val &= ~PCIE_DCSR_MAX_READ_REQ;
573 switch (sc->bge_expmrq) {
574 case 2048:
575 val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
576 break;
577 case 4096:
578 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
579 break;
580 default:
581 panic("incorrect expmrq value(%d)", sc->bge_expmrq);
582 break;
583 }
584 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
585 + PCIE_DCSR, val);
586 }
587
588 #ifdef notdef
589 static uint32_t
590 bge_readreg_ind(struct bge_softc *sc, int off)
591 {
592 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
593 return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
594 }
595 #endif
596
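/*
 * Write a device register indirectly through the PCI register window
 * (BGE_PCI_REG_BASEADDR/BGE_PCI_REG_DATA) in configuration space.
 */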
597 static void
598 bge_writereg_ind(struct bge_softc *sc, int off, int val)
599 {
600 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
601 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
602 }
603
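/*
 * Write a register through the regular memory-mapped register space.
 */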
604 static void
605 bge_writemem_direct(struct bge_softc *sc, int off, int val)
606 {
607 CSR_WRITE_4(sc, off, val);
608 }
609
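/*
 * Write a host-to-NIC mailbox register.  The BCM5906 uses the
 * low-priority mailbox region; bge_writembx_flush() is identical but
 * uses the flushing write macro.
 */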
610 static void
611 bge_writembx(struct bge_softc *sc, int off, int val)
612 {
613 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
614 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
615
616 CSR_WRITE_4(sc, off, val);
617 }
618
619 static void
620 bge_writembx_flush(struct bge_softc *sc, int off, int val)
621 {
622 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
623 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625 CSR_WRITE_4_FLUSH(sc, off, val);
626 }
627
628 /*
629 * Clear all stale locks and select the lock for this driver instance.
630 */
631 void
632 bge_ape_lock_init(struct bge_softc *sc)
633 {
634 struct pci_attach_args *pa = &(sc->bge_pa);
635 uint32_t bit, regbase;
636 int i;
637
638 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
639 regbase = BGE_APE_LOCK_GRANT;
640 else
641 regbase = BGE_APE_PER_LOCK_GRANT;
642
643 /* Clear any stale locks. */
644 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
645 switch (i) {
646 case BGE_APE_LOCK_PHY0:
647 case BGE_APE_LOCK_PHY1:
648 case BGE_APE_LOCK_PHY2:
649 case BGE_APE_LOCK_PHY3:
650 bit = BGE_APE_LOCK_GRANT_DRIVER0;
651 break;
652 default:
653 if (pa->pa_function == 0)
654 bit = BGE_APE_LOCK_GRANT_DRIVER0;
655 else
656 bit = (1 << pa->pa_function);
657 }
658 APE_WRITE_4(sc, regbase + 4 * i, bit);
659 }
660
661 /* Select the PHY lock based on the device's function number. */
662 switch (pa->pa_function) {
663 case 0:
664 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
665 break;
666 case 1:
667 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
668 break;
669 case 2:
670 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
671 break;
672 case 3:
673 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
674 break;
675 default:
676 printf("%s: PHY lock not supported on this function\n",
677 device_xname(sc->bge_dev));
678 break;
679 }
680 }
681
682 /*
683 * Check for APE firmware, set flags, and print version info.
684 */
685 void
686 bge_ape_read_fw_ver(struct bge_softc *sc)
687 {
688 const char *fwtype;
689 uint32_t apedata, features;
690
691 /* Check for a valid APE signature in shared memory. */
692 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
693 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
694 sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE;
695 return;
696 }
697
698 /* Check if APE firmware is running. */
699 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
700 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
701 printf("%s: APE signature found but FW status not ready! "
702 "0x%08x\n", device_xname(sc->bge_dev), apedata);
703 return;
704 }
705
706 sc->bge_mfw_flags |= BGE_MFW_ON_APE;
707
708 /* Fetch the APE firmware type and version. */
709 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
710 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
711 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
712 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
713 fwtype = "NCSI";
714 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
715 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
716 fwtype = "DASH";
717 } else
718 fwtype = "UNKN";
719
720 /* Print the APE firmware version. */
721 aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
722 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
723 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
724 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
725 (apedata & BGE_APE_FW_VERSION_BLDMSK));
726 }
727
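/*
 * Acquire one of the locks shared with the APE firmware.  Waits up to
 * one second for the grant and revokes the request on failure.
 */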
728 int
729 bge_ape_lock(struct bge_softc *sc, int locknum)
730 {
731 struct pci_attach_args *pa = &(sc->bge_pa);
732 uint32_t bit, gnt, req, status;
733 int i, off;
734
735 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
736 return (0);
737
738 /* Lock request/grant registers have different bases. */
739 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
740 req = BGE_APE_LOCK_REQ;
741 gnt = BGE_APE_LOCK_GRANT;
742 } else {
743 req = BGE_APE_PER_LOCK_REQ;
744 gnt = BGE_APE_PER_LOCK_GRANT;
745 }
746
747 off = 4 * locknum;
748
749 switch (locknum) {
750 case BGE_APE_LOCK_GPIO:
751 /* Lock required when using GPIO. */
752 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
753 return (0);
754 if (pa->pa_function == 0)
755 bit = BGE_APE_LOCK_REQ_DRIVER0;
756 else
757 bit = (1 << pa->pa_function);
758 break;
759 case BGE_APE_LOCK_GRC:
760 /* Lock required to reset the device. */
761 if (pa->pa_function == 0)
762 bit = BGE_APE_LOCK_REQ_DRIVER0;
763 else
764 bit = (1 << pa->pa_function);
765 break;
766 case BGE_APE_LOCK_MEM:
767 /* Lock required when accessing certain APE memory. */
768 if (pa->pa_function == 0)
769 bit = BGE_APE_LOCK_REQ_DRIVER0;
770 else
771 bit = (1 << pa->pa_function);
772 break;
773 case BGE_APE_LOCK_PHY0:
774 case BGE_APE_LOCK_PHY1:
775 case BGE_APE_LOCK_PHY2:
776 case BGE_APE_LOCK_PHY3:
777 /* Lock required when accessing PHYs. */
778 bit = BGE_APE_LOCK_REQ_DRIVER0;
779 break;
780 default:
781 return (EINVAL);
782 }
783
784 /* Request a lock. */
785 APE_WRITE_4_FLUSH(sc, req + off, bit);
786
787 /* Wait up to 1 second to acquire lock. */
788 for (i = 0; i < 20000; i++) {
789 status = APE_READ_4(sc, gnt + off);
790 if (status == bit)
791 break;
792 DELAY(50);
793 }
794
795 /* Handle any errors. */
796 if (status != bit) {
797 printf("%s: APE lock %d request failed! "
798 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
799 device_xname(sc->bge_dev),
800 locknum, req + off, bit & 0xFFFF, gnt + off,
801 status & 0xFFFF);
802 /* Revoke the lock request. */
803 APE_WRITE_4(sc, gnt + off, bit);
804 return (EBUSY);
805 }
806
807 return (0);
808 }
809
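/*
 * Release a lock previously acquired with bge_ape_lock().
 */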
810 void
811 bge_ape_unlock(struct bge_softc *sc, int locknum)
812 {
813 struct pci_attach_args *pa = &(sc->bge_pa);
814 uint32_t bit, gnt;
815 int off;
816
817 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
818 return;
819
820 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
821 gnt = BGE_APE_LOCK_GRANT;
822 else
823 gnt = BGE_APE_PER_LOCK_GRANT;
824
825 off = 4 * locknum;
826
827 switch (locknum) {
828 case BGE_APE_LOCK_GPIO:
829 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
830 return;
831 if (pa->pa_function == 0)
832 bit = BGE_APE_LOCK_GRANT_DRIVER0;
833 else
834 bit = (1 << pa->pa_function);
835 break;
836 case BGE_APE_LOCK_GRC:
837 if (pa->pa_function == 0)
838 bit = BGE_APE_LOCK_GRANT_DRIVER0;
839 else
840 bit = (1 << pa->pa_function);
841 break;
842 case BGE_APE_LOCK_MEM:
843 if (pa->pa_function == 0)
844 bit = BGE_APE_LOCK_GRANT_DRIVER0;
845 else
846 bit = (1 << pa->pa_function);
847 break;
848 case BGE_APE_LOCK_PHY0:
849 case BGE_APE_LOCK_PHY1:
850 case BGE_APE_LOCK_PHY2:
851 case BGE_APE_LOCK_PHY3:
852 bit = BGE_APE_LOCK_GRANT_DRIVER0;
853 break;
854 default:
855 return;
856 }
857
858 /* Write and flush for consecutive bge_ape_lock() */
859 APE_WRITE_4_FLUSH(sc, gnt + off, bit);
860 }
861
862 /*
863 * Send an event to the APE firmware.
864 */
865 void
866 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
867 {
868 uint32_t apedata;
869 int i;
870
871 /* NCSI does not support APE events. */
872 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
873 return;
874
875 /* Wait up to 1ms for APE to service previous event. */
876 for (i = 10; i > 0; i--) {
877 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
878 break;
879 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
880 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
881 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
882 BGE_APE_EVENT_STATUS_EVENT_PENDING);
883 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
884 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
885 break;
886 }
887 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
888 DELAY(100);
889 }
890 if (i == 0) {
891 printf("%s: APE event 0x%08x send timed out\n",
892 device_xname(sc->bge_dev), event);
893 }
894 }
895
896 void
897 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
898 {
899 uint32_t apedata, event;
900
901 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
902 return;
903
904 switch (kind) {
905 case BGE_RESET_START:
906 /* If this is the first load, clear the load counter. */
907 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
908 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
909 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
910 else {
911 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
912 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
913 }
914 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
915 BGE_APE_HOST_SEG_SIG_MAGIC);
916 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
917 BGE_APE_HOST_SEG_LEN_MAGIC);
918
919 /* Add some version info if bge(4) supports it. */
920 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
921 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
922 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
923 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
924 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
925 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
926 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
927 BGE_APE_HOST_DRVR_STATE_START);
928 event = BGE_APE_EVENT_STATUS_STATE_START;
929 break;
930 case BGE_RESET_SHUTDOWN:
931 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
932 BGE_APE_HOST_DRVR_STATE_UNLOAD);
933 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
934 break;
935 case BGE_RESET_SUSPEND:
936 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
937 break;
938 default:
939 return;
940 }
941
942 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
943 BGE_APE_EVENT_STATUS_STATE_CHNGE);
944 }
945
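/*
 * Read a single byte from NVRAM: take the software arbitration lock,
 * enable NVRAM access, issue a read command and pick the requested
 * byte out of the 32-bit result.
 */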
946 static uint8_t
947 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
948 {
949 uint32_t access, byte = 0;
950 int i;
951
952 /* Lock. */
953 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
954 for (i = 0; i < 8000; i++) {
955 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
956 break;
957 DELAY(20);
958 }
959 if (i == 8000)
960 return 1;
961
962 /* Enable access. */
963 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
964 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
965
966 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
967 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
968 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
969 DELAY(10);
970 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
971 DELAY(10);
972 break;
973 }
974 }
975
976 if (i == BGE_TIMEOUT * 10) {
977 aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
978 return 1;
979 }
980
981 /* Get result. */
982 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
983
984 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
985
986 /* Disable access. */
987 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
988
989 /* Unlock. */
990 CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
991
992 return 0;
993 }
994
995 /*
996 * Read a sequence of bytes from NVRAM.
997 */
998 static int
999 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
1000 {
1001 int error = 0, i;
1002 uint8_t byte = 0;
1003
1004 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1005 return 1;
1006
1007 for (i = 0; i < cnt; i++) {
1008 error = bge_nvram_getbyte(sc, off + i, &byte);
1009 if (error)
1010 break;
1011 *(dest + i) = byte;
1012 }
1013
1014 return (error ? 1 : 0);
1015 }
1016
1017 /*
1018 * Read a byte of data stored in the EEPROM at address 'addr.' The
1019 * BCM570x supports both the traditional bitbang interface and an
1020 * auto access interface for reading the EEPROM. We use the auto
1021 * access method.
1022 */
1023 static uint8_t
1024 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
1025 {
1026 int i;
1027 uint32_t byte = 0;
1028
1029 /*
1030 * Enable use of auto EEPROM access so we can avoid
1031 * having to use the bitbang method.
1032 */
1033 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
1034
1035 /* Reset the EEPROM, load the clock period. */
1036 CSR_WRITE_4(sc, BGE_EE_ADDR,
1037 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
1038 DELAY(20);
1039
1040 /* Issue the read EEPROM command. */
1041 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
1042
1043 /* Wait for completion */
1044 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
1045 DELAY(10);
1046 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
1047 break;
1048 }
1049
1050 if (i == BGE_TIMEOUT * 10) {
1051 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
1052 return 1;
1053 }
1054
1055 /* Get result. */
1056 byte = CSR_READ_4(sc, BGE_EE_DATA);
1057
1058 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
1059
1060 return 0;
1061 }
1062
1063 /*
1064 * Read a sequence of bytes from the EEPROM.
1065 */
1066 static int
1067 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
1068 {
1069 int error = 0, i;
1070 uint8_t byte = 0;
1071 char *dest = destv;
1072
1073 for (i = 0; i < cnt; i++) {
1074 error = bge_eeprom_getbyte(sc, off + i, &byte);
1075 if (error)
1076 break;
1077 *(dest + i) = byte;
1078 }
1079
1080 return (error ? 1 : 0);
1081 }
1082
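/*
 * MII interface: read a PHY register through the MI communication
 * register, with autopolling temporarily disabled.
 */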
1083 static int
1084 bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
1085 {
1086 struct bge_softc *sc = device_private(dev);
1087 uint32_t data;
1088 uint32_t autopoll;
1089 int rv = 0;
1090 int i;
1091
1092 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1093 return -1;
1094
1095 /* Reading with autopolling on may trigger PCI errors */
1096 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1097 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1098 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1099 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1100 DELAY(80);
1101 }
1102
1103 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
1104 BGE_MIPHY(phy) | BGE_MIREG(reg));
1105
1106 for (i = 0; i < BGE_TIMEOUT; i++) {
1107 delay(10);
1108 data = CSR_READ_4(sc, BGE_MI_COMM);
1109 if (!(data & BGE_MICOMM_BUSY)) {
1110 DELAY(5);
1111 data = CSR_READ_4(sc, BGE_MI_COMM);
1112 break;
1113 }
1114 }
1115
1116 if (i == BGE_TIMEOUT) {
1117 aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
1118 rv = ETIMEDOUT;
1119 } else if ((data & BGE_MICOMM_READFAIL) != 0)
1120 rv = -1;
1121 else
1122 *val = data & BGE_MICOMM_DATA;
1123
1124 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1125 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1126 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1127 DELAY(80);
1128 }
1129
1130 bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1131
1132 return rv;
1133 }
1134
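/*
 * MII interface: write a PHY register.  Writes to MII_GTCR and
 * BRGPHY_MII_AUXCTL are skipped on the BCM5906.
 */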
1135 static int
1136 bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
1137 {
1138 struct bge_softc *sc = device_private(dev);
1139 uint32_t autopoll;
1140 int i;
1141
1142 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
1143 (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL))
1144 return 0;
1145
1146 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1147 return -1;
1148
1149 /* Writing with autopolling on may trigger PCI errors */
1150 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1151 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1152 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1153 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1154 DELAY(80);
1155 }
1156
1157 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
1158 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
1159
1160 for (i = 0; i < BGE_TIMEOUT; i++) {
1161 delay(10);
1162 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
1163 delay(5);
1164 CSR_READ_4(sc, BGE_MI_COMM);
1165 break;
1166 }
1167 }
1168
1169 if (autopoll & BGE_MIMODE_AUTOPOLL) {
1170 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1171 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1172 delay(80);
1173 }
1174
1175 bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1176
1177 if (i == BGE_TIMEOUT) {
1178 aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
1179 return ETIMEDOUT;
1180 }
1181
1182 return 0;
1183 }
1184
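/*
 * MII status-change callback: record the negotiated flow-control
 * settings, track link state and reprogram the MAC, TX and RX mode
 * registers to match the current media.
 */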
1185 static void
1186 bge_miibus_statchg(struct ifnet *ifp)
1187 {
1188 struct bge_softc *sc = ifp->if_softc;
1189 struct mii_data *mii = &sc->bge_mii;
1190 uint32_t mac_mode, rx_mode, tx_mode;
1191
1192 /*
1193 * Get flow control negotiation result.
1194 */
1195 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1196 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
1197 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1198
1199 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
1200 mii->mii_media_status & IFM_ACTIVE &&
1201 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1202 BGE_STS_SETBIT(sc, BGE_STS_LINK);
1203 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
1204 (!(mii->mii_media_status & IFM_ACTIVE) ||
1205 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
1206 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
1207
1208 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
1209 return;
1210
1211 /* Set the port mode (MII/GMII) to match the link speed. */
1212 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1213 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1214 tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1215 rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1216 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1217 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1218 mac_mode |= BGE_PORTMODE_GMII;
1219 else
1220 mac_mode |= BGE_PORTMODE_MII;
1221
1222 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1223 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1224 if ((mii->mii_media_active & IFM_FDX) != 0) {
1225 if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
1226 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1227 if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
1228 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1229 } else
1230 mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1231
1232 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
1233 DELAY(40);
1234 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1235 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1236 }
1237
1238 /*
1239 * Update rx threshold levels to values in a particular slot
1240 * of the interrupt-mitigation table bge_rx_threshes.
1241 */
1242 static void
1243 bge_set_thresh(struct ifnet *ifp, int lvl)
1244 {
1245 struct bge_softc *sc = ifp->if_softc;
1246 int s;
1247
1248 /* For now, just save the new Rx-intr thresholds and record
1249 * that a threshold update is pending. Updating the hardware
1250 * registers here (even at splhigh()) is observed to
1251 * occasionally cause glitches where Rx-interrupts are not
1252 * honoured for up to 10 seconds. jonathan (at) NetBSD.org, 2003-04-05
1253 */
1254 s = splnet();
1255 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
1256 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
1257 sc->bge_pending_rxintr_change = 1;
1258 splx(s);
1259 }
1260
1261
1262 /*
1263 * Update Rx thresholds of all bge devices
1264 */
1265 static void
1266 bge_update_all_threshes(int lvl)
1267 {
1268 struct ifnet *ifp;
1269 const char * const namebuf = "bge";
1270 int namelen;
1271 int s;
1272
1273 if (lvl < 0)
1274 lvl = 0;
1275 else if (lvl >= NBGE_RX_THRESH)
1276 lvl = NBGE_RX_THRESH - 1;
1277
1278 namelen = strlen(namebuf);
1279 /*
1280 * Now search all the interfaces for this name/number
1281 */
1282 s = pserialize_read_enter();
1283 IFNET_READER_FOREACH(ifp) {
1284 if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
1285 continue;
1286 /* We got a match: update if doing auto-threshold-tuning */
1287 if (bge_auto_thresh)
1288 bge_set_thresh(ifp, lvl);
1289 }
1290 pserialize_read_exit(s);
1291 }
1292
1293 /*
1294 * Handle events that have triggered interrupts.
1295 */
1296 static void
1297 bge_handle_events(struct bge_softc *sc)
1298 {
1299
1300 return;
1301 }
1302
1303 /*
1304 * Memory management for jumbo frames.
1305 */
1306
1307 static int
1308 bge_alloc_jumbo_mem(struct bge_softc *sc)
1309 {
1310 char *ptr, *kva;
1311 bus_dma_segment_t seg;
1312 int i, rseg, state, error;
1313 struct bge_jpool_entry *entry;
1314
1315 state = error = 0;
1316
1317 /* Grab a big chunk o' storage. */
1318 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
1319 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1320 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
1321 return ENOBUFS;
1322 }
1323
1324 state = 1;
1325 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
1326 BUS_DMA_NOWAIT)) {
1327 aprint_error_dev(sc->bge_dev,
1328 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
1329 error = ENOBUFS;
1330 goto out;
1331 }
1332
1333 state = 2;
1334 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
1335 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
1336 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
1337 error = ENOBUFS;
1338 goto out;
1339 }
1340
1341 state = 3;
1342 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
1343 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
1344 aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
1345 error = ENOBUFS;
1346 goto out;
1347 }
1348
1349 state = 4;
1350 sc->bge_cdata.bge_jumbo_buf = (void *)kva;
1351 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));
1352
1353 SLIST_INIT(&sc->bge_jfree_listhead);
1354 SLIST_INIT(&sc->bge_jinuse_listhead);
1355
1356 /*
1357 * Now divide it up into 9K pieces and save the addresses
1358 * in an array.
1359 */
1360 ptr = sc->bge_cdata.bge_jumbo_buf;
1361 for (i = 0; i < BGE_JSLOTS; i++) {
1362 sc->bge_cdata.bge_jslots[i] = ptr;
1363 ptr += BGE_JLEN;
1364 entry = malloc(sizeof(struct bge_jpool_entry),
1365 M_DEVBUF, M_NOWAIT);
1366 if (entry == NULL) {
1367 aprint_error_dev(sc->bge_dev,
1368 "no memory for jumbo buffer queue!\n");
1369 error = ENOBUFS;
1370 goto out;
1371 }
1372 entry->slot = i;
1373 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
1374 entry, jpool_entries);
1375 }
1376 out:
1377 if (error != 0) {
1378 switch (state) {
1379 case 4:
1380 bus_dmamap_unload(sc->bge_dmatag,
1381 sc->bge_cdata.bge_rx_jumbo_map);
1382 /* FALLTHROUGH */
1383 case 3:
1384 bus_dmamap_destroy(sc->bge_dmatag,
1385 sc->bge_cdata.bge_rx_jumbo_map);
1386 /* FALLTHROUGH */
1387 case 2:
1388 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
1389 /* FALLTHROUGH */
1390 case 1:
1391 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
1392 break;
1393 default:
1394 break;
1395 }
1396 }
1397
1398 return error;
1399 }
1400
1401 /*
1402 * Allocate a jumbo buffer.
1403 */
1404 static void *
1405 bge_jalloc(struct bge_softc *sc)
1406 {
1407 struct bge_jpool_entry *entry;
1408
1409 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
1410
1411 if (entry == NULL) {
1412 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
1413 return NULL;
1414 }
1415
1416 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
1417 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
1418 return (sc->bge_cdata.bge_jslots[entry->slot]);
1419 }
1420
1421 /*
1422 * Release a jumbo buffer.
1423 */
1424 static void
1425 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
1426 {
1427 struct bge_jpool_entry *entry;
1428 struct bge_softc *sc;
1429 int i, s;
1430
1431 /* Extract the softc struct pointer. */
1432 sc = (struct bge_softc *)arg;
1433
1434 if (sc == NULL)
1435 panic("bge_jfree: can't find softc pointer!");
1436
1437 /* calculate the slot this buffer belongs to */
1438
1439 i = ((char *)buf
1440 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
1441
1442 if ((i < 0) || (i >= BGE_JSLOTS))
1443 panic("bge_jfree: asked to free buffer that we don't manage!");
1444
1445 s = splvm();
1446 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
1447 if (entry == NULL)
1448 panic("bge_jfree: buffer not in use!");
1449 entry->slot = i;
1450 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
1451 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
1452
1453 if (__predict_true(m != NULL))
1454 pool_cache_put(mb_cache, m);
1455 splx(s);
1456 }
1457
1458
1459 /*
1460 * Initialize a standard receive ring descriptor.
1461 */
1462 static int
1463 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
1464 bus_dmamap_t dmamap)
1465 {
1466 struct mbuf *m_new = NULL;
1467 struct bge_rx_bd *r;
1468 int error;
1469
1470 if (dmamap == NULL)
1471 dmamap = sc->bge_cdata.bge_rx_std_map[i];
1472
1473 if (dmamap == NULL) {
1474 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
1475 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
1476 if (error != 0)
1477 return error;
1478 }
1479
1480 sc->bge_cdata.bge_rx_std_map[i] = dmamap;
1481
1482 if (m == NULL) {
1483 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1484 if (m_new == NULL)
1485 return ENOBUFS;
1486
1487 MCLGET(m_new, M_DONTWAIT);
1488 if (!(m_new->m_flags & M_EXT)) {
1489 m_freem(m_new);
1490 return ENOBUFS;
1491 }
1492 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1493
1494 } else {
1495 m_new = m;
1496 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1497 m_new->m_data = m_new->m_ext.ext_buf;
1498 }
1499 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
1500 m_adj(m_new, ETHER_ALIGN);
1501 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
1502 BUS_DMA_READ|BUS_DMA_NOWAIT)) {
1503 m_freem(m_new);
1504 return ENOBUFS;
1505 }
1506 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
1507 BUS_DMASYNC_PREREAD);
1508
1509 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
1510 r = &sc->bge_rdata->bge_rx_std_ring[i];
1511 BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
1512 r->bge_flags = BGE_RXBDFLAG_END;
1513 r->bge_len = m_new->m_len;
1514 r->bge_idx = i;
1515
1516 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1517 offsetof(struct bge_ring_data, bge_rx_std_ring) +
1518 i * sizeof (struct bge_rx_bd),
1519 sizeof (struct bge_rx_bd),
1520 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1521
1522 return 0;
1523 }
1524
1525 /*
1526 * Initialize a jumbo receive ring descriptor. This allocates
1527 * a jumbo buffer from the pool managed internally by the driver.
1528 */
1529 static int
1530 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
1531 {
1532 struct mbuf *m_new = NULL;
1533 struct bge_rx_bd *r;
1534 void *buf = NULL;
1535
1536 if (m == NULL) {
1537
1538 /* Allocate the mbuf. */
1539 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1540 if (m_new == NULL)
1541 return ENOBUFS;
1542
1543 /* Allocate the jumbo buffer */
1544 buf = bge_jalloc(sc);
1545 if (buf == NULL) {
1546 m_freem(m_new);
1547 aprint_error_dev(sc->bge_dev,
1548 "jumbo allocation failed -- packet dropped!\n");
1549 return ENOBUFS;
1550 }
1551
1552 /* Attach the buffer to the mbuf. */
1553 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1554 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
1555 bge_jfree, sc);
1556 m_new->m_flags |= M_EXT_RW;
1557 } else {
1558 m_new = m;
1559 buf = m_new->m_data = m_new->m_ext.ext_buf;
1560 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
1561 }
1562 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
1563 m_adj(m_new, ETHER_ALIGN);
1564 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
1565 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
1566 BUS_DMASYNC_PREREAD);
1567 /* Set up the descriptor. */
1568 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
1569 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
1570 BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
1571 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
1572 r->bge_len = m_new->m_len;
1573 r->bge_idx = i;
1574
1575 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1576 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1577 i * sizeof (struct bge_rx_bd),
1578 sizeof (struct bge_rx_bd),
1579 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1580
1581 return 0;
1582 }
1583
1584 /*
1585 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1586 * that's 1MB of memory, which is a lot. For now, we fill only the first
1587 * 256 ring entries and hope that our CPU is fast enough to keep up with
1588 * the NIC.
1589 */
1590 static int
1591 bge_init_rx_ring_std(struct bge_softc *sc)
1592 {
1593 int i;
1594
1595 if (sc->bge_flags & BGEF_RXRING_VALID)
1596 return 0;
1597
1598 for (i = 0; i < BGE_SSLOTS; i++) {
1599 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
1600 return ENOBUFS;
1601 }
1602
1603 sc->bge_std = i - 1;
1604 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1605
1606 sc->bge_flags |= BGEF_RXRING_VALID;
1607
1608 return 0;
1609 }
1610
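/*
 * Free the mbufs on the standard receive ring and clear the
 * descriptors; when disabling the interface, also destroy the
 * per-slot DMA maps.
 */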
1611 static void
1612 bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
1613 {
1614 int i;
1615
1616 if (!(sc->bge_flags & BGEF_RXRING_VALID))
1617 return;
1618
1619 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1620 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1621 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1622 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1623 if (disable) {
1624 bus_dmamap_destroy(sc->bge_dmatag,
1625 sc->bge_cdata.bge_rx_std_map[i]);
1626 sc->bge_cdata.bge_rx_std_map[i] = NULL;
1627 }
1628 }
1629 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
1630 sizeof(struct bge_rx_bd));
1631 }
1632
1633 sc->bge_flags &= ~BGEF_RXRING_VALID;
1634 }
1635
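/*
 * Fill the jumbo receive ring with buffers from the driver-managed
 * jumbo pool and pass the initial producer index to the chip.
 */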
1636 static int
1637 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1638 {
1639 int i;
1640 volatile struct bge_rcb *rcb;
1641
1642 if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
1643 return 0;
1644
1645 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1646 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1647 return ENOBUFS;
1648 }
1649
1650 sc->bge_jumbo = i - 1;
1651 sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;
1652
1653 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1654 rcb->bge_maxlen_flags = 0;
1655 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1656
1657 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1658
1659 return 0;
1660 }
1661
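/*
 * Free the mbufs on the jumbo receive ring and clear the descriptors.
 */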
1662 static void
1663 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1664 {
1665 int i;
1666
1667 if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
1668 return;
1669
1670 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1671 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1672 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1673 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1674 }
1675 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1676 sizeof(struct bge_rx_bd));
1677 }
1678
1679 sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID;
1680 }
1681
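/*
 * Reclaim mbufs still queued on the transmit ring and return their
 * DMA maps to the free list; when disabling, destroy the maps too.
 */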
1682 static void
1683 bge_free_tx_ring(struct bge_softc *sc, bool disable)
1684 {
1685 int i;
1686 struct txdmamap_pool_entry *dma;
1687
1688 if (!(sc->bge_flags & BGEF_TXRING_VALID))
1689 return;
1690
1691 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1692 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1693 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1694 sc->bge_cdata.bge_tx_chain[i] = NULL;
1695 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1696 link);
1697 sc->txdma[i] = 0;
1698 }
1699 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1700 sizeof(struct bge_tx_bd));
1701 }
1702
1703 if (disable) {
1704 while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1705 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1706 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1707 if (sc->bge_dma64) {
1708 bus_dmamap_destroy(sc->bge_dmatag32,
1709 dma->dmamap32);
1710 }
1711 free(dma, M_DEVBUF);
1712 }
1713 SLIST_INIT(&sc->txdma_list);
1714 }
1715
1716 sc->bge_flags &= ~BGEF_TXRING_VALID;
1717 }
1718
1719 static int
1720 bge_init_tx_ring(struct bge_softc *sc)
1721 {
1722 struct ifnet *ifp = &sc->ethercom.ec_if;
1723 int i;
1724 bus_dmamap_t dmamap, dmamap32;
1725 bus_size_t maxsegsz;
1726 struct txdmamap_pool_entry *dma;
1727
1728 if (sc->bge_flags & BGEF_TXRING_VALID)
1729 return 0;
1730
1731 sc->bge_txcnt = 0;
1732 sc->bge_tx_saved_considx = 0;
1733
1734 /* Initialize transmit producer index for host-memory send ring. */
1735 sc->bge_tx_prodidx = 0;
1736 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1737 /* 5700 b2 errata */
1738 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1739 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1740
1741 /* NIC-memory send ring not used; initialize to zero. */
1742 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1743 /* 5700 b2 errata */
1744 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1745 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1746
1747 /* Limit DMA segment size for some chips */
1748 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) &&
1749 (ifp->if_mtu <= ETHERMTU))
1750 maxsegsz = 2048;
1751 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1752 maxsegsz = 4096;
1753 else
1754 maxsegsz = ETHER_MAX_LEN_JUMBO;
1755
1756 if (SLIST_FIRST(&sc->txdma_list) != NULL)
1757 goto alloc_done;
1758
1759 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1760 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
1761 BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1762 &dmamap))
1763 return ENOBUFS;
1764 if (dmamap == NULL)
1765 panic("dmamap NULL in bge_init_tx_ring");
1766 if (sc->bge_dma64) {
1767 if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX,
1768 BGE_NTXSEG, maxsegsz, 0,
1769 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1770 &dmamap32)) {
1771 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1772 return ENOBUFS;
1773 }
1774 if (dmamap32 == NULL)
1775 panic("dmamap32 NULL in bge_init_tx_ring");
1776 } else
1777 dmamap32 = dmamap;
1778 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1779 if (dma == NULL) {
1780 aprint_error_dev(sc->bge_dev,
1781 "can't alloc txdmamap_pool_entry\n");
1782 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1783 if (sc->bge_dma64)
1784 bus_dmamap_destroy(sc->bge_dmatag32, dmamap32);
1785 return ENOMEM;
1786 }
1787 dma->dmamap = dmamap;
1788 dma->dmamap32 = dmamap32;
1789 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1790 }
1791 alloc_done:
1792 sc->bge_flags |= BGEF_TXRING_VALID;
1793
1794 return 0;
1795 }
1796
1797 static void
1798 bge_setmulti(struct bge_softc *sc)
1799 {
1800 struct ethercom *ac = &sc->ethercom;
1801 struct ifnet *ifp = &ac->ec_if;
1802 struct ether_multi *enm;
1803 struct ether_multistep step;
1804 uint32_t hashes[4] = { 0, 0, 0, 0 };
1805 uint32_t h;
1806 int i;
1807
1808 if (ifp->if_flags & IFF_PROMISC)
1809 goto allmulti;
1810
1811 /* Now program new ones. */
1812 ETHER_FIRST_MULTI(step, ac, enm);
1813 while (enm != NULL) {
1814 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1815 /*
1816 * We must listen to a range of multicast addresses.
1817 * For now, just accept all multicasts, rather than
1818 * trying to set only those filter bits needed to match
1819 * the range. (At this time, the only use of address
1820 * ranges is for IP multicast routing, for which the
1821 * range is big enough to require all bits set.)
1822 */
1823 goto allmulti;
1824 }
1825
1826 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1827
1828 /* Just want the 7 least-significant bits. */
1829 h &= 0x7f;
1830
1831 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
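		/*
		 * Worked example with a hypothetical hash value (not derived
		 * from a real address): if h were 0x5a, then (h & 0x60) >> 5
		 * is 2 and h & 0x1F is 26, so the line above would set bit 26
		 * of hashes[2], i.e. hashes[2] |= 0x04000000.
		 */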
1832 ETHER_NEXT_MULTI(step, enm);
1833 }
1834
1835 ifp->if_flags &= ~IFF_ALLMULTI;
1836 goto setit;
1837
1838 allmulti:
1839 ifp->if_flags |= IFF_ALLMULTI;
1840 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1841
1842 setit:
1843 for (i = 0; i < 4; i++)
1844 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1845 }
1846
1847 static void
1848 bge_sig_pre_reset(struct bge_softc *sc, int type)
1849 {
1850
1851 /*
1852 	 * Some chips don't like this, so only do this if ASF is enabled
1853 */
1854 if (sc->bge_asf_mode)
1855 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1856
1857 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1858 switch (type) {
1859 case BGE_RESET_START:
1860 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1861 BGE_FW_DRV_STATE_START);
1862 break;
1863 case BGE_RESET_SHUTDOWN:
1864 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1865 BGE_FW_DRV_STATE_UNLOAD);
1866 break;
1867 case BGE_RESET_SUSPEND:
1868 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1869 BGE_FW_DRV_STATE_SUSPEND);
1870 break;
1871 }
1872 }
1873
1874 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1875 bge_ape_driver_state_change(sc, type);
1876 }
1877
1878 static void
1879 bge_sig_post_reset(struct bge_softc *sc, int type)
1880 {
1881
1882 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1883 switch (type) {
1884 case BGE_RESET_START:
1885 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1886 BGE_FW_DRV_STATE_START_DONE);
1887 /* START DONE */
1888 break;
1889 case BGE_RESET_SHUTDOWN:
1890 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1891 BGE_FW_DRV_STATE_UNLOAD_DONE);
1892 break;
1893 }
1894 }
1895
1896 if (type == BGE_RESET_SHUTDOWN)
1897 bge_ape_driver_state_change(sc, type);
1898 }
1899
1900 static void
1901 bge_sig_legacy(struct bge_softc *sc, int type)
1902 {
1903
1904 if (sc->bge_asf_mode) {
1905 switch (type) {
1906 case BGE_RESET_START:
1907 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1908 BGE_FW_DRV_STATE_START);
1909 break;
1910 case BGE_RESET_SHUTDOWN:
1911 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1912 BGE_FW_DRV_STATE_UNLOAD);
1913 break;
1914 }
1915 }
1916 }
1917
1918 static void
1919 bge_wait_for_event_ack(struct bge_softc *sc)
1920 {
1921 int i;
1922
1923 /* wait up to 2500usec */
1924 for (i = 0; i < 250; i++) {
1925 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1926 BGE_RX_CPU_DRV_EVENT))
1927 break;
1928 DELAY(10);
1929 }
1930 }
1931
1932 static void
1933 bge_stop_fw(struct bge_softc *sc)
1934 {
1935
1936 if (sc->bge_asf_mode) {
1937 bge_wait_for_event_ack(sc);
1938
1939 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1940 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
1941 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1942
1943 bge_wait_for_event_ack(sc);
1944 }
1945 }
1946
1947 static int
1948 bge_poll_fw(struct bge_softc *sc)
1949 {
1950 uint32_t val;
1951 int i;
1952
1953 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1954 for (i = 0; i < BGE_TIMEOUT; i++) {
1955 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
1956 if (val & BGE_VCPU_STATUS_INIT_DONE)
1957 break;
1958 DELAY(100);
1959 }
1960 if (i >= BGE_TIMEOUT) {
1961 aprint_error_dev(sc->bge_dev, "reset timed out\n");
1962 return -1;
1963 }
1964 } else {
1965 /*
1966 * Poll the value location we just wrote until
1967 * we see the 1's complement of the magic number.
1968 * This indicates that the firmware initialization
1969 * is complete.
1970 * XXX 1000ms for Flash and 10000ms for SEEPROM.
1971 */
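		/*
		 * Illustration with a hypothetical magic value: if the value
		 * written by bge_sig_pre_reset() were 0x4B657654, the loop
		 * below would terminate once the mailbox read back its one's
		 * complement, 0xB49A89AB.
		 */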
1972 for (i = 0; i < BGE_TIMEOUT; i++) {
1973 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
1974 if (val == ~BGE_SRAM_FW_MB_MAGIC)
1975 break;
1976 DELAY(10);
1977 }
1978
1979 if ((i >= BGE_TIMEOUT)
1980 && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) {
1981 aprint_error_dev(sc->bge_dev,
1982 "firmware handshake timed out, val = %x\n", val);
1983 return -1;
1984 }
1985 }
1986
1987 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
1988 /* tg3 says we have to wait extra time */
1989 delay(10 * 1000);
1990 }
1991
1992 return 0;
1993 }
1994
1995 int
1996 bge_phy_addr(struct bge_softc *sc)
1997 {
1998 struct pci_attach_args *pa = &(sc->bge_pa);
1999 int phy_addr = 1;
2000
2001 /*
2002 * PHY address mapping for various devices.
2003 *
2004 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2005 * ---------+-------+-------+-------+-------+
2006 * BCM57XX | 1 | X | X | X |
2007 * BCM5704 | 1 | X | 1 | X |
2008 * BCM5717 | 1 | 8 | 2 | 9 |
2009 * BCM5719 | 1 | 8 | 2 | 9 |
2010 * BCM5720 | 1 | 8 | 2 | 9 |
2011 *
2012 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2013 * ---------+-------+-------+-------+-------+
2014 * BCM57XX | X | X | X | X |
2015 * BCM5704 | X | X | X | X |
2016 * BCM5717 | X | X | X | X |
2017 * BCM5719 | 3 | 10 | 4 | 11 |
2018 * BCM5720 | X | X | X | X |
2019 *
2020 * Other addresses may respond but they are not
2021 * IEEE compliant PHYs and should be ignored.
2022 */
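	/*
	 * Worked example of the table above (a sketch, not taken from the
	 * vendor documentation): on a BCM5719 the PHY address starts at the
	 * PCI function number and then 1 is added for copper or 8 for
	 * SerDes, so function 1 yields 1 + 1 = 2 for copper ("F1 Cu") and
	 * 1 + 8 = 9 for SerDes ("F1 Sr").
	 */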
2023 switch (BGE_ASICREV(sc->bge_chipid)) {
2024 case BGE_ASICREV_BCM5717:
2025 case BGE_ASICREV_BCM5719:
2026 case BGE_ASICREV_BCM5720:
2027 phy_addr = pa->pa_function;
2028 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
2029 phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
2030 BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
2031 } else {
2032 phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2033 BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
2034 }
2035 }
2036
2037 return phy_addr;
2038 }
2039
2040 /*
2041 * Do endian, PCI and DMA initialization. Also check the on-board ROM
2042 * self-test results.
2043 */
2044 static int
2045 bge_chipinit(struct bge_softc *sc)
2046 {
2047 uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg;
2048 int i;
2049
2050 /* Set endianness before we access any non-PCI registers. */
2051 misc_ctl = BGE_INIT;
2052 if (sc->bge_flags & BGEF_TAGGED_STATUS)
2053 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
2054 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
2055 misc_ctl);
2056
2057 /*
2058 * Clear the MAC statistics block in the NIC's
2059 * internal memory.
2060 */
2061 for (i = BGE_STATS_BLOCK;
2062 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
2063 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
2064
2065 for (i = BGE_STATUS_BLOCK;
2066 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
2067 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
2068
2069 /* 5717 workaround from tg3 */
2070 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2071 /* Save */
2072 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2073
2074 		/* Temporarily modify MODE_CTL to control TLP */
2075 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2076 CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);
2077
2078 /* Control TLP */
2079 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2080 BGE_TLP_PHYCTL1);
2081 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
2082 reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);
2083
2084 /* Restore */
2085 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2086 }
2087
2088 if (BGE_IS_57765_FAMILY(sc)) {
2089 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
2090 /* Save */
2091 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2092
2093 			/* Temporarily modify MODE_CTL to control TLP */
2094 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2095 CSR_WRITE_4(sc, BGE_MODE_CTL,
2096 reg | BGE_MODECTL_PCIE_TLPADDR1);
2097
2098 /* Control TLP */
2099 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2100 BGE_TLP_PHYCTL5);
2101 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
2102 reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);
2103
2104 /* Restore */
2105 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2106 }
2107 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
2108 /*
2109 			 * For the 57766 and non-Ax versions of 57765, bootcode
2110 			 * needs to set up the PCIE Fast Training Sequence (FTS)
2111 * value to prevent transmit hangs.
2112 */
2113 reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
2114 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
2115 reg | BGE_CPMU_PADRNG_CTL_RDIV2);
2116
2117 /* Save */
2118 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2119
2120 			/* Temporarily modify MODE_CTL to control TLP */
2121 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2122 CSR_WRITE_4(sc, BGE_MODE_CTL,
2123 reg | BGE_MODECTL_PCIE_TLPADDR0);
2124
2125 /* Control TLP */
2126 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2127 BGE_TLP_FTSMAX);
2128 reg &= ~BGE_TLP_FTSMAX_MSK;
2129 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
2130 reg | BGE_TLP_FTSMAX_VAL);
2131
2132 /* Restore */
2133 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2134 }
2135
2136 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
2137 reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
2138 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
2139 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
2140 }
2141
2142 /* Set up the PCI DMA control register. */
2143 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
2144 if (sc->bge_flags & BGEF_PCIE) {
2145 /* Read watermark not used, 128 bytes for write. */
2146 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
2147 device_xname(sc->bge_dev)));
2148 if (sc->bge_mps >= 256)
2149 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2150 else
2151 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2152 } else if (sc->bge_flags & BGEF_PCIX) {
2153 		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
2154 device_xname(sc->bge_dev)));
2155 /* PCI-X bus */
2156 if (BGE_IS_5714_FAMILY(sc)) {
2157 /* 256 bytes for read and write. */
2158 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
2159 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
2160
2161 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
2162 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2163 else
2164 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
2165 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
2166 /*
2167 * In the BCM5703, the DMA read watermark should
2168 * be set to less than or equal to the maximum
2169 * memory read byte count of the PCI-X command
2170 * register.
2171 */
2172 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
2173 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2174 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2175 /* 1536 bytes for read, 384 bytes for write. */
2176 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2177 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2178 } else {
2179 /* 384 bytes for read and write. */
2180 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
2181 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
2182 (0x0F);
2183 }
2184
2185 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2186 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2187 uint32_t tmp;
2188
2189 /* Set ONEDMA_ATONCE for hardware workaround. */
2190 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
2191 if (tmp == 6 || tmp == 7)
2192 dma_rw_ctl |=
2193 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2194
2195 /* Set PCI-X DMA write workaround. */
2196 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
2197 }
2198 } else {
2199 /* Conventional PCI bus: 256 bytes for read and write. */
2200 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
2201 device_xname(sc->bge_dev)));
2202 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2203 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2204
2205 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
2206 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
2207 dma_rw_ctl |= 0x0F;
2208 }
2209
2210 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2211 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
2212 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
2213 BGE_PCIDMARWCTL_ASRT_ALL_BE;
2214
2215 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2216 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2217 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
2218
2219 if (BGE_IS_57765_PLUS(sc)) {
2220 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
2221 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
2222 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
2223
2224 /*
2225 * Enable HW workaround for controllers that misinterpret
2226 * a status tag update and leave interrupts permanently
2227 * disabled.
2228 */
2229 if (!BGE_IS_57765_FAMILY(sc) &&
2230 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2231 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
2232 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
2233 }
2234
2235 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
2236 dma_rw_ctl);
2237
2238 /*
2239 * Set up general mode register.
2240 */
2241 mode_ctl = BGE_DMA_SWAP_OPTIONS;
2242 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2243 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2244 /* Retain Host-2-BMC settings written by APE firmware. */
2245 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
2246 (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
2247 BGE_MODECTL_WORDSWAP_B2HRX_DATA |
2248 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
2249 }
2250 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
2251 BGE_MODECTL_TX_NO_PHDR_CSUM;
2252
2253 /*
2254 	 * BCM5701 B5 has a bug that causes data corruption when using
2255 * 64-bit DMA reads, which can be terminated early and then
2256 * completed later as 32-bit accesses, in combination with
2257 * certain bridges.
2258 */
2259 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2260 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
2261 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
2262
2263 /*
2264 * Tell the firmware the driver is running
2265 */
2266 if (sc->bge_asf_mode & ASF_STACKUP)
2267 mode_ctl |= BGE_MODECTL_STACKUP;
2268
2269 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2270
2271 /*
2272 * Disable memory write invalidate. Apparently it is not supported
2273 * properly by these devices.
2274 */
2275 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
2276 PCI_COMMAND_INVALIDATE_ENABLE);
2277
2278 #ifdef __brokenalpha__
2279 /*
2280 	 * Must ensure that we do not cross an 8K (byte) boundary
2281 * for DMA reads. Our highest limit is 1K bytes. This is a
2282 * restriction on some ALPHA platforms with early revision
2283 * 21174 PCI chipsets, such as the AlphaPC 164lx
2284 */
2285 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
2286 #endif
2287
2288 /* Set the timer prescaler (always 66MHz) */
2289 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
2290
2291 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2292 DELAY(40); /* XXX */
2293
2294 /* Put PHY into ready state */
2295 BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
2296 DELAY(40);
2297 }
2298
2299 return 0;
2300 }
2301
2302 static int
2303 bge_blockinit(struct bge_softc *sc)
2304 {
2305 volatile struct bge_rcb *rcb;
2306 bus_size_t rcb_addr;
2307 struct ifnet *ifp = &sc->ethercom.ec_if;
2308 bge_hostaddr taddr;
2309 uint32_t dmactl, rdmareg, mimode, val;
2310 int i, limit;
2311
2312 /*
2313 * Initialize the memory window pointer register so that
2314 * we can access the first 32K of internal NIC RAM. This will
2315 * allow us to set up the TX send ring RCBs and the RX return
2316 * ring RCBs, plus other things which live in NIC memory.
2317 */
2318 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
2319
2320 if (!BGE_IS_5705_PLUS(sc)) {
2321 /* 57XX step 33 */
2322 /* Configure mbuf memory pool */
2323 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
2324 BGE_BUFFPOOL_1);
2325
2326 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2327 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
2328 else
2329 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
2330
2331 /* 57XX step 34 */
2332 /* Configure DMA resource pool */
2333 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
2334 BGE_DMA_DESCRIPTORS);
2335 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
2336 }
2337
2338 /* 5718 step 11, 57XX step 35 */
2339 /*
2340 * Configure mbuf pool watermarks. New broadcom docs strongly
2341 * recommend these.
2342 */
2343 if (BGE_IS_5717_PLUS(sc)) {
2344 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2345 if (ifp->if_mtu > ETHERMTU) {
2346 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
2347 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
2348 } else {
2349 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
2350 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
2351 }
2352 } else if (BGE_IS_5705_PLUS(sc)) {
2353 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2354
2355 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2356 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
2357 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
2358 } else {
2359 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2360 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2361 }
2362 } else {
2363 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
2364 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
2365 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2366 }
2367
2368 /* 57XX step 36 */
2369 /* Configure DMA resource watermarks */
2370 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2371 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
2372
2373 /* 5718 step 13, 57XX step 38 */
2374 /* Enable buffer manager */
2375 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN;
2376 /*
2377 * Change the arbitration algorithm of TXMBUF read request to
2378 * round-robin instead of priority based for BCM5719. When
2379 * TXFIFO is almost empty, RDMA will hold its request until
2380 * TXFIFO is not almost empty.
2381 */
2382 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2383 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
2384 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2385 sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2386 sc->bge_chipid == BGE_CHIPID_BCM5720_A0)
2387 val |= BGE_BMANMODE_LOMBUF_ATTN;
2388 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
2389
2390 /* 57XX step 39 */
2391 /* Poll for buffer manager start indication */
2392 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2393 DELAY(10);
2394 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2395 break;
2396 }
2397
2398 if (i == BGE_TIMEOUT * 2) {
2399 aprint_error_dev(sc->bge_dev,
2400 "buffer manager failed to start\n");
2401 return ENXIO;
2402 }
2403
2404 /* 57XX step 40 */
2405 /* Enable flow-through queues */
2406 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2407 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2408
2409 /* Wait until queue initialization is complete */
2410 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2411 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2412 break;
2413 DELAY(10);
2414 }
2415
2416 if (i == BGE_TIMEOUT * 2) {
2417 aprint_error_dev(sc->bge_dev,
2418 "flow-through queue init failed\n");
2419 return ENXIO;
2420 }
2421
2422 /*
2423 * Summary of rings supported by the controller:
2424 *
2425 * Standard Receive Producer Ring
2426 * - This ring is used to feed receive buffers for "standard"
2427 * sized frames (typically 1536 bytes) to the controller.
2428 *
2429 * Jumbo Receive Producer Ring
2430 * - This ring is used to feed receive buffers for jumbo sized
2431 * frames (i.e. anything bigger than the "standard" frames)
2432 * to the controller.
2433 *
2434 * Mini Receive Producer Ring
2435 * - This ring is used to feed receive buffers for "mini"
2436 * sized frames to the controller.
2437 * - This feature required external memory for the controller
2438 * but was never used in a production system. Should always
2439 * be disabled.
2440 *
2441 * Receive Return Ring
2442 * - After the controller has placed an incoming frame into a
2443 	 *   receive buffer, that buffer is moved into a receive return
2444 	 *   ring. The driver is then responsible for passing the
2445 * buffer up to the stack. Many versions of the controller
2446 * support multiple RR rings.
2447 *
2448 * Send Ring
2449 * - This ring is used for outgoing frames. Many versions of
2450 * the controller support multiple send rings.
2451 */
2452
2453 /* 5718 step 15, 57XX step 41 */
2454 /* Initialize the standard RX ring control block */
2455 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
2456 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
2457 /* 5718 step 16 */
2458 if (BGE_IS_57765_PLUS(sc)) {
2459 /*
2460 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2461 * Bits 15-2 : Maximum RX frame size
2462 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2463 * Bit 0 : Reserved
2464 */
2465 rcb->bge_maxlen_flags =
2466 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
2467 } else if (BGE_IS_5705_PLUS(sc)) {
2468 /*
2469 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2470 * Bits 15-2 : Reserved (should be 0)
2471 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2472 * Bit 0 : Reserved
2473 */
2474 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2475 } else {
2476 /*
2477 * Ring size is always XXX entries
2478 * Bits 31-16: Maximum RX frame size
2479 * Bits 15-2 : Reserved (should be 0)
2480 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2481 * Bit 0 : Reserved
2482 */
2483 rcb->bge_maxlen_flags =
2484 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2485 }
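	/*
	 * For reference, assuming the usual BGE_RCB_MAXLEN_FLAGS(len, flags)
	 * encoding of ((len) << 16 | (flags)) from the register header,
	 * BGE_RCB_MAXLEN_FLAGS(512, 0) is 0x02000000: ring size 512 in bits
	 * 31-16 and no flag bits set. On 57765-plus chips the low half
	 * instead carries the maximum frame size shifted into bits 15-2.
	 */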
2486 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2487 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2488 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2489 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2490 else
2491 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2492 /* Write the standard receive producer ring control block. */
2493 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2494 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2495 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2496 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2497
2498 /* Reset the standard receive producer ring producer index. */
2499 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2500
2501 /* 57XX step 42 */
2502 /*
2503 * Initialize the jumbo RX ring control block
2504 * We set the 'ring disabled' bit in the flags
2505 * field until we're actually ready to start
2506 * using this ring (i.e. once we set the MTU
2507 * high enough to require it).
2508 */
2509 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2510 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2511 BGE_HOSTADDR(rcb->bge_hostaddr,
2512 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2513 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2514 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2515 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2516 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2517 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2518 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2519 else
2520 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2521 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2522 rcb->bge_hostaddr.bge_addr_hi);
2523 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2524 rcb->bge_hostaddr.bge_addr_lo);
2525 /* Program the jumbo receive producer ring RCB parameters. */
2526 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2527 rcb->bge_maxlen_flags);
2528 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2529 /* Reset the jumbo receive producer ring producer index. */
2530 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2531 }
2532
2533 /* 57XX step 43 */
2534 /* Disable the mini receive producer ring RCB. */
2535 if (BGE_IS_5700_FAMILY(sc)) {
2536 /* Set up dummy disabled mini ring RCB */
2537 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2538 rcb->bge_maxlen_flags =
2539 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2540 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2541 rcb->bge_maxlen_flags);
2542 /* Reset the mini receive producer ring producer index. */
2543 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2544
2545 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2546 offsetof(struct bge_ring_data, bge_info),
2547 sizeof (struct bge_gib),
2548 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2549 }
2550
2551 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2552 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2553 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2554 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2555 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2556 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2557 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2558 }
2559 /* 5718 step 14, 57XX step 44 */
2560 /*
2561 * The BD ring replenish thresholds control how often the
2562 	 * hardware fetches new BDs from the producer rings in host
2563 	 * memory. Setting the value too low on a busy system can
2564 	 * starve the hardware and reduce the throughput.
2565 *
2566 * Set the BD ring replenish thresholds. The recommended
2567 * values are 1/8th the number of descriptors allocated to
2568 * each ring, but since we try to avoid filling the entire
2569 * ring we set these to the minimal value of 8. This needs to
2570 * be done on several of the supported chip revisions anyway,
2571 * to work around HW bugs.
2572 */
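	/*
	 * As a concrete instance of the 1/8th guideline above, a 512-entry
	 * standard ring would suggest a threshold of 64; the minimal value
	 * of 8 is used instead for the reasons given above.
	 */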
2573 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2574 if (BGE_IS_JUMBO_CAPABLE(sc))
2575 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2576
2577 /* 5718 step 18 */
2578 if (BGE_IS_5717_PLUS(sc)) {
2579 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2580 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2581 }
2582
2583 /* 57XX step 45 */
2584 /*
2585 * Disable all send rings by setting the 'ring disabled' bit
2586 * in the flags field of all the TX send ring control blocks,
2587 * located in NIC memory.
2588 */
2589 if (BGE_IS_5700_FAMILY(sc)) {
2590 /* 5700 to 5704 had 16 send rings. */
2591 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2592 } else if (BGE_IS_5717_PLUS(sc)) {
2593 limit = BGE_TX_RINGS_5717_MAX;
2594 } else if (BGE_IS_57765_FAMILY(sc) ||
2595 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2596 limit = BGE_TX_RINGS_57765_MAX;
2597 } else
2598 limit = 1;
2599 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2600 for (i = 0; i < limit; i++) {
2601 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2602 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2603 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2604 rcb_addr += sizeof(struct bge_rcb);
2605 }
2606
2607 /* 57XX step 46 and 47 */
2608 /* Configure send ring RCB 0 (we use only the first ring) */
2609 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2610 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2611 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2612 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2613 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2614 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2615 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2616 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2617 else
2618 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2619 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2620 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2621 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2622
2623 /* 57XX step 48 */
2624 /*
2625 * Disable all receive return rings by setting the
2626 	 * 'ring disabled' bit in the flags field of all the receive
2627 * return ring control blocks, located in NIC memory.
2628 */
2629 if (BGE_IS_5717_PLUS(sc)) {
2630 /* Should be 17, use 16 until we get an SRAM map. */
2631 limit = 16;
2632 } else if (BGE_IS_5700_FAMILY(sc))
2633 limit = BGE_RX_RINGS_MAX;
2634 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2635 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2636 BGE_IS_57765_FAMILY(sc))
2637 limit = 4;
2638 else
2639 limit = 1;
2640 /* Disable all receive return rings */
2641 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2642 for (i = 0; i < limit; i++) {
2643 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2644 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2645 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2646 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2647 BGE_RCB_FLAG_RING_DISABLED));
2648 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2649 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2650 (i * (sizeof(uint64_t))), 0);
2651 rcb_addr += sizeof(struct bge_rcb);
2652 }
2653
2654 /* 57XX step 49 */
2655 /*
2656 * Set up receive return ring 0. Note that the NIC address
2657 * for RX return rings is 0x0. The return rings live entirely
2658 * within the host, so the nicaddr field in the RCB isn't used.
2659 */
2660 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2661 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2662 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2663 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2664 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2665 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2666 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2667
2668 /* 5718 step 24, 57XX step 53 */
2669 /* Set random backoff seed for TX */
2670 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2671 (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2672 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2673 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
2674 BGE_TX_BACKOFF_SEED_MASK);
2675
2676 /* 5718 step 26, 57XX step 55 */
2677 /* Set inter-packet gap */
2678 val = 0x2620;
2679 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2680 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2681 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2682 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2683 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2684
2685 /* 5718 step 27, 57XX step 56 */
2686 /*
2687 * Specify which ring to use for packets that don't match
2688 * any RX rules.
2689 */
2690 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2691
2692 /* 5718 step 28, 57XX step 57 */
2693 /*
2694 * Configure number of RX lists. One interrupt distribution
2695 * list, sixteen active lists, one bad frames class.
2696 */
2697 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2698
2699 /* 5718 step 29, 57XX step 58 */
2700 	/* Initialize RX list placement stats mask. */
2701 if (BGE_IS_575X_PLUS(sc)) {
2702 val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK);
2703 val &= ~BGE_RXLPSTATCONTROL_DACK_FIX;
2704 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val);
2705 } else
2706 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2707
2708 /* 5718 step 30, 57XX step 59 */
2709 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2710
2711 /* 5718 step 33, 57XX step 62 */
2712 /* Disable host coalescing until we get it set up */
2713 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2714
2715 /* 5718 step 34, 57XX step 63 */
2716 /* Poll to make sure it's shut down. */
2717 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2718 DELAY(10);
2719 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2720 break;
2721 }
2722
2723 if (i == BGE_TIMEOUT * 2) {
2724 aprint_error_dev(sc->bge_dev,
2725 "host coalescing engine failed to idle\n");
2726 return ENXIO;
2727 }
2728
2729 /* 5718 step 35, 36, 37 */
2730 /* Set up host coalescing defaults */
2731 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2732 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2733 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2734 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2735 if (!(BGE_IS_5705_PLUS(sc))) {
2736 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2737 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2738 }
2739 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2740 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2741
2742 /* Set up address of statistics block */
2743 if (BGE_IS_5700_FAMILY(sc)) {
2744 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2745 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2746 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2747 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2748 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2749 }
2750
2751 /* 5718 step 38 */
2752 /* Set up address of status block */
2753 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2754 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2755 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2756 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2757 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2758 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2759
2760 /* Set up status block size. */
2761 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2762 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2763 val = BGE_STATBLKSZ_FULL;
2764 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2765 } else {
2766 val = BGE_STATBLKSZ_32BYTE;
2767 bzero(&sc->bge_rdata->bge_status_block, 32);
2768 }
2769
2770 /* 5718 step 39, 57XX step 73 */
2771 /* Turn on host coalescing state machine */
2772 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2773
2774 /* 5718 step 40, 57XX step 74 */
2775 /* Turn on RX BD completion state machine and enable attentions */
2776 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2777 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2778
2779 /* 5718 step 41, 57XX step 75 */
2780 /* Turn on RX list placement state machine */
2781 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2782
2783 /* 57XX step 76 */
2784 /* Turn on RX list selector state machine. */
2785 if (!(BGE_IS_5705_PLUS(sc)))
2786 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2787
2788 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2789 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2790 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2791 BGE_MACMODE_FRMHDR_DMA_ENB;
2792
2793 if (sc->bge_flags & BGEF_FIBER_TBI)
2794 val |= BGE_PORTMODE_TBI;
2795 else if (sc->bge_flags & BGEF_FIBER_MII)
2796 val |= BGE_PORTMODE_GMII;
2797 else
2798 val |= BGE_PORTMODE_MII;
2799
2800 /* 5718 step 42 and 43, 57XX step 77 and 78 */
2801 /* Allow APE to send/receive frames. */
2802 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2803 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2804
2805 /* Turn on DMA, clear stats */
2806 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
2807 /* 5718 step 44 */
2808 DELAY(40);
2809
2810 /* 5718 step 45, 57XX step 79 */
2811 /* Set misc. local control, enable interrupts on attentions */
2812 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2813 if (BGE_IS_5717_PLUS(sc)) {
2814 CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
2815 /* 5718 step 46 */
2816 DELAY(100);
2817 }
2818
2819 /* 57XX step 81 */
2820 /* Turn on DMA completion state machine */
2821 if (!(BGE_IS_5705_PLUS(sc)))
2822 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2823
2824 /* 5718 step 47, 57XX step 82 */
2825 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2826
2827 /* 5718 step 48 */
2828 /* Enable host coalescing bug fix. */
2829 if (BGE_IS_5755_PLUS(sc))
2830 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2831
2832 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2833 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2834
2835 /* Turn on write DMA state machine */
2836 CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
2837 /* 5718 step 49 */
2838 DELAY(40);
2839
2840 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2841
2842 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2843 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2844
2845 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2846 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2847 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2848 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2849 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2850 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2851
2852 if (sc->bge_flags & BGEF_PCIE)
2853 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2854 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
2855 if (ifp->if_mtu <= ETHERMTU)
2856 val |= BGE_RDMAMODE_JMB_2K_MMRR;
2857 }
2858 if (sc->bge_flags & BGEF_TSO) {
2859 val |= BGE_RDMAMODE_TSO4_ENABLE;
2860 if (BGE_IS_5717_PLUS(sc))
2861 val |= BGE_RDMAMODE_TSO6_ENABLE;
2862 }
2863
2864 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2865 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2866 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2867 BGE_RDMAMODE_H2BNC_VLAN_DET;
2868 /*
2869 * Allow multiple outstanding read requests from
2870 * non-LSO read DMA engine.
2871 */
2872 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2873 }
2874
2875 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2876 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2877 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2878 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2879 BGE_IS_57765_PLUS(sc)) {
2880 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2881 rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2882 else
2883 rdmareg = BGE_RDMA_RSRVCTRL;
2884 dmactl = CSR_READ_4(sc, rdmareg);
2885 /*
2886 * Adjust tx margin to prevent TX data corruption and
2887 * fix internal FIFO overflow.
2888 */
2889 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2890 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2891 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2892 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2893 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2894 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2895 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2896 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2897 }
2898 /*
2899 * Enable fix for read DMA FIFO overruns.
2900 * The fix is to limit the number of RX BDs
2901 		 * the hardware would fetch at a time.
2902 */
2903 CSR_WRITE_4(sc, rdmareg, dmactl |
2904 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2905 }
2906
2907 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2908 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2909 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2910 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2911 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2912 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2913 /*
2914 * Allow 4KB burst length reads for non-LSO frames.
2915 * Enable 512B burst length reads for buffer descriptors.
2916 */
2917 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2918 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2919 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2920 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2921 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2922 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2923 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2924 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2925 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2926 }
2927 /* Turn on read DMA state machine */
2928 CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
2929 /* 5718 step 52 */
2930 delay(40);
2931
2932 if (sc->bge_flags & BGEF_RDMA_BUG) {
2933 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2934 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2935 if ((val & 0xFFFF) > BGE_FRAMELEN)
2936 break;
2937 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
2938 break;
2939 }
2940 if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2941 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2942 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2943 val |= BGE_RDMA_TX_LENGTH_WA_5719;
2944 else
2945 val |= BGE_RDMA_TX_LENGTH_WA_5720;
2946 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2947 }
2948 }
2949
2950 /* 5718 step 56, 57XX step 84 */
2951 /* Turn on RX data completion state machine */
2952 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2953
2954 /* Turn on RX data and RX BD initiator state machine */
2955 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2956
2957 /* 57XX step 85 */
2958 /* Turn on Mbuf cluster free state machine */
2959 if (!BGE_IS_5705_PLUS(sc))
2960 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2961
2962 /* 5718 step 57, 57XX step 86 */
2963 /* Turn on send data completion state machine */
2964 val = BGE_SDCMODE_ENABLE;
2965 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2966 val |= BGE_SDCMODE_CDELAY;
2967 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2968
2969 /* 5718 step 58 */
2970 /* Turn on send BD completion state machine */
2971 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2972
2973 /* 57XX step 88 */
2974 /* Turn on RX BD initiator state machine */
2975 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2976
2977 /* 5718 step 60, 57XX step 90 */
2978 /* Turn on send data initiator state machine */
2979 if (sc->bge_flags & BGEF_TSO) {
2980 /* XXX: magic value from Linux driver */
2981 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2982 BGE_SDIMODE_HW_LSO_PRE_DMA);
2983 } else
2984 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2985
2986 /* 5718 step 61, 57XX step 91 */
2987 /* Turn on send BD initiator state machine */
2988 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2989
2990 /* 5718 step 62, 57XX step 92 */
2991 /* Turn on send BD selector state machine */
2992 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2993
2994 /* 5718 step 31, 57XX step 60 */
2995 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2996 /* 5718 step 32, 57XX step 61 */
2997 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2998 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2999
3000 /* ack/clear link change events */
3001 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3002 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3003 BGE_MACSTAT_LINK_CHANGED);
3004 CSR_WRITE_4(sc, BGE_MI_STS, 0);
3005
3006 /*
3007 * Enable attention when the link has changed state for
3008 * devices that use auto polling.
3009 */
3010 if (sc->bge_flags & BGEF_FIBER_TBI) {
3011 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
3012 } else {
3013 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
3014 mimode = BGE_MIMODE_500KHZ_CONST;
3015 else
3016 mimode = BGE_MIMODE_BASE;
3017 /* 5718 step 68. 5718 step 69 (optionally). */
3018 if (BGE_IS_5700_FAMILY(sc) ||
3019 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
3020 mimode |= BGE_MIMODE_AUTOPOLL;
3021 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
3022 }
3023 mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
3024 CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
3025 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
3026 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3027 BGE_EVTENB_MI_INTERRUPT);
3028 }
3029
3030 /*
3031 * Clear any pending link state attention.
3032 * Otherwise some link state change events may be lost until attention
3033 * is cleared by bge_intr() -> bge_link_upd() sequence.
3034 * It's not necessary on newer BCM chips - perhaps enabling link
3035 * state change attentions implies clearing pending attention.
3036 */
3037 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3038 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3039 BGE_MACSTAT_LINK_CHANGED);
3040
3041 /* Enable link state change attentions. */
3042 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
3043
3044 return 0;
3045 }
3046
3047 static const struct bge_revision *
3048 bge_lookup_rev(uint32_t chipid)
3049 {
3050 const struct bge_revision *br;
3051
3052 for (br = bge_revisions; br->br_name != NULL; br++) {
3053 if (br->br_chipid == chipid)
3054 return br;
3055 }
3056
3057 for (br = bge_majorrevs; br->br_name != NULL; br++) {
3058 if (br->br_chipid == BGE_ASICREV(chipid))
3059 return br;
3060 }
3061
3062 return NULL;
3063 }
3064
3065 static const struct bge_product *
3066 bge_lookup(const struct pci_attach_args *pa)
3067 {
3068 const struct bge_product *bp;
3069
3070 for (bp = bge_products; bp->bp_name != NULL; bp++) {
3071 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
3072 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
3073 return bp;
3074 }
3075
3076 return NULL;
3077 }
3078
3079 static uint32_t
3080 bge_chipid(const struct pci_attach_args *pa)
3081 {
3082 uint32_t id;
3083
3084 id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
3085 >> BGE_PCIMISCCTL_ASICREV_SHIFT;
3086
3087 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
3088 switch (PCI_PRODUCT(pa->pa_id)) {
3089 case PCI_PRODUCT_BROADCOM_BCM5717:
3090 case PCI_PRODUCT_BROADCOM_BCM5718:
3091 case PCI_PRODUCT_BROADCOM_BCM5719:
3092 case PCI_PRODUCT_BROADCOM_BCM5720:
3093 case PCI_PRODUCT_BROADCOM_BCM5725:
3094 case PCI_PRODUCT_BROADCOM_BCM5727:
3095 case PCI_PRODUCT_BROADCOM_BCM5762:
3096 case PCI_PRODUCT_BROADCOM_BCM57764:
3097 case PCI_PRODUCT_BROADCOM_BCM57767:
3098 case PCI_PRODUCT_BROADCOM_BCM57787:
3099 id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3100 BGE_PCI_GEN2_PRODID_ASICREV);
3101 break;
3102 case PCI_PRODUCT_BROADCOM_BCM57761:
3103 case PCI_PRODUCT_BROADCOM_BCM57762:
3104 case PCI_PRODUCT_BROADCOM_BCM57765:
3105 case PCI_PRODUCT_BROADCOM_BCM57766:
3106 case PCI_PRODUCT_BROADCOM_BCM57781:
3107 case PCI_PRODUCT_BROADCOM_BCM57782:
3108 case PCI_PRODUCT_BROADCOM_BCM57785:
3109 case PCI_PRODUCT_BROADCOM_BCM57786:
3110 case PCI_PRODUCT_BROADCOM_BCM57791:
3111 case PCI_PRODUCT_BROADCOM_BCM57795:
3112 id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3113 BGE_PCI_GEN15_PRODID_ASICREV);
3114 break;
3115 default:
3116 id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3117 BGE_PCI_PRODID_ASICREV);
3118 break;
3119 }
3120 }
3121
3122 return id;
3123 }
3124
3125 /*
3126 * Return true if MSI can be used with this device.
3127 */
3128 static int
3129 bge_can_use_msi(struct bge_softc *sc)
3130 {
3131 int can_use_msi = 0;
3132
3133 switch (BGE_ASICREV(sc->bge_chipid)) {
3134 case BGE_ASICREV_BCM5714_A0:
3135 case BGE_ASICREV_BCM5714:
3136 /*
3137 * Apparently, MSI doesn't work when these chips are
3138 * configured in single-port mode.
3139 */
3140 break;
3141 case BGE_ASICREV_BCM5750:
3142 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
3143 BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
3144 can_use_msi = 1;
3145 break;
3146 default:
3147 if (BGE_IS_575X_PLUS(sc))
3148 can_use_msi = 1;
3149 }
3150 return (can_use_msi);
3151 }
3152
3153 /*
3154 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
3155 * against our list and return its name if we find a match. Note
3156 * that since the Broadcom controller contains VPD support, we
3157 * can get the device name string from the controller itself instead
3158 * of the compiled-in string. This is a little slow, but it guarantees
3159 * we'll always announce the right product name.
3160 */
3161 static int
3162 bge_probe(device_t parent, cfdata_t match, void *aux)
3163 {
3164 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
3165
3166 if (bge_lookup(pa) != NULL)
3167 return 1;
3168
3169 return 0;
3170 }
3171
3172 static void
3173 bge_attach(device_t parent, device_t self, void *aux)
3174 {
3175 struct bge_softc *sc = device_private(self);
3176 struct pci_attach_args *pa = aux;
3177 prop_dictionary_t dict;
3178 const struct bge_product *bp;
3179 const struct bge_revision *br;
3180 pci_chipset_tag_t pc;
3181 const char *intrstr = NULL;
3182 uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5;
3183 uint32_t command;
3184 struct ifnet *ifp;
3185 uint32_t misccfg, mimode;
3186 void * kva;
3187 u_char eaddr[ETHER_ADDR_LEN];
3188 pcireg_t memtype, subid, reg;
3189 bus_addr_t memaddr;
3190 uint32_t pm_ctl;
3191 bool no_seeprom;
3192 int capmask;
3193 int mii_flags;
3194 int map_flags;
3195 char intrbuf[PCI_INTRSTR_LEN];
3196
3197 bp = bge_lookup(pa);
3198 KASSERT(bp != NULL);
3199
3200 sc->sc_pc = pa->pa_pc;
3201 sc->sc_pcitag = pa->pa_tag;
3202 sc->bge_dev = self;
3203
3204 sc->bge_pa = *pa;
3205 pc = sc->sc_pc;
3206 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
3207
3208 aprint_naive(": Ethernet controller\n");
3209 aprint_normal(": %s Ethernet\n", bp->bp_name);
3210
3211 /*
3212 * Map control/status registers.
3213 */
3214 DPRINTFN(5, ("Map control/status regs\n"));
3215 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
3216 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
3217 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
3218 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
3219
3220 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
3221 aprint_error_dev(sc->bge_dev,
3222 "failed to enable memory mapping!\n");
3223 return;
3224 }
3225
3226 DPRINTFN(5, ("pci_mem_find\n"));
3227 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
3228 switch (memtype) {
3229 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3230 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3231 #if 0
3232 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
3233 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
3234 &memaddr, &sc->bge_bsize) == 0)
3235 break;
3236 #else
3237 /*
3238 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based
3239 		 * systems get an NMI on boot (PR#48451). This problem might not be
3240 * the driver's bug but our PCI common part's bug. Until we
3241 * find a real reason, we ignore the prefetchable bit.
3242 */
3243 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0,
3244 memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) {
3245 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3246 if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize,
3247 map_flags, &sc->bge_bhandle) == 0) {
3248 sc->bge_btag = pa->pa_memt;
3249 break;
3250 }
3251 }
3252 #endif
3253 /* FALLTHROUGH */
3254 default:
3255 aprint_error_dev(sc->bge_dev, "can't find mem space\n");
3256 return;
3257 }
3258
3259 /* Save various chip information. */
3260 sc->bge_chipid = bge_chipid(pa);
3261 sc->bge_phy_addr = bge_phy_addr(sc);
3262
3263 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
3264 &sc->bge_pciecap, NULL) != 0) {
3265 /* PCIe */
3266 sc->bge_flags |= BGEF_PCIE;
3267 /* Extract supported maximum payload size. */
3268 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3269 sc->bge_pciecap + PCIE_DCAP);
3270 sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD);
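		/*
		 * The PCIe "Max_Payload_Size Supported" field encodes powers
		 * of two starting at 128 bytes, so an encoding of 0 yields
		 * bge_mps = 128 and an encoding of 2 yields bge_mps = 512.
		 */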
3271 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3272 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3273 sc->bge_expmrq = 2048;
3274 else
3275 sc->bge_expmrq = 4096;
3276 bge_set_max_readrq(sc);
3277 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) {
3278 /* PCIe without PCIe cap */
3279 sc->bge_flags |= BGEF_PCIE;
3280 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
3281 BGE_PCISTATE_PCI_BUSMODE) == 0) {
3282 /* PCI-X */
3283 sc->bge_flags |= BGEF_PCIX;
3284 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
3285 &sc->bge_pcixcap, NULL) == 0)
3286 aprint_error_dev(sc->bge_dev,
3287 "unable to find PCIX capability\n");
3288 }
3289
3290 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) {
3291 /*
3292 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
3293 * can clobber the chip's PCI config-space power control
3294 * registers, leaving the card in D3 powersave state. We do
3295 * not have memory-mapped registers in this state, so force
3296 * device into D0 state before starting initialization.
3297 */
3298 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
3299 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
3300 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
3301 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
3302 		DELAY(1000);	/* 27 usec is allegedly sufficient */
3303 }
3304
3305 /* Save chipset family. */
3306 switch (BGE_ASICREV(sc->bge_chipid)) {
3307 case BGE_ASICREV_BCM5717:
3308 case BGE_ASICREV_BCM5719:
3309 case BGE_ASICREV_BCM5720:
3310 sc->bge_flags |= BGEF_5717_PLUS;
3311 /* FALLTHROUGH */
3312 case BGE_ASICREV_BCM5762:
3313 case BGE_ASICREV_BCM57765:
3314 case BGE_ASICREV_BCM57766:
3315 if (!BGE_IS_5717_PLUS(sc))
3316 sc->bge_flags |= BGEF_57765_FAMILY;
3317 sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS |
3318 BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE;
3319 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3320 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
3321 /*
3322 * Enable work around for DMA engine miscalculation
3323 * of TXMBUF available space.
3324 */
3325 sc->bge_flags |= BGEF_RDMA_BUG;
3326
3327 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
3328 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) {
3329 /* Jumbo frame on BCM5719 A0 does not work. */
3330 sc->bge_flags &= ~BGEF_JUMBO_CAPABLE;
3331 }
3332 }
3333 break;
3334 case BGE_ASICREV_BCM5755:
3335 case BGE_ASICREV_BCM5761:
3336 case BGE_ASICREV_BCM5784:
3337 case BGE_ASICREV_BCM5785:
3338 case BGE_ASICREV_BCM5787:
3339 case BGE_ASICREV_BCM57780:
3340 sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS;
3341 break;
3342 case BGE_ASICREV_BCM5700:
3343 case BGE_ASICREV_BCM5701:
3344 case BGE_ASICREV_BCM5703:
3345 case BGE_ASICREV_BCM5704:
3346 sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE;
3347 break;
3348 case BGE_ASICREV_BCM5714_A0:
3349 case BGE_ASICREV_BCM5780:
3350 case BGE_ASICREV_BCM5714:
3351 sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE;
3352 /* FALLTHROUGH */
3353 case BGE_ASICREV_BCM5750:
3354 case BGE_ASICREV_BCM5752:
3355 case BGE_ASICREV_BCM5906:
3356 sc->bge_flags |= BGEF_575X_PLUS;
3357 /* FALLTHROUGH */
3358 case BGE_ASICREV_BCM5705:
3359 sc->bge_flags |= BGEF_5705_PLUS;
3360 break;
3361 }
3362
3363 /* Identify chips with APE processor. */
3364 switch (BGE_ASICREV(sc->bge_chipid)) {
3365 case BGE_ASICREV_BCM5717:
3366 case BGE_ASICREV_BCM5719:
3367 case BGE_ASICREV_BCM5720:
3368 case BGE_ASICREV_BCM5761:
3369 case BGE_ASICREV_BCM5762:
3370 sc->bge_flags |= BGEF_APE;
3371 break;
3372 }
3373
3374 /*
3375 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3376 * not actually a MAC controller bug but an issue with the embedded
3377 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3378 */
3379 if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0))
3380 sc->bge_flags |= BGEF_40BIT_BUG;
3381
3382 /* Chips with APE need BAR2 access for APE registers/memory. */
3383 if ((sc->bge_flags & BGEF_APE) != 0) {
3384 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
3385 #if 0
3386 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
3387 &sc->bge_apetag, &sc->bge_apehandle, NULL,
3388 &sc->bge_apesize)) {
3389 aprint_error_dev(sc->bge_dev,
3390 "couldn't map BAR2 memory\n");
3391 return;
3392 }
3393 #else
3394 /*
3395 		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720
3396 		 * based systems get an NMI on boot (PR#48451). This may not be
3397 		 * a bug in this driver but in our common PCI code. Until we
3398 		 * find the real cause, ignore the prefetchable bit.
3399 */
3400 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2,
3401 memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) {
3402 aprint_error_dev(sc->bge_dev,
3403 "couldn't map BAR2 memory\n");
3404 return;
3405 }
3406
3407 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3408 if (bus_space_map(pa->pa_memt, memaddr,
3409 sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) {
3410 aprint_error_dev(sc->bge_dev,
3411 "couldn't map BAR2 memory\n");
3412 return;
3413 }
3414 sc->bge_apetag = pa->pa_memt;
3415 #endif
3416
3417 /* Enable APE register/memory access by host driver. */
3418 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
3419 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3420 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3421 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3422 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
3423
3424 bge_ape_lock_init(sc);
3425 bge_ape_read_fw_ver(sc);
3426 }
3427
3428 	/* Identify the chips that use a CPMU. */
3429 if (BGE_IS_5717_PLUS(sc) ||
3430 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3431 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3432 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
3433 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
3434 sc->bge_flags |= BGEF_CPMU_PRESENT;
3435
3436 /* Set MI_MODE */
3437 mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
3438 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
3439 mimode |= BGE_MIMODE_500KHZ_CONST;
3440 else
3441 mimode |= BGE_MIMODE_BASE;
3442 CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
3443
3444 /*
3445 * When using the BCM5701 in PCI-X mode, data corruption has
3446 * been observed in the first few bytes of some received packets.
3447 * Aligning the packet buffer in memory eliminates the corruption.
3448 * Unfortunately, this misaligns the packet payloads. On platforms
3449 * which do not support unaligned accesses, we will realign the
3450 * payloads by copying the received packets.
3451 */
3452 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
3453 sc->bge_flags & BGEF_PCIX)
3454 sc->bge_flags |= BGEF_RX_ALIGNBUG;
3455
3456 if (BGE_IS_5700_FAMILY(sc))
3457 sc->bge_flags |= BGEF_JUMBO_CAPABLE;
3458
3459 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
3460 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
3461
3462 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3463 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3464 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
3465 sc->bge_flags |= BGEF_IS_5788;
3466
3467 /*
3468 	 * Some controllers seem to require special firmware to use
3469 	 * TSO. That firmware is not available to FreeBSD, and Linux
3470 	 * claims that TSO performed by the firmware is slower than
3471 	 * hardware based TSO. Moreover, the firmware based TSO has a
3472 	 * known bug: it cannot handle TSO if the ethernet header plus
3473 	 * IP/TCP header is greater than 80 bytes. A workaround for
3474 	 * that bug exists, but it appears to cost more than simply
3475 	 * not using TSO at all. Some hardware also has TSO bugs, so
3476 	 * limit TSO to the controllers that are not affected by TSO
3477 	 * issues (e.g. 5755 or higher).
3478 */
3479 if (BGE_IS_5755_PLUS(sc)) {
3480 /*
3481 		 * BCM5754 and BCM5787 share the same ASIC ID, so an
3482 		 * explicit device ID check is required.
3483 */
3484 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
3485 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
3486 sc->bge_flags |= BGEF_TSO;
3487 /* TSO on BCM5719 A0 does not work. */
3488 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
3489 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0))
3490 sc->bge_flags &= ~BGEF_TSO;
3491 }
3492
3493 capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */
3494 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
3495 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3496 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3497 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
3498 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
3499 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
3500 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
3501 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
3502 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
3503 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
3504 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
3505 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
3506 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
3507 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
3508 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3509 /* These chips are 10/100 only. */
3510 capmask &= ~BMSR_EXTSTAT;
3511 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3512 }
3513
3514 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3515 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3516 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
3517 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
3518 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3519
3520 /* Set various PHY bug flags. */
3521 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3522 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3523 sc->bge_phy_flags |= BGEPHYF_CRC_BUG;
3524 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
3525 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
3526 sc->bge_phy_flags |= BGEPHYF_ADC_BUG;
3527 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3528 sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG;
3529 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3530 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
3531 PCI_VENDOR(subid) == PCI_VENDOR_DELL)
3532 sc->bge_phy_flags |= BGEPHYF_NO_3LED;
3533 if (BGE_IS_5705_PLUS(sc) &&
3534 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
3535 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3536 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
3537 !BGE_IS_57765_PLUS(sc)) {
3538 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
3539 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3540 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3541 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
3542 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
3543 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
3544 sc->bge_phy_flags |= BGEPHYF_JITTER_BUG;
3545 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
3546 sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM;
3547 } else
3548 sc->bge_phy_flags |= BGEPHYF_BER_BUG;
3549 }
3550
3551 /*
3552 * SEEPROM check.
3553 	 * First check whether the firmware knows we do not have a SEEPROM.
3554 */
3555 if (prop_dictionary_get_bool(device_properties(self),
3556 "without-seeprom", &no_seeprom) && no_seeprom)
3557 sc->bge_flags |= BGEF_NO_EEPROM;
3558
3559 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
3560 sc->bge_flags |= BGEF_NO_EEPROM;
3561
3562 /* Now check the 'ROM failed' bit on the RX CPU */
3563 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
3564 sc->bge_flags |= BGEF_NO_EEPROM;
3565
3566 sc->bge_asf_mode = 0;
3567 /* No ASF if APE present. */
3568 if ((sc->bge_flags & BGEF_APE) == 0) {
3569 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3570 BGE_SRAM_DATA_SIG_MAGIC)) {
3571 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
3572 BGE_HWCFG_ASF) {
3573 sc->bge_asf_mode |= ASF_ENABLE;
3574 sc->bge_asf_mode |= ASF_STACKUP;
3575 if (BGE_IS_575X_PLUS(sc))
3576 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3577 }
3578 }
3579 }
3580
3581 int counts[PCI_INTR_TYPE_SIZE] = {
3582 [PCI_INTR_TYPE_INTX] = 1,
3583 [PCI_INTR_TYPE_MSI] = 1,
3584 [PCI_INTR_TYPE_MSIX] = 1,
3585 };
3586 int max_type = PCI_INTR_TYPE_MSIX;
3587
3588 if (!bge_can_use_msi(sc)) {
3589 /* MSI broken, allow only INTx */
3590 max_type = PCI_INTR_TYPE_INTX;
3591 }
3592
3593 if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) {
3594 aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n");
3595 return;
3596 }
3597
3598 DPRINTFN(5, ("pci_intr_string\n"));
3599 intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf,
3600 sizeof(intrbuf));
3601 DPRINTFN(5, ("pci_intr_establish\n"));
3602 sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0],
3603 IPL_NET, bge_intr, sc, device_xname(sc->bge_dev));
3604 if (sc->bge_intrhand == NULL) {
3605 pci_intr_release(pc, sc->bge_pihp, 1);
3606 sc->bge_pihp = NULL;
3607
3608 aprint_error_dev(self, "couldn't establish interrupt");
3609 if (intrstr != NULL)
3610 aprint_error(" at %s", intrstr);
3611 aprint_error("\n");
3612 return;
3613 }
3614 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
3615
3616 switch (pci_intr_type(pc, sc->bge_pihp[0])) {
3617 case PCI_INTR_TYPE_MSIX:
3618 case PCI_INTR_TYPE_MSI:
3619 KASSERT(bge_can_use_msi(sc));
3620 sc->bge_flags |= BGEF_MSI;
3621 break;
3622 default:
3623 /* nothing to do */
3624 break;
3625 }
3626
3627 /*
3628 	 * All controllers except the BCM5700 support tagged status, but
3629 	 * we use tagged status only in the MSI case on BCM5717; otherwise
3630 	 * MSI on BCM5717 does not work.
3631 */
3632 if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI)
3633 sc->bge_flags |= BGEF_TAGGED_STATUS;
3634
3635 /*
3636 	 * Reset the NVRAM before bge_reset(). This is required to acquire
3637 	 * the NVRAM lock in bge_reset().
3638 */
3639 CSR_WRITE_4(sc, BGE_EE_ADDR,
3640 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
3641 delay(1000);
3642 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
3643
3644 bge_stop_fw(sc);
3645 bge_sig_pre_reset(sc, BGE_RESET_START);
3646 if (bge_reset(sc))
3647 aprint_error_dev(sc->bge_dev, "chip reset failed\n");
3648
3649 /*
3650 * Read the hardware config word in the first 32k of NIC internal
3651 * memory, or fall back to the config word in the EEPROM.
3652 * Note: on some BCM5700 cards, this value appears to be unset.
3653 */
3654 hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0;
3655 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3656 BGE_SRAM_DATA_SIG_MAGIC) {
3657 uint32_t tmp;
3658
3659 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3660 tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >>
3661 BGE_SRAM_DATA_VER_SHIFT;
3662 if ((0 < tmp) && (tmp < 0x100))
3663 hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2);
3664 if (sc->bge_flags & BGEF_PCIE)
3665 hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3);
3666 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
3667 hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4);
3668 if (BGE_IS_5717_PLUS(sc))
3669 hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5);
3670 } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) {
3671 bge_read_eeprom(sc, (void *)&hwcfg,
3672 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
3673 hwcfg = be32toh(hwcfg);
3674 }
3675 aprint_normal_dev(sc->bge_dev,
3676 "HW config %08x, %08x, %08x, %08x %08x\n",
3677 hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5);
3678
3679 bge_sig_legacy(sc, BGE_RESET_START);
3680 bge_sig_post_reset(sc, BGE_RESET_START);
3681
3682 if (bge_chipinit(sc)) {
3683 aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
3684 bge_release_resources(sc);
3685 return;
3686 }
3687
3688 /*
3689 * Get station address from the EEPROM.
3690 */
3691 if (bge_get_eaddr(sc, eaddr)) {
3692 aprint_error_dev(sc->bge_dev,
3693 "failed to read station address\n");
3694 bge_release_resources(sc);
3695 return;
3696 }
3697
3698 br = bge_lookup_rev(sc->bge_chipid);
3699
3700 if (br == NULL) {
3701 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
3702 sc->bge_chipid);
3703 } else {
3704 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
3705 br->br_name, sc->bge_chipid);
3706 }
3707 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
3708
3709 /* Allocate the general information block and ring buffers. */
3710 if (pci_dma64_available(pa)) {
3711 sc->bge_dmatag = pa->pa_dmat64;
3712 sc->bge_dmatag32 = pa->pa_dmat;
3713 sc->bge_dma64 = true;
3714 } else {
3715 sc->bge_dmatag = pa->pa_dmat;
3716 sc->bge_dmatag32 = pa->pa_dmat;
3717 sc->bge_dma64 = false;
3718 }
3719
3720 /* 40bit DMA workaround */
3721 if (sizeof(bus_addr_t) > 4) {
3722 if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) {
3723 bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */
3724
3725 if (bus_dmatag_subregion(olddmatag, 0,
3726 (bus_addr_t)(1ULL << 40), &(sc->bge_dmatag),
3727 BUS_DMA_NOWAIT) != 0) {
3728 aprint_error_dev(self,
3729 "WARNING: failed to restrict dma range,"
3730 " falling back to parent bus dma range\n");
3731 sc->bge_dmatag = olddmatag;
3732 }
3733 }
3734 }
3735 SLIST_INIT(&sc->txdma_list);
3736 DPRINTFN(5, ("bus_dmamem_alloc\n"));
3737 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
3738 PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
3739 &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) {
3740 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
3741 return;
3742 }
3743 DPRINTFN(5, ("bus_dmamem_map\n"));
3744 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
3745 sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva,
3746 BUS_DMA_NOWAIT)) {
3747 aprint_error_dev(sc->bge_dev,
3748 "can't map DMA buffers (%zu bytes)\n",
3749 sizeof(struct bge_ring_data));
3750 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3751 sc->bge_ring_rseg);
3752 return;
3753 }
3754 DPRINTFN(5, ("bus_dmamem_create\n"));
3755 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
3756 sizeof(struct bge_ring_data), 0,
3757 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
3758 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
3759 bus_dmamem_unmap(sc->bge_dmatag, kva,
3760 sizeof(struct bge_ring_data));
3761 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3762 sc->bge_ring_rseg);
3763 return;
3764 }
3765 DPRINTFN(5, ("bus_dmamem_load\n"));
3766 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
3767 sizeof(struct bge_ring_data), NULL,
3768 BUS_DMA_NOWAIT)) {
3769 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3770 bus_dmamem_unmap(sc->bge_dmatag, kva,
3771 sizeof(struct bge_ring_data));
3772 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3773 sc->bge_ring_rseg);
3774 return;
3775 }
3776
3777 DPRINTFN(5, ("bzero\n"));
3778 sc->bge_rdata = (struct bge_ring_data *)kva;
3779
3780 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
3781
3782 /* Try to allocate memory for jumbo buffers. */
3783 if (BGE_IS_JUMBO_CAPABLE(sc)) {
3784 if (bge_alloc_jumbo_mem(sc)) {
3785 aprint_error_dev(sc->bge_dev,
3786 "jumbo buffer allocation failed\n");
3787 } else
3788 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3789 }
3790
3791 /* Set default tuneable values. */
3792 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3793 sc->bge_rx_coal_ticks = 150;
3794 sc->bge_rx_max_coal_bds = 64;
3795 sc->bge_tx_coal_ticks = 300;
3796 sc->bge_tx_max_coal_bds = 400;
3797 if (BGE_IS_5705_PLUS(sc)) {
3798 sc->bge_tx_coal_ticks = (12 * 5);
3799 sc->bge_tx_max_coal_bds = (12 * 5);
3800 aprint_verbose_dev(sc->bge_dev,
3801 "setting short Tx thresholds\n");
3802 }
3803
3804 if (BGE_IS_5717_PLUS(sc))
3805 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3806 else if (BGE_IS_5705_PLUS(sc))
3807 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3808 else
3809 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3810
3811 /* Set up ifnet structure */
3812 ifp = &sc->ethercom.ec_if;
3813 ifp->if_softc = sc;
3814 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3815 ifp->if_ioctl = bge_ioctl;
3816 ifp->if_stop = bge_stop;
3817 ifp->if_start = bge_start;
3818 ifp->if_init = bge_init;
3819 ifp->if_watchdog = bge_watchdog;
3820 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
3821 IFQ_SET_READY(&ifp->if_snd);
3822 DPRINTFN(5, ("strcpy if_xname\n"));
3823 strcpy(ifp->if_xname, device_xname(sc->bge_dev));
3824
3825 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3826 sc->ethercom.ec_if.if_capabilities |=
3827 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
3828 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */
3829 sc->ethercom.ec_if.if_capabilities |=
3830 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3831 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
3832 #endif
3833 sc->ethercom.ec_capabilities |=
3834 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
3835
3836 if (sc->bge_flags & BGEF_TSO)
3837 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
3838
3839 /*
3840 * Do MII setup.
3841 */
3842 DPRINTFN(5, ("mii setup\n"));
3843 sc->bge_mii.mii_ifp = ifp;
3844 sc->bge_mii.mii_readreg = bge_miibus_readreg;
3845 sc->bge_mii.mii_writereg = bge_miibus_writereg;
3846 sc->bge_mii.mii_statchg = bge_miibus_statchg;
3847
3848 /*
3849 * Figure out what sort of media we have by checking the hardware
3850 * config word. Note: on some BCM5700 cards, this value appears to be
3851 * unset. If that's the case, we have to rely on identifying the NIC
3852 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
3853 * The SysKonnect SK-9D41 is a 1000baseSX card.
3854 */
3855 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3856 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3857 if (BGE_IS_5705_PLUS(sc)) {
3858 sc->bge_flags |= BGEF_FIBER_MII;
3859 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3860 } else
3861 sc->bge_flags |= BGEF_FIBER_TBI;
3862 }
3863
3864 /* Set bge_phy_flags before prop_dictionary_set_uint32() */
3865 if (BGE_IS_JUMBO_CAPABLE(sc))
3866 sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE;
3867
3868 /* set phyflags and chipid before mii_attach() */
3869 dict = device_properties(self);
3870 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags);
3871 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
3872
3873 if (sc->bge_flags & BGEF_FIBER_TBI) {
3874 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3875 bge_ifmedia_sts);
3876 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
3877 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
3878 0, NULL);
3879 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3880 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3881 /* Pretend the user requested this setting */
3882 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3883 } else {
3884 /*
3885 		 * Do transceiver setup and tell the firmware the
3886 		 * driver is down so we can try to get access to the
3887 		 * PHY for probing if ASF is running. Retry a couple of
3888 		 * times if we get a conflict with the ASF firmware
3889 		 * accessing the PHY.
3890 */
3891 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3892 bge_asf_driver_up(sc);
3893
3894 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3895 bge_ifmedia_sts);
3896 mii_flags = MIIF_DOPAUSE;
3897 if (sc->bge_flags & BGEF_FIBER_MII)
3898 mii_flags |= MIIF_HAVEFIBER;
3899 mii_attach(sc->bge_dev, &sc->bge_mii, capmask, sc->bge_phy_addr,
3900 MII_OFFSET_ANY, mii_flags);
3901
3902 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
3903 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
3904 ifmedia_add(&sc->bge_mii.mii_media,
3905 IFM_ETHER|IFM_MANUAL, 0, NULL);
3906 ifmedia_set(&sc->bge_mii.mii_media,
3907 IFM_ETHER|IFM_MANUAL);
3908 } else
3909 ifmedia_set(&sc->bge_mii.mii_media,
3910 IFM_ETHER|IFM_AUTO);
3911
3912 /*
3913 * Now tell the firmware we are going up after probing the PHY
3914 */
3915 if (sc->bge_asf_mode & ASF_STACKUP)
3916 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3917 }
3918
3919 /*
3920 * Call MI attach routine.
3921 */
3922 DPRINTFN(5, ("if_attach\n"));
3923 if_attach(ifp);
3924 if_deferred_start_init(ifp, NULL);
3925 DPRINTFN(5, ("ether_ifattach\n"));
3926 ether_ifattach(ifp, eaddr);
3927 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
3928 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
3929 RND_TYPE_NET, RND_FLAG_DEFAULT);
3930 #ifdef BGE_EVENT_COUNTERS
3931 /*
3932 * Attach event counters.
3933 */
3934 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
3935 NULL, device_xname(sc->bge_dev), "intr");
3936 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR,
3937 NULL, device_xname(sc->bge_dev), "intr_spurious");
3938 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR,
3939 NULL, device_xname(sc->bge_dev), "intr_spurious2");
3940 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
3941 NULL, device_xname(sc->bge_dev), "tx_xoff");
3942 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
3943 NULL, device_xname(sc->bge_dev), "tx_xon");
3944 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
3945 NULL, device_xname(sc->bge_dev), "rx_xoff");
3946 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
3947 NULL, device_xname(sc->bge_dev), "rx_xon");
3948 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
3949 NULL, device_xname(sc->bge_dev), "rx_macctl");
3950 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
3951 NULL, device_xname(sc->bge_dev), "xoffentered");
3952 #endif /* BGE_EVENT_COUNTERS */
3953 DPRINTFN(5, ("callout_init\n"));
3954 callout_init(&sc->bge_timeout, 0);
3955
3956 if (pmf_device_register(self, NULL, NULL))
3957 pmf_class_network_register(self, ifp);
3958 else
3959 aprint_error_dev(self, "couldn't establish power handler\n");
3960
3961 bge_sysctl_init(sc);
3962
3963 #ifdef BGE_DEBUG
3964 bge_debug_info(sc);
3965 #endif
3966 }
3967
3968 /*
3969 * Stop all chip I/O so that the kernel's probe routines don't
3970 * get confused by errant DMAs when rebooting.
3971 */
3972 static int
3973 bge_detach(device_t self, int flags __unused)
3974 {
3975 struct bge_softc *sc = device_private(self);
3976 struct ifnet *ifp = &sc->ethercom.ec_if;
3977 int s;
3978
3979 s = splnet();
3980 /* Stop the interface. Callouts are stopped in it. */
3981 bge_stop(ifp, 1);
3982 splx(s);
3983
3984 mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3985
3986 /* Delete all remaining media. */
3987 ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);
3988
3989 ether_ifdetach(ifp);
3990 if_detach(ifp);
3991
3992 bge_release_resources(sc);
3993
3994 return 0;
3995 }
3996
3997 static void
3998 bge_release_resources(struct bge_softc *sc)
3999 {
4000
4001 /* Detach sysctl */
4002 if (sc->bge_log != NULL)
4003 sysctl_teardown(&sc->bge_log);
4004
4005 #ifdef BGE_EVENT_COUNTERS
4006 /* Detach event counters. */
4007 evcnt_detach(&sc->bge_ev_intr);
4008 evcnt_detach(&sc->bge_ev_intr_spurious);
4009 evcnt_detach(&sc->bge_ev_intr_spurious2);
4010 evcnt_detach(&sc->bge_ev_tx_xoff);
4011 evcnt_detach(&sc->bge_ev_tx_xon);
4012 evcnt_detach(&sc->bge_ev_rx_xoff);
4013 evcnt_detach(&sc->bge_ev_rx_xon);
4014 evcnt_detach(&sc->bge_ev_rx_macctl);
4015 evcnt_detach(&sc->bge_ev_xoffentered);
4016 #endif /* BGE_EVENT_COUNTERS */
4017
4018 /* Disestablish the interrupt handler */
4019 if (sc->bge_intrhand != NULL) {
4020 pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
4021 pci_intr_release(sc->sc_pc, sc->bge_pihp, 1);
4022 sc->bge_intrhand = NULL;
4023 }
4024
4025 if (sc->bge_dmatag != NULL) {
4026 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
4027 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
4028 bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata,
4029 sizeof(struct bge_ring_data));
4030 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
4031 sc->bge_ring_rseg);
4032 }
4033
4034 /* Unmap the device registers */
4035 if (sc->bge_bsize != 0) {
4036 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
4037 sc->bge_bsize = 0;
4038 }
4039
4040 /* Unmap the APE registers */
4041 if (sc->bge_apesize != 0) {
4042 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
4043 sc->bge_apesize);
4044 sc->bge_apesize = 0;
4045 }
4046 }
4047
4048 static int
4049 bge_reset(struct bge_softc *sc)
4050 {
4051 uint32_t cachesize, command;
4052 uint32_t reset, mac_mode, mac_mode_mask;
4053 pcireg_t devctl, reg;
4054 int i, val;
4055 void (*write_op)(struct bge_softc *, int, int);
4056
4057 /* Make mask for BGE_MAC_MODE register. */
4058 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
4059 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4060 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
4061 	/* Keep the bits of BGE_MAC_MODE selected by mac_mode_mask in mac_mode */
4062 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
4063
4064 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
4065 (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
4066 if (sc->bge_flags & BGEF_PCIE)
4067 write_op = bge_writemem_direct;
4068 else
4069 write_op = bge_writemem_ind;
4070 } else
4071 write_op = bge_writereg_ind;
4072
4073 /* 57XX step 4 */
4074 /* Acquire the NVM lock */
4075 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 &&
4076 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
4077 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
4078 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
4079 for (i = 0; i < 8000; i++) {
4080 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
4081 BGE_NVRAMSWARB_GNT1)
4082 break;
4083 DELAY(20);
4084 }
4085 if (i == 8000) {
4086 			printf("%s: NVRAM lock timed out!\n",
4087 device_xname(sc->bge_dev));
4088 }
4089 }
4090
4091 /* Take APE lock when performing reset. */
4092 bge_ape_lock(sc, BGE_APE_LOCK_GRC);
4093
4094 /* 57XX step 3 */
4095 /* Save some important PCI state. */
4096 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
4097 /* 5718 reset step 3 */
4098 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
4099
4100 /* 5718 reset step 5, 57XX step 5b-5d */
4101 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
4102 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4103 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
4104
4105 /* XXX ???: Disable fastboot on controllers that support it. */
4106 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
4107 BGE_IS_5755_PLUS(sc))
4108 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
4109
4110 /* 5718 reset step 2, 57XX step 6 */
4111 /*
4112 * Write the magic number to SRAM at offset 0xB50.
4113 	 * When the firmware finishes its initialization it will write
4114 	 * the one's complement, ~BGE_MAGIC_NUMBER, to the same location.
4115 */
4116 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
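	/*
	 * Editor's illustrative sketch (not part of the driver): the
	 * handshake described above is typically completed by polling the
	 * same mailbox until the firmware has written back the one's
	 * complement of the magic value.  bge_poll_fw(), called later in
	 * this function, is assumed to do something equivalent; the
	 * timeout below is arbitrary.
	 */
#if 0
	{
		uint32_t fwmb;
		int t;

		for (t = 0; t < 10000; t++) {
			fwmb = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
			if (fwmb == ~BGE_SRAM_FW_MB_MAGIC)
				break;		/* firmware is up */
			DELAY(100);
		}
	}
#endif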
4117
4118 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
4119 val = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
4120 val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
4121 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
4122 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val);
4123 }
4124
4125 /* 5718 reset step 6, 57XX step 7 */
4126 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
4127 /*
4128 * XXX: from FreeBSD/Linux; no documentation
4129 */
4130 if (sc->bge_flags & BGEF_PCIE) {
4131 if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) &&
4132 !BGE_IS_57765_PLUS(sc) &&
4133 (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
4134 (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
4135 /* PCI Express 1.0 system */
4136 CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
4137 BGE_PHY_PCIE_SCRAM_MODE);
4138 }
4139 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
4140 /*
4141 * Prevent PCI Express link training
4142 * during global reset.
4143 */
4144 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
4145 reset |= (1 << 29);
4146 }
4147 }
4148
4149 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
4150 i = CSR_READ_4(sc, BGE_VCPU_STATUS);
4151 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
4152 i | BGE_VCPU_STATUS_DRV_RESET);
4153 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
4154 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
4155 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
4156 }
4157
4158 /*
4159 * Set GPHY Power Down Override to leave GPHY
4160 * powered up in D0 uninitialized.
4161 */
4162 if (BGE_IS_5705_PLUS(sc) &&
4163 (sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
4164 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
4165
4166 /* Issue global reset */
4167 write_op(sc, BGE_MISC_CFG, reset);
4168
4169 /* 5718 reset step 7, 57XX step 8 */
4170 if (sc->bge_flags & BGEF_PCIE)
4171 delay(100*1000); /* too big */
4172 else
4173 delay(1000);
4174
4175 if (sc->bge_flags & BGEF_PCIE) {
4176 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
4177 DELAY(500000);
4178 /* XXX: Magic Numbers */
4179 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4180 BGE_PCI_UNKNOWN0);
4181 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4182 BGE_PCI_UNKNOWN0,
4183 reg | (1 << 15));
4184 }
4185 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4186 sc->bge_pciecap + PCIE_DCSR);
4187 /* Clear enable no snoop and disable relaxed ordering. */
4188 devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD |
4189 PCIE_DCSR_ENA_NO_SNOOP);
4190
4191 /* Set PCIE max payload size to 128 for older PCIe devices */
4192 if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
4193 devctl &= ~(0x00e0);
4194 /* Clear device status register. Write 1b to clear */
4195 devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED
4196 | PCIE_DCSR_NFED | PCIE_DCSR_CED;
4197 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4198 sc->bge_pciecap + PCIE_DCSR, devctl);
4199 bge_set_max_readrq(sc);
4200 }
4201
4202 /* From Linux: dummy read to flush PCI posted writes */
4203 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
4204
4205 /*
4206 	 * Reset some of the PCI state that got zapped by the reset.
4207 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be
4208 * set, too.
4209 */
4210 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
4211 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4212 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
4213 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
4214 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
4215 (sc->bge_flags & BGEF_PCIX) != 0)
4216 val |= BGE_PCISTATE_RETRY_SAME_DMA;
4217 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4218 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
4219 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
4220 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
4221 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val);
4222 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
4223 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
4224
4225 /* 57xx step 11: disable PCI-X Relaxed Ordering. */
4226 if (sc->bge_flags & BGEF_PCIX) {
4227 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
4228 + PCIX_CMD);
4229 /* Set max memory read byte count to 2K */
4230 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
4231 reg &= ~PCIX_CMD_BYTECNT_MASK;
4232 reg |= PCIX_CMD_BCNT_2048;
4233 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){
4234 /*
4235 * For 5704, set max outstanding split transaction
4236 * field to 0 (0 means it supports 1 request)
4237 */
4238 reg &= ~(PCIX_CMD_SPLTRANS_MASK
4239 | PCIX_CMD_BYTECNT_MASK);
4240 reg |= PCIX_CMD_BCNT_2048;
4241 }
4242 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
4243 + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER);
4244 }
4245
4246 /* 5718 reset step 10, 57XX step 12 */
4247 /* Enable memory arbiter. */
4248 if (BGE_IS_5714_FAMILY(sc)) {
4249 val = CSR_READ_4(sc, BGE_MARB_MODE);
4250 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
4251 } else
4252 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4253
4254 /* XXX 5721, 5751 and 5752 */
4255 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
4256 /* Step 19: */
4257 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
4258 /* Step 20: */
4259 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
4260 }
4261
4262 /* 5718 reset step 12, 57XX step 15 and 16 */
4263 /* Fix up byte swapping */
4264 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
4265
4266 /* 5718 reset step 13, 57XX step 17 */
4267 /* Poll until the firmware initialization is complete */
4268 bge_poll_fw(sc);
4269
4270 /* 57XX step 21 */
4271 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
4272 pcireg_t msidata;
4273
4274 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4275 BGE_PCI_MSI_DATA);
4276 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
4277 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
4278 msidata);
4279 }
4280
4281 /* 57XX step 18 */
4282 /* Write mac mode. */
4283 val = CSR_READ_4(sc, BGE_MAC_MODE);
4284 /* Restore mac_mode_mask's bits using mac_mode */
4285 val = (val & ~mac_mode_mask) | mac_mode;
4286 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
4287 DELAY(40);
4288
4289 bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
4290
4291 /*
4292 * The 5704 in TBI mode apparently needs some special
4293 	 * adjustment to ensure the SERDES drive level is set
4294 * to 1.2V.
4295 */
4296 if (sc->bge_flags & BGEF_FIBER_TBI &&
4297 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4298 uint32_t serdescfg;
4299
4300 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
4301 serdescfg = (serdescfg & ~0xFFF) | 0x880;
4302 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
4303 }
4304
4305 if (sc->bge_flags & BGEF_PCIE &&
4306 !BGE_IS_57765_PLUS(sc) &&
4307 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
4308 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
4309 uint32_t v;
4310
4311 /* Enable PCI Express bug fix */
4312 v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
4313 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
4314 v | BGE_TLP_DATA_FIFO_PROTECT);
4315 }
4316
4317 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
4318 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
4319 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
4320
4321 return 0;
4322 }
4323
4324 /*
4325 * Frame reception handling. This is called if there's a frame
4326 * on the receive return list.
4327 *
4328 * Note: we have to be able to handle two possibilities here:
4329 * 1) the frame is from the jumbo receive ring
4330 * 2) the frame is from the standard receive ring
4331 */
4332
4333 static void
4334 bge_rxeof(struct bge_softc *sc)
4335 {
4336 struct ifnet *ifp;
4337 uint16_t rx_prod, rx_cons;
4338 int stdcnt = 0, jumbocnt = 0;
4339 bus_dmamap_t dmamap;
4340 bus_addr_t offset, toff;
4341 bus_size_t tlen;
4342 int tosync;
4343
4344 rx_cons = sc->bge_rx_saved_considx;
4345 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
4346
4347 /* Nothing to do */
4348 if (rx_cons == rx_prod)
4349 return;
4350
4351 ifp = &sc->ethercom.ec_if;
4352
4353 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4354 offsetof(struct bge_ring_data, bge_status_block),
4355 sizeof (struct bge_status_block),
4356 BUS_DMASYNC_POSTREAD);
4357
4358 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
4359 tosync = rx_prod - rx_cons;
4360
4361 if (tosync != 0)
4362 rnd_add_uint32(&sc->rnd_source, tosync);
4363
4364 toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
4365
4366 if (tosync < 0) {
4367 tlen = (sc->bge_return_ring_cnt - rx_cons) *
4368 sizeof (struct bge_rx_bd);
4369 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4370 toff, tlen, BUS_DMASYNC_POSTREAD);
4371 tosync = -tosync;
4372 }
4373
4374 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4375 offset, tosync * sizeof (struct bge_rx_bd),
4376 BUS_DMASYNC_POSTREAD);
4377
4378 while (rx_cons != rx_prod) {
4379 struct bge_rx_bd *cur_rx;
4380 uint32_t rxidx;
4381 struct mbuf *m = NULL;
4382
4383 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
4384
4385 rxidx = cur_rx->bge_idx;
4386 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
4387
4388 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
4389 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4390 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
4391 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
4392 jumbocnt++;
4393 bus_dmamap_sync(sc->bge_dmatag,
4394 sc->bge_cdata.bge_rx_jumbo_map,
4395 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
4396 BGE_JLEN, BUS_DMASYNC_POSTREAD);
4397 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4398 ifp->if_ierrors++;
4399 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
4400 continue;
4401 }
4402 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
4403 NULL)== ENOBUFS) {
4404 ifp->if_ierrors++;
4405 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
4406 continue;
4407 }
4408 } else {
4409 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4410 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
4411
4412 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
4413 stdcnt++;
4414 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
4415 sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
4416 if (dmamap == NULL) {
4417 ifp->if_ierrors++;
4418 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
4419 continue;
4420 }
4421 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
4422 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
4423 bus_dmamap_unload(sc->bge_dmatag, dmamap);
4424 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4425 ifp->if_ierrors++;
4426 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
4427 continue;
4428 }
4429 if (bge_newbuf_std(sc, sc->bge_std,
4430 NULL, dmamap) == ENOBUFS) {
4431 ifp->if_ierrors++;
4432 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
4433 continue;
4434 }
4435 }
4436
4437 #ifndef __NO_STRICT_ALIGNMENT
4438 /*
4439 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
4440 * the Rx buffer has the layer-2 header unaligned.
4441 * If our CPU requires alignment, re-align by copying.
4442 */
4443 if (sc->bge_flags & BGEF_RX_ALIGNBUG) {
4444 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
4445 cur_rx->bge_len);
4446 m->m_data += ETHER_ALIGN;
4447 }
4448 #endif
4449
4450 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
4451 m_set_rcvif(m, ifp);
4452
4453 bge_rxcsum(sc, cur_rx, m);
4454
4455 /*
4456 		 * If we received a packet with a VLAN tag, attach the
4457 		 * tag to the mbuf so the stack can demultiplex it.
4458 */
4459 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
4460 vlan_set_tag(m, cur_rx->bge_vlan_tag);
4461 }
4462
4463 if_percpuq_enqueue(ifp->if_percpuq, m);
4464 }
4465
4466 sc->bge_rx_saved_considx = rx_cons;
4467 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
4468 if (stdcnt)
4469 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
4470 if (jumbocnt)
4471 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
4472 }
4473
4474 static void
4475 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
4476 {
4477
4478 if (BGE_IS_57765_PLUS(sc)) {
4479 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4480 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
4481 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
4482 if ((cur_rx->bge_error_flag &
4483 BGE_RXERRFLAG_IP_CSUM_NOK) != 0)
4484 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
4485 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4486 m->m_pkthdr.csum_data =
4487 cur_rx->bge_tcp_udp_csum;
4488 m->m_pkthdr.csum_flags |=
4489 (M_CSUM_TCPv4|M_CSUM_UDPv4|
4490 M_CSUM_DATA);
4491 }
4492 }
4493 } else {
4494 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
4495 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
4496 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
4497 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
4498 /*
4499 * Rx transport checksum-offload may also
4500 * have bugs with packets which, when transmitted,
4501 * were `runts' requiring padding.
4502 */
4503 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4504 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
4505 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
4506 m->m_pkthdr.csum_data =
4507 cur_rx->bge_tcp_udp_csum;
4508 m->m_pkthdr.csum_flags |=
4509 (M_CSUM_TCPv4|M_CSUM_UDPv4|
4510 M_CSUM_DATA);
4511 }
4512 }
4513 }
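/*
 * Editor's illustrative sketch (not part of the driver): how a consumer of
 * the flags set in bge_rxcsum() is expected to interpret them.  The real
 * handling lives in the IP/TCP/UDP input paths; this only summarizes the
 * mbuf checksum-offload convention assumed above, and the function name is
 * hypothetical.
 */
#if 0
static bool
example_rxcsum_ok(struct mbuf *m)
{

	/* Hardware validated the IPv4 header checksum and it failed. */
	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD)
		return false;
	/*
	 * M_CSUM_DATA means csum_data carries the raw 16-bit sum the NIC
	 * computed over the TCP/UDP segment; the stack still folds in the
	 * pseudo-header before declaring the packet good.
	 */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
		return (m->m_pkthdr.csum_flags & M_CSUM_DATA) != 0;
	return true;
}
#endif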
4514
4515 static void
4516 bge_txeof(struct bge_softc *sc)
4517 {
4518 struct bge_tx_bd *cur_tx = NULL;
4519 struct ifnet *ifp;
4520 struct txdmamap_pool_entry *dma;
4521 bus_addr_t offset, toff;
4522 bus_size_t tlen;
4523 int tosync;
4524 struct mbuf *m;
4525
4526 ifp = &sc->ethercom.ec_if;
4527
4528 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4529 offsetof(struct bge_ring_data, bge_status_block),
4530 sizeof (struct bge_status_block),
4531 BUS_DMASYNC_POSTREAD);
4532
4533 offset = offsetof(struct bge_ring_data, bge_tx_ring);
4534 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
4535 sc->bge_tx_saved_considx;
4536
4537 if (tosync != 0)
4538 rnd_add_uint32(&sc->rnd_source, tosync);
4539
4540 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
4541
4542 if (tosync < 0) {
4543 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
4544 sizeof (struct bge_tx_bd);
4545 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4546 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4547 tosync = -tosync;
4548 }
4549
4550 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4551 offset, tosync * sizeof (struct bge_tx_bd),
4552 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4553
4554 /*
4555 * Go through our tx ring and free mbufs for those
4556 * frames that have been sent.
4557 */
4558 while (sc->bge_tx_saved_considx !=
4559 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
4560 uint32_t idx = 0;
4561
4562 idx = sc->bge_tx_saved_considx;
4563 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
4564 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4565 ifp->if_opackets++;
4566 m = sc->bge_cdata.bge_tx_chain[idx];
4567 if (m != NULL) {
4568 sc->bge_cdata.bge_tx_chain[idx] = NULL;
4569 dma = sc->txdma[idx];
4570 if (dma->is_dma32) {
4571 bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32,
4572 0, dma->dmamap32->dm_mapsize,
4573 BUS_DMASYNC_POSTWRITE);
4574 bus_dmamap_unload(
4575 sc->bge_dmatag32, dma->dmamap32);
4576 } else {
4577 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap,
4578 0, dma->dmamap->dm_mapsize,
4579 BUS_DMASYNC_POSTWRITE);
4580 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
4581 }
4582 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
4583 sc->txdma[idx] = NULL;
4584
4585 m_freem(m);
4586 }
4587 sc->bge_txcnt--;
4588 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4589 ifp->if_timer = 0;
4590 }
4591
4592 if (cur_tx != NULL)
4593 ifp->if_flags &= ~IFF_OACTIVE;
4594 }
4595
4596 static int
4597 bge_intr(void *xsc)
4598 {
4599 struct bge_softc *sc;
4600 struct ifnet *ifp;
4601 uint32_t pcistate, statusword, statustag;
4602 uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE;
4603
4604 sc = xsc;
4605 ifp = &sc->ethercom.ec_if;
4606
4607 /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */
4608 if (BGE_IS_5717_PLUS(sc))
4609 intrmask = 0;
4610
4611 /* It is possible for the interrupt to arrive before
4612 	 * the status block has been updated.
4613 * Reading the PCI State register will confirm whether the
4614 * interrupt is ours and will flush the status block.
4615 */
4616 pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE);
4617
4618 /* read status word from status block */
4619 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4620 offsetof(struct bge_ring_data, bge_status_block),
4621 sizeof (struct bge_status_block),
4622 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4623 statusword = sc->bge_rdata->bge_status_block.bge_status;
4624 statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
4625
4626 if (sc->bge_flags & BGEF_TAGGED_STATUS) {
4627 if (sc->bge_lasttag == statustag &&
4628 (~pcistate & intrmask)) {
4629 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious);
4630 return (0);
4631 }
4632 sc->bge_lasttag = statustag;
4633 } else {
4634 if (!(statusword & BGE_STATFLAG_UPDATED) &&
4635 !(~pcistate & intrmask)) {
4636 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2);
4637 return (0);
4638 }
4639 statustag = 0;
4640 }
4641 /* Ack interrupt and stop others from occurring. */
4642 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
4643 BGE_EVCNT_INCR(sc->bge_ev_intr);
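	/*
	 * Editor's illustrative sketch (not part of the driver):
	 * bge_writembx_flush() is not shown in this excerpt.  It is assumed
	 * to be a mailbox write followed by a read-back so the write is not
	 * left posted on the bus, roughly:
	 */
#if 0
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);	/* mask further interrupts */
	(void)CSR_READ_4(sc, BGE_MBX_IRQ0_LO);	/* flush the posted write */
#endif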
4644
4645 /* clear status word */
4646 sc->bge_rdata->bge_status_block.bge_status = 0;
4647
4648 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4649 offsetof(struct bge_ring_data, bge_status_block),
4650 sizeof (struct bge_status_block),
4651 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4652
4653 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4654 statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
4655 BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
4656 bge_link_upd(sc);
4657
4658 if (ifp->if_flags & IFF_RUNNING) {
4659 /* Check RX return ring producer/consumer */
4660 bge_rxeof(sc);
4661
4662 /* Check TX ring producer/consumer */
4663 bge_txeof(sc);
4664 }
4665
4666 if (sc->bge_pending_rxintr_change) {
4667 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
4668 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
4669
4670 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
4671 DELAY(10);
4672 (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4673
4674 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
4675 DELAY(10);
4676 (void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4677
4678 sc->bge_pending_rxintr_change = 0;
4679 }
4680 bge_handle_events(sc);
4681
4682 /* Re-enable interrupts. */
4683 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag);
4684
4685 if (ifp->if_flags & IFF_RUNNING)
4686 if_schedule_deferred_start(ifp);
4687
4688 return 1;
4689 }
4690
4691 static void
4692 bge_asf_driver_up(struct bge_softc *sc)
4693 {
4694 if (sc->bge_asf_mode & ASF_STACKUP) {
4695 		/* Send an ASF heartbeat approx. every 2s */
4696 if (sc->bge_asf_count)
4697 sc->bge_asf_count --;
4698 else {
4699 sc->bge_asf_count = 2;
4700
4701 bge_wait_for_event_ack(sc);
4702
4703 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4704 BGE_FW_CMD_DRV_ALIVE3);
4705 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4706 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4707 BGE_FW_HB_TIMEOUT_SEC);
4708 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
4709 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4710 BGE_RX_CPU_DRV_EVENT);
4711 }
4712 }
4713 }
4714
4715 static void
4716 bge_tick(void *xsc)
4717 {
4718 struct bge_softc *sc = xsc;
4719 struct mii_data *mii = &sc->bge_mii;
4720 int s;
4721
4722 s = splnet();
4723
4724 if (BGE_IS_5705_PLUS(sc))
4725 bge_stats_update_regs(sc);
4726 else
4727 bge_stats_update(sc);
4728
4729 if (sc->bge_flags & BGEF_FIBER_TBI) {
4730 /*
4731 		 * Since auto-polling can't be used in TBI mode, we poll the
4732 		 * link status manually. Here we register a pending link event
4733 		 * and trigger an interrupt.
4734 */
4735 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4736 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4737 } else {
4738 /*
4739 		 * Do not touch the PHY if we have link up. This could break
4740 		 * IPMI/ASF mode or produce extra input errors
4741 		 * (extra input errors were reported for bcm5701 & bcm5704).
4742 */
4743 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
4744 mii_tick(mii);
4745 }
4746
4747 bge_asf_driver_up(sc);
4748
4749 if (!sc->bge_detaching)
4750 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
4751
4752 splx(s);
4753 }
4754
4755 static void
4756 bge_stats_update_regs(struct bge_softc *sc)
4757 {
4758 struct ifnet *ifp = &sc->ethercom.ec_if;
4759
4760 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
4761 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
4762
4763 /*
4764 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
4765 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
4766 	 * (silicon bug). There's no reliable workaround, so just
4767 	 * ignore the counter.
4768 	 */
4769 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
4770 	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4771 	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0) {
4772 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4773 }
4774 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4775 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4776
4777 if (sc->bge_flags & BGEF_RDMA_BUG) {
4778 uint32_t val, ucast, mcast, bcast;
4779
4780 ucast = CSR_READ_4(sc, BGE_MAC_STATS +
4781 offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
4782 mcast = CSR_READ_4(sc, BGE_MAC_STATS +
4783 offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
4784 bcast = CSR_READ_4(sc, BGE_MAC_STATS +
4785 offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
4786
4787 /*
4788 		 * If the controller has transmitted more than
4789 		 * BGE_NUM_RDMA_CHANNELS frames, it's safe to disable the
4790 		 * workaround for the DMA engine's TXMBUF space miscalculation.
4791 */
4792 if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
4793 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
4794 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
4795 val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
4796 else
4797 val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
4798 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
4799 sc->bge_flags &= ~BGEF_RDMA_BUG;
4800 }
4801 }
4802 }
4803
4804 static void
4805 bge_stats_update(struct bge_softc *sc)
4806 {
4807 struct ifnet *ifp = &sc->ethercom.ec_if;
4808 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4809
4810 #define READ_STAT(sc, stats, stat) \
4811 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4812
4813 ifp->if_collisions +=
4814 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
4815 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
4816 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
4817 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
4818 ifp->if_collisions;
4819
4820 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
4821 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
4822 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
4823 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
4824 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
4825 READ_STAT(sc, stats,
4826 xoffPauseFramesReceived.bge_addr_lo));
4827 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
4828 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
4829 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
4830 READ_STAT(sc, stats,
4831 macControlFramesReceived.bge_addr_lo));
4832 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
4833 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
4834
4835 #undef READ_STAT
4836
4837 #ifdef notdef
4838 ifp->if_collisions +=
4839 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
4840 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
4841 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
4842 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
4843 ifp->if_collisions;
4844 #endif
4845 }
4846
4847 /*
4848 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4849 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4850 * but when such padded frames employ the bge IP/TCP checksum offload,
4851 * the hardware checksum assist gives incorrect results (possibly
4852 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4853 * If we pad such runts with zeros, the onboard checksum comes out correct.
4854 */
4855 static inline int
4856 bge_cksum_pad(struct mbuf *pkt)
4857 {
4858 struct mbuf *last = NULL;
4859 int padlen;
4860
4861 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
4862
4863 	/* If there's only the packet-header mbuf and we can pad there, use it. */
4864 if (pkt->m_pkthdr.len == pkt->m_len &&
4865 M_TRAILINGSPACE(pkt) >= padlen) {
4866 last = pkt;
4867 } else {
4868 /*
4869 * Walk packet chain to find last mbuf. We will either
4870 * pad there, or append a new mbuf and pad it
4871 * (thus perhaps avoiding the bcm5700 dma-min bug).
4872 */
4873 for (last = pkt; last->m_next != NULL; last = last->m_next) {
4874 continue; /* do nothing */
4875 }
4876
4877 /* `last' now points to last in chain. */
4878 if (M_TRAILINGSPACE(last) < padlen) {
4879 /* Allocate new empty mbuf, pad it. Compact later. */
4880 struct mbuf *n;
4881 MGET(n, M_DONTWAIT, MT_DATA);
4882 if (n == NULL)
4883 return ENOBUFS;
4884 n->m_len = 0;
4885 last->m_next = n;
4886 last = n;
4887 }
4888 }
4889
4890 KDASSERT(!M_READONLY(last));
4891 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
4892
4893 /* Now zero the pad area, to avoid the bge cksum-assist bug */
4894 memset(mtod(last, char *) + last->m_len, 0, padlen);
4895 last->m_len += padlen;
4896 pkt->m_pkthdr.len += padlen;
4897 return 0;
4898 }
4899
4900 /*
4901 	 * Compact outbound packets to avoid a bug with DMA segments shorter than 8 bytes.
4902 */
4903 static inline int
4904 bge_compact_dma_runt(struct mbuf *pkt)
4905 {
4906 struct mbuf *m, *prev;
4907 int totlen;
4908
4909 prev = NULL;
4910 totlen = 0;
4911
4912 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
4913 		int mlen = m->m_len;
4914 		int shortfall = 8 - mlen;
4915
4916 totlen += mlen;
4917 if (mlen == 0)
4918 continue;
4919 if (mlen >= 8)
4920 continue;
4921
4922 /* If we get here, mbuf data is too small for DMA engine.
4923 * Try to fix by shuffling data to prev or next in chain.
4924 * If that fails, do a compacting deep-copy of the whole chain.
4925 */
4926
4927 		/* Internal fragment: if it fits in prev, copy it there. */
4928 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
4929 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
4930 prev->m_len += mlen;
4931 m->m_len = 0;
4932 /* XXX stitch chain */
4933 prev->m_next = m_free(m);
4934 m = prev;
4935 continue;
4936 }
4937 else if (m->m_next != NULL &&
4938 M_TRAILINGSPACE(m) >= shortfall &&
4939 m->m_next->m_len >= (8 + shortfall)) {
4940 			/* m has room and the next mbuf has enough data; pull up. */
4941
4942 memcpy(m->m_data + m->m_len, m->m_next->m_data,
4943 shortfall);
4944 m->m_len += shortfall;
4945 m->m_next->m_len -= shortfall;
4946 m->m_next->m_data += shortfall;
4947 }
4948 else if (m->m_next == NULL || 1) {
4949 /* Got a runt at the very end of the packet.
4950 * borrow data from the tail of the preceding mbuf and
4951 			 * Borrow data from the tail of the preceding mbuf and
4952 * valid, so we can do this even if prev is not writable.)
4953 */
4954
4955 /* if we'd make prev a runt, just move all of its data. */
4956 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
4957 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
4958
4959 if ((prev->m_len - shortfall) < 8)
4960 shortfall = prev->m_len;
4961
4962 #ifdef notyet /* just do the safe slow thing for now */
4963 if (!M_READONLY(m)) {
4964 				if (M_LEADINGSPACE(m) < shortfall) {
4965 					void *m_dat;
4966 					m_dat = (m->m_flags & M_PKTHDR) ?
4967 						m->m_pktdat : m->m_dat;
4968 memmove(m_dat, mtod(m, void*), m->m_len);
4969 m->m_data = m_dat;
4970 }
4971 } else
4972 #endif /* just do the safe slow thing */
4973 {
4974 struct mbuf * n = NULL;
4975 int newprevlen = prev->m_len - shortfall;
4976
4977 MGET(n, M_NOWAIT, MT_DATA);
4978 if (n == NULL)
4979 return ENOBUFS;
4980 KASSERT(m->m_len + shortfall < MLEN
4981 /*,
4982 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
4983
4984 /* first copy the data we're stealing from prev */
4985 memcpy(n->m_data, prev->m_data + newprevlen,
4986 shortfall);
4987
4988 /* update prev->m_len accordingly */
4989 prev->m_len -= shortfall;
4990
4991 /* copy data from runt m */
4992 memcpy(n->m_data + shortfall, m->m_data,
4993 m->m_len);
4994
4995 /* n holds what we stole from prev, plus m */
4996 n->m_len = shortfall + m->m_len;
4997
4998 /* stitch n into chain and free m */
4999 n->m_next = m->m_next;
5000 prev->m_next = n;
5001 /* KASSERT(m->m_next == NULL); */
5002 m->m_next = NULL;
5003 m_free(m);
5004 m = n; /* for continuing loop */
5005 }
5006 }
5007 }
5008 return 0;
5009 }
5010
5011 /*
5012 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
5013 * pointers to descriptors.
5014 */
5015 static int
5016 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
5017 {
5018 struct ifnet *ifp = &sc->ethercom.ec_if;
5019 struct bge_tx_bd *f, *prev_f;
5020 uint32_t frag, cur;
5021 uint16_t csum_flags = 0;
5022 uint16_t txbd_tso_flags = 0;
5023 struct txdmamap_pool_entry *dma;
5024 bus_dmamap_t dmamap;
5025 bus_dma_tag_t dmatag;
5026 int i = 0;
5027 int use_tso, maxsegsize, error;
5028 bool have_vtag;
5029 uint16_t vtag;
5030 bool remap;
5031
5032 if (m_head->m_pkthdr.csum_flags) {
5033 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
5034 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
5035 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
5036 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
5037 }
5038
5039 /*
5040 * If we were asked to do an outboard checksum, and the NIC
5041 * has the bug where it sometimes adds in the Ethernet padding,
5042 * explicitly pad with zeros so the cksum will be correct either way.
5043 * (For now, do this for all chip versions, until newer
5044 * are confirmed to not require the workaround.)
5045 */
5046 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
5047 #ifdef notyet
5048 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
5049 #endif
5050 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
5051 goto check_dma_bug;
5052
5053 if (bge_cksum_pad(m_head) != 0)
5054 return ENOBUFS;
5055
5056 check_dma_bug:
5057 if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
5058 goto doit;
5059
5060 /*
5061 * bcm5700 Revision B silicon cannot handle DMA descriptors with
5062 * less than eight bytes. If we encounter a teeny mbuf
5063 * at the end of a chain, we can pad. Otherwise, copy.
5064 */
5065 if (bge_compact_dma_runt(m_head) != 0)
5066 return ENOBUFS;
5067
5068 doit:
5069 dma = SLIST_FIRST(&sc->txdma_list);
5070 if (dma == NULL) {
5071 ifp->if_flags |= IFF_OACTIVE;
5072 return ENOBUFS;
5073 }
5074 dmamap = dma->dmamap;
5075 dmatag = sc->bge_dmatag;
5076 dma->is_dma32 = false;
5077
5078 /*
5079 * Set up any necessary TSO state before we start packing...
5080 */
5081 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5082 if (!use_tso) {
5083 maxsegsize = 0;
5084 } else { /* TSO setup */
5085 unsigned mss;
5086 struct ether_header *eh;
5087 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
5088 unsigned bge_hlen;
5089 struct mbuf * m0 = m_head;
5090 struct ip *ip;
5091 struct tcphdr *th;
5092 int iphl, hlen;
5093
5094 /*
5095 * XXX It would be nice if the mbuf pkthdr had offset
5096 * fields for the protocol headers.
5097 */
5098
5099 eh = mtod(m0, struct ether_header *);
5100 switch (htons(eh->ether_type)) {
5101 case ETHERTYPE_IP:
5102 offset = ETHER_HDR_LEN;
5103 break;
5104
5105 case ETHERTYPE_VLAN:
5106 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5107 break;
5108
5109 default:
5110 /*
5111 * Don't support this protocol or encapsulation.
5112 */
5113 return ENOBUFS;
5114 }
5115
5116 /*
5117 * TCP/IP headers are in the first mbuf; we can do
5118 * this the easy way.
5119 */
5120 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5121 hlen = iphl + offset;
5122 if (__predict_false(m0->m_len <
5123 (hlen + sizeof(struct tcphdr)))) {
5124
5125 aprint_error_dev(sc->bge_dev,
 5126 			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd, "
 5127 			    "not handled yet\n",
 5128 			    m0->m_len, hlen + sizeof(struct tcphdr));
5129 #ifdef NOTYET
5130 /*
5131 * XXX jonathan (at) NetBSD.org: untested.
5132 * how to force this branch to be taken?
5133 */
5134 BGE_EVCNT_INCR(sc->bge_ev_txtsopain);
5135
5136 m_copydata(m0, offset, sizeof(ip), &ip);
5137 m_copydata(m0, hlen, sizeof(th), &th);
5138
5139 ip.ip_len = 0;
5140
5141 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
5142 sizeof(ip.ip_len), &ip.ip_len);
5143
5144 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5145 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5146
5147 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5148 sizeof(th.th_sum), &th.th_sum);
5149
5150 hlen += th.th_off << 2;
5151 iptcp_opt_words = hlen;
5152 #else
5153 /*
5154 * if_wm "hard" case not yet supported, can we not
5155 * mandate it out of existence?
5156 */
5157 (void) ip; (void)th; (void) ip_tcp_hlen;
5158
5159 return ENOBUFS;
5160 #endif
5161 } else {
5162 ip = (struct ip *) (mtod(m0, char *) + offset);
5163 th = (struct tcphdr *) (mtod(m0, char *) + hlen);
5164 ip_tcp_hlen = iphl + (th->th_off << 2);
5165
5166 /* Total IP/TCP options, in 32-bit words */
5167 iptcp_opt_words = (ip_tcp_hlen
5168 - sizeof(struct tcphdr)
5169 - sizeof(struct ip)) >> 2;
5170 }
5171 if (BGE_IS_575X_PLUS(sc)) {
5172 th->th_sum = 0;
5173 csum_flags = 0;
5174 } else {
5175 /*
5176 * XXX jonathan (at) NetBSD.org: 5705 untested.
5177 * Requires TSO firmware patch for 5701/5703/5704.
5178 */
5179 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5180 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5181 }
5182
5183 mss = m_head->m_pkthdr.segsz;
5184 txbd_tso_flags |=
5185 BGE_TXBDFLAG_CPU_PRE_DMA |
5186 BGE_TXBDFLAG_CPU_POST_DMA;
5187
5188 /*
5189 * Our NIC TSO-assist assumes TSO has standard, optionless
5190 * IPv4 and TCP headers, which total 40 bytes. By default,
5191 * the NIC copies 40 bytes of IP/TCP header from the
5192 * supplied header into the IP/TCP header portion of
5193 * each post-TSO-segment. If the supplied packet has IP or
5194 * TCP options, we need to tell the NIC to copy those extra
5195 * bytes into each post-TSO header, in addition to the normal
5196 * 40-byte IP/TCP header (and to leave space accordingly).
5197 * Unfortunately, the driver encoding of option length
5198 * varies across different ASIC families.
5199 */
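		/*
		 * Worked example (illustration only): a segment with a
		 * 20-byte IP header and a 32-byte TCP header (12 bytes of
		 * TCP options) has ip_tcp_hlen = 52, so bge_hlen = 13 and
		 * iptcp_opt_words = 3; the per-ASIC-family code below packs
		 * that header length into different descriptor bit fields.
		 */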
5200 tcp_seg_flags = 0;
5201 bge_hlen = ip_tcp_hlen >> 2;
5202 if (BGE_IS_5717_PLUS(sc)) {
5203 tcp_seg_flags = (bge_hlen & 0x3) << 14;
5204 txbd_tso_flags |=
5205 ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2);
5206 } else if (BGE_IS_5705_PLUS(sc)) {
5207 tcp_seg_flags =
5208 bge_hlen << 11;
5209 } else {
5210 /* XXX iptcp_opt_words or bge_hlen ? */
5211 txbd_tso_flags |=
5212 iptcp_opt_words << 12;
5213 }
5214 maxsegsize = mss | tcp_seg_flags;
5215 ip->ip_len = htons(mss + ip_tcp_hlen);
5216 ip->ip_sum = 0;
5217
5218 } /* TSO setup */
5219
5220 have_vtag = vlan_has_tag(m_head);
5221 if (have_vtag)
5222 vtag = vlan_get_tag(m_head);
5223
5224 /*
5225 * Start packing the mbufs in this chain into
5226 * the fragment pointers. Stop when we run out
5227 * of fragments or hit the end of the mbuf chain.
5228 */
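	/*
	 * If the load fails with EFBIG (too many DMA segments for the map),
	 * defragment the chain once with m_defrag() and retry; the "remap"
	 * flag limits us to a single defrag-and-reload attempt.
	 */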
5229 remap = true;
5230 load_again:
5231 error = bus_dmamap_load_mbuf(dmatag, dmamap,
5232 m_head, BUS_DMA_NOWAIT);
5233 if (__predict_false(error)) {
5234 if (error == EFBIG && remap) {
5235 struct mbuf *m;
5236 remap = false;
5237 m = m_defrag(m_head, M_NOWAIT);
5238 if (m != NULL) {
5239 KASSERT(m == m_head);
5240 goto load_again;
5241 }
5242 }
5243 return error;
5244 }
5245 /*
5246 * Sanity check: avoid coming within 16 descriptors
5247 * of the end of the ring.
5248 */
5249 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
5250 BGE_TSO_PRINTF(("%s: "
5251 " dmamap_load_mbuf too close to ring wrap\n",
5252 device_xname(sc->bge_dev)));
5253 goto fail_unload;
5254 }
5255
5256 /* Iterate over dmap-map fragments. */
5257 f = prev_f = NULL;
5258 cur = frag = *txidx;
5259
5260 for (i = 0; i < dmamap->dm_nsegs; i++) {
5261 f = &sc->bge_rdata->bge_tx_ring[frag];
5262 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
5263 break;
5264
5265 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
5266 f->bge_len = dmamap->dm_segs[i].ds_len;
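		/*
		 * With TSO on the 64-bit DMA tag, fall back to the 32-bit
		 * tag below if this segment crosses a 4GB boundary or lands
		 * in a different 4GB window than the previous segment.
		 */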
5267 if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
5268 (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
5269 ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
5270 (prev_f != NULL &&
5271 prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
5272 ) {
5273 /*
 5274 			 * A watchdog timeout issue was observed with TSO;
 5275 			 * limiting the DMA address space to 32 bits seems to
 5276 			 * address the issue.
5277 */
5278 bus_dmamap_unload(dmatag, dmamap);
5279 dmatag = sc->bge_dmatag32;
5280 dmamap = dma->dmamap32;
5281 dma->is_dma32 = true;
5282 remap = true;
5283 goto load_again;
5284 }
5285
5286 /*
 5287 		 * For the 5751 and follow-ons, TSO requires that we
 5288 		 * turn off the checksum-assist flag in the TX descriptor
 5289 		 * and supply the ASIC-revision-specific encoding
 5290 		 * of the TSO flags and segment size.
5291 */
5292 if (use_tso) {
5293 if (BGE_IS_575X_PLUS(sc) || i == 0) {
5294 f->bge_rsvd = maxsegsize;
5295 f->bge_flags = csum_flags | txbd_tso_flags;
5296 } else {
5297 f->bge_rsvd = 0;
5298 f->bge_flags =
5299 (csum_flags | txbd_tso_flags) & 0x0fff;
5300 }
5301 } else {
5302 f->bge_rsvd = 0;
5303 f->bge_flags = csum_flags;
5304 }
5305
5306 if (have_vtag) {
5307 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
5308 f->bge_vlan_tag = vtag;
5309 } else {
5310 f->bge_vlan_tag = 0;
5311 }
5312 prev_f = f;
5313 cur = frag;
5314 BGE_INC(frag, BGE_TX_RING_CNT);
5315 }
5316
5317 if (i < dmamap->dm_nsegs) {
5318 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
5319 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
5320 goto fail_unload;
5321 }
5322
5323 bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize,
5324 BUS_DMASYNC_PREWRITE);
5325
5326 if (frag == sc->bge_tx_saved_considx) {
5327 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
5328 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
5329
5330 goto fail_unload;
5331 }
5332
5333 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
5334 sc->bge_cdata.bge_tx_chain[cur] = m_head;
5335 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
5336 sc->txdma[cur] = dma;
5337 sc->bge_txcnt += dmamap->dm_nsegs;
5338
5339 *txidx = frag;
5340
5341 return 0;
5342
5343 fail_unload:
5344 bus_dmamap_unload(dmatag, dmamap);
5345 ifp->if_flags |= IFF_OACTIVE;
5346
5347 return ENOBUFS;
5348 }
5349
5350 /*
5351 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5352 * to the mbuf data regions directly in the transmit descriptors.
5353 */
5354 static void
5355 bge_start(struct ifnet *ifp)
5356 {
5357 struct bge_softc *sc;
5358 struct mbuf *m_head = NULL;
5359 struct mbuf *m;
5360 uint32_t prodidx;
5361 int pkts = 0;
5362 int error;
5363
5364 sc = ifp->if_softc;
5365
5366 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5367 return;
5368
5369 prodidx = sc->bge_tx_prodidx;
5370
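	/*
	 * Queue packets while the descriptor slot at the producer index is
	 * free; bge_encap() advances prodidx past each packet it maps.
	 */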
5371 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
5372 IFQ_POLL(&ifp->if_snd, m_head);
5373 if (m_head == NULL)
5374 break;
5375
5376 #if 0
5377 /*
5378 * XXX
5379 * safety overkill. If this is a fragmented packet chain
5380 * with delayed TCP/UDP checksums, then only encapsulate
5381 * it if we have enough descriptors to handle the entire
5382 * chain at once.
5383 * (paranoia -- may not actually be needed)
5384 */
5385 if (m_head->m_flags & M_FIRSTFRAG &&
5386 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
5387 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
5388 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
5389 ifp->if_flags |= IFF_OACTIVE;
5390 break;
5391 }
5392 }
5393 #endif
5394
5395 /*
5396 * Pack the data into the transmit ring. If we
5397 * don't have room, set the OACTIVE flag and wait
5398 * for the NIC to drain the ring.
5399 */
5400 error = bge_encap(sc, m_head, &prodidx);
5401 if (__predict_false(error)) {
5402 if (ifp->if_flags & IFF_OACTIVE) {
5403 /* just wait for the transmit ring to drain */
5404 break;
5405 }
5406 IFQ_DEQUEUE(&ifp->if_snd, m);
5407 KASSERT(m == m_head);
5408 m_freem(m_head);
5409 continue;
5410 }
5411
5412 /* now we are committed to transmit the packet */
5413 IFQ_DEQUEUE(&ifp->if_snd, m);
5414 KASSERT(m == m_head);
5415 pkts++;
5416
5417 /*
5418 * If there's a BPF listener, bounce a copy of this frame
5419 * to him.
5420 */
5421 bpf_mtap(ifp, m_head, BPF_D_OUT);
5422 }
5423 if (pkts == 0)
5424 return;
5425
5426 /* Transmit */
5427 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5428 /* 5700 b2 errata */
5429 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
5430 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5431
5432 sc->bge_tx_prodidx = prodidx;
5433
5434 /*
5435 * Set a timeout in case the chip goes out to lunch.
5436 */
5437 ifp->if_timer = 5;
5438 }
5439
5440 static int
5441 bge_init(struct ifnet *ifp)
5442 {
5443 struct bge_softc *sc = ifp->if_softc;
5444 const uint16_t *m;
5445 uint32_t mode, reg;
5446 int s, error = 0;
5447
5448 s = splnet();
5449
5450 ifp = &sc->ethercom.ec_if;
5451
5452 /* Cancel pending I/O and flush buffers. */
5453 bge_stop(ifp, 0);
5454
5455 bge_stop_fw(sc);
5456 bge_sig_pre_reset(sc, BGE_RESET_START);
5457 bge_reset(sc);
5458 bge_sig_legacy(sc, BGE_RESET_START);
5459
5460 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
5461 reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
5462 reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE |
5463 BGE_CPMU_CTRL_LINK_IDLE_MODE);
5464 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
5465
5466 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
5467 reg &= ~BGE_CPMU_LSPD_10MB_CLK;
5468 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
5469 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
5470
5471 reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD);
5472 reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK;
5473 reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25;
5474 CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg);
5475
5476 reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC);
5477 reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK;
5478 reg |= BGE_CPMU_HST_ACC_MACCLK_6_25;
5479 CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg);
5480 }
5481
5482 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
5483 pcireg_t aercap;
5484
5485 reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH);
5486 reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK)
5487 | BGE_PCIE_PWRMNG_L1THRESH_4MS
5488 | BGE_PCIE_PWRMNG_EXTASPMTMR_EN;
5489 CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg);
5490
5491 reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY);
5492 reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK)
5493 | BGE_PCIE_EIDLE_DELAY_13CLK;
5494 CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg);
5495
5496 /* Clear correctable error */
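		/*
		 * The AER Correctable Error Status bits are write-one-to-clear,
		 * so writing all ones clears any latched errors.
		 */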
5497 if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag,
5498 PCI_EXTCAP_AER, &aercap, NULL) != 0)
5499 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
5500 aercap + PCI_AER_COR_STATUS, 0xffffffff);
5501
5502 reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
5503 reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
5504 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
5505 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg);
5506 }
5507
5508 bge_sig_post_reset(sc, BGE_RESET_START);
5509
5510 bge_chipinit(sc);
5511
5512 /*
5513 * Init the various state machines, ring
5514 * control blocks and firmware.
5515 */
5516 error = bge_blockinit(sc);
5517 if (error != 0) {
5518 aprint_error_dev(sc->bge_dev, "initialization error %d\n",
5519 error);
5520 splx(s);
5521 return error;
5522 }
5523
5524 ifp = &sc->ethercom.ec_if;
5525
5526 /* 5718 step 25, 57XX step 54 */
5527 /* Specify MTU. */
5528 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
5529 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
5530
5531 /* 5718 step 23 */
5532 /* Load our MAC address. */
5533 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
5534 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
5535 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
5536
5537 /* Enable or disable promiscuous mode as needed. */
5538 if (ifp->if_flags & IFF_PROMISC)
5539 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5540 else
5541 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5542
5543 /* Program multicast filter. */
5544 bge_setmulti(sc);
5545
5546 /* Init RX ring. */
5547 bge_init_rx_ring_std(sc);
5548
5549 /*
5550 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 5551 	 * memory to ensure that the chip has in fact read the first
5552 * entry of the ring.
5553 */
5554 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5555 uint32_t v, i;
5556 for (i = 0; i < 10; i++) {
5557 DELAY(20);
5558 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5559 if (v == (MCLBYTES - ETHER_ALIGN))
5560 break;
5561 }
5562 if (i == 10)
5563 aprint_error_dev(sc->bge_dev,
5564 "5705 A0 chip failed to load RX ring\n");
5565 }
5566
5567 /* Init jumbo RX ring. */
5568 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
5569 bge_init_rx_ring_jumbo(sc);
5570
5571 /* Init our RX return ring index */
5572 sc->bge_rx_saved_considx = 0;
5573
5574 /* Init TX ring. */
5575 bge_init_tx_ring(sc);
5576
5577 /* 5718 step 63, 57XX step 94 */
5578 /* Enable TX MAC state machine lockup fix. */
5579 mode = CSR_READ_4(sc, BGE_TX_MODE);
5580 if (BGE_IS_5755_PLUS(sc) ||
5581 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5582 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5583 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
5584 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
5585 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5586 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5587 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5588 }
5589
5590 /* Turn on transmitter */
5591 CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5592 /* 5718 step 64 */
5593 DELAY(100);
5594
5595 /* 5718 step 65, 57XX step 95 */
5596 /* Turn on receiver */
5597 mode = CSR_READ_4(sc, BGE_RX_MODE);
5598 if (BGE_IS_5755_PLUS(sc))
5599 mode |= BGE_RXMODE_IPV6_ENABLE;
5600 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
5601 mode |= BGE_RXMODE_IPV4_FRAG_FIX;
5602 CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
5603 /* 5718 step 66 */
5604 DELAY(10);
5605
5606 /* 5718 step 12, 57XX step 37 */
5607 /*
 5608 	 * XXX Documents for the 5718 series and 577xx say the recommended value
 5609 	 * is 1, but tg3 sets 1 only on the 57765 series.
5610 */
5611 if (BGE_IS_57765_PLUS(sc))
5612 reg = 1;
5613 else
5614 reg = 2;
5615 CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);
5616
5617 /* Tell firmware we're alive. */
5618 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5619
5620 /* Enable host interrupts. */
5621 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5622 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5623 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
5624
5625 if ((error = bge_ifmedia_upd(ifp)) != 0)
5626 goto out;
5627
5628 ifp->if_flags |= IFF_RUNNING;
5629 ifp->if_flags &= ~IFF_OACTIVE;
5630
5631 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
5632
5633 out:
5634 sc->bge_if_flags = ifp->if_flags;
5635 splx(s);
5636
5637 return error;
5638 }
5639
5640 /*
5641 * Set media options.
5642 */
5643 static int
5644 bge_ifmedia_upd(struct ifnet *ifp)
5645 {
5646 struct bge_softc *sc = ifp->if_softc;
5647 struct mii_data *mii = &sc->bge_mii;
5648 struct ifmedia *ifm = &sc->bge_ifmedia;
5649 int rc;
5650
5651 /* If this is a 1000baseX NIC, enable the TBI port. */
5652 if (sc->bge_flags & BGEF_FIBER_TBI) {
5653 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5654 return EINVAL;
5655 switch (IFM_SUBTYPE(ifm->ifm_media)) {
5656 case IFM_AUTO:
5657 /*
5658 * The BCM5704 ASIC appears to have a special
5659 * mechanism for programming the autoneg
5660 * advertisement registers in TBI mode.
5661 */
5662 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
5663 uint32_t sgdig;
5664 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5665 if (sgdig & BGE_SGDIGSTS_DONE) {
5666 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5667 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5668 sgdig |= BGE_SGDIGCFG_AUTO |
5669 BGE_SGDIGCFG_PAUSE_CAP |
5670 BGE_SGDIGCFG_ASYM_PAUSE;
5671 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
5672 sgdig | BGE_SGDIGCFG_SEND);
5673 DELAY(5);
5674 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
5675 sgdig);
5676 }
5677 }
5678 break;
5679 case IFM_1000_SX:
5680 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5681 BGE_CLRBIT(sc, BGE_MAC_MODE,
5682 BGE_MACMODE_HALF_DUPLEX);
5683 } else {
5684 BGE_SETBIT(sc, BGE_MAC_MODE,
5685 BGE_MACMODE_HALF_DUPLEX);
5686 }
5687 DELAY(40);
5688 break;
5689 default:
5690 return EINVAL;
5691 }
5692 /* XXX 802.3x flow control for 1000BASE-SX */
5693 return 0;
5694 }
5695
5696 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) &&
5697 (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) {
5698 uint32_t reg;
5699
5700 reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
5701 if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) {
5702 reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY;
5703 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
5704 }
5705 }
5706
5707 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
5708 if ((rc = mii_mediachg(mii)) == ENXIO)
5709 return 0;
5710
5711 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
5712 uint32_t reg;
5713
5714 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK);
5715 if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK)
5716 == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) {
5717 reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK;
5718 delay(40);
5719 CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg);
5720 }
5721 }
5722
5723 /*
5724 * Force an interrupt so that we will call bge_link_upd
5725 * if needed and clear any pending link state attention.
 5726 	 * Without this we would not get any further interrupts
 5727 	 * for link state changes, so the link would never come UP
 5728 	 * and we could not send in bge_start. The only other way to
 5729 	 * get things working was to receive a packet and get an RX intr.
5730 */
5731 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
5732 sc->bge_flags & BGEF_IS_5788)
5733 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5734 else
5735 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5736
5737 return rc;
5738 }
5739
5740 /*
5741 * Report current media status.
5742 */
5743 static void
5744 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5745 {
5746 struct bge_softc *sc = ifp->if_softc;
5747 struct mii_data *mii = &sc->bge_mii;
5748
5749 if (sc->bge_flags & BGEF_FIBER_TBI) {
5750 ifmr->ifm_status = IFM_AVALID;
5751 ifmr->ifm_active = IFM_ETHER;
5752 if (CSR_READ_4(sc, BGE_MAC_STS) &
5753 BGE_MACSTAT_TBI_PCS_SYNCHED)
5754 ifmr->ifm_status |= IFM_ACTIVE;
5755 ifmr->ifm_active |= IFM_1000_SX;
5756 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5757 ifmr->ifm_active |= IFM_HDX;
5758 else
5759 ifmr->ifm_active |= IFM_FDX;
5760 return;
5761 }
5762
5763 mii_pollstat(mii);
5764 ifmr->ifm_status = mii->mii_media_status;
5765 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
5766 sc->bge_flowflags;
5767 }
5768
5769 static int
5770 bge_ifflags_cb(struct ethercom *ec)
5771 {
5772 struct ifnet *ifp = &ec->ec_if;
5773 struct bge_softc *sc = ifp->if_softc;
5774 int change = ifp->if_flags ^ sc->bge_if_flags;
5775
5776 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
5777 return ENETRESET;
5778 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
5779 return 0;
5780
5781 if ((ifp->if_flags & IFF_PROMISC) == 0)
5782 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5783 else
5784 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5785
5786 bge_setmulti(sc);
5787
5788 sc->bge_if_flags = ifp->if_flags;
5789 return 0;
5790 }
5791
5792 static int
5793 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
5794 {
5795 struct bge_softc *sc = ifp->if_softc;
5796 struct ifreq *ifr = (struct ifreq *) data;
5797 int s, error = 0;
5798 struct mii_data *mii;
5799
5800 s = splnet();
5801
5802 switch (command) {
5803 case SIOCSIFMEDIA:
5804 /* XXX Flow control is not supported for 1000BASE-SX */
5805 if (sc->bge_flags & BGEF_FIBER_TBI) {
5806 ifr->ifr_media &= ~IFM_ETH_FMASK;
5807 sc->bge_flowflags = 0;
5808 }
5809
5810 /* Flow control requires full-duplex mode. */
5811 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
5812 (ifr->ifr_media & IFM_FDX) == 0) {
5813 ifr->ifr_media &= ~IFM_ETH_FMASK;
5814 }
5815 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
5816 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
5817 /* We can do both TXPAUSE and RXPAUSE. */
5818 ifr->ifr_media |=
5819 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
5820 }
5821 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
5822 }
5823 /* FALLTHROUGH */
5824 case SIOCGIFMEDIA:
5825 if (sc->bge_flags & BGEF_FIBER_TBI) {
5826 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
5827 command);
5828 } else {
5829 mii = &sc->bge_mii;
5830 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
5831 command);
5832 }
5833 break;
5834 default:
5835 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
5836 break;
5837
5838 error = 0;
5839
5840 if (command != SIOCADDMULTI && command != SIOCDELMULTI)
5841 ;
5842 else if (ifp->if_flags & IFF_RUNNING)
5843 bge_setmulti(sc);
5844 break;
5845 }
5846
5847 splx(s);
5848
5849 return error;
5850 }
5851
5852 static void
5853 bge_watchdog(struct ifnet *ifp)
5854 {
5855 struct bge_softc *sc;
5856 uint32_t status;
5857
5858 sc = ifp->if_softc;
5859
5860 /* If pause frames are active then don't reset the hardware. */
5861 if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
5862 status = CSR_READ_4(sc, BGE_RX_STS);
5863 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
5864 /*
5865 * If link partner has us in XOFF state then wait for
5866 * the condition to clear.
5867 */
5868 CSR_WRITE_4(sc, BGE_RX_STS, status);
5869 ifp->if_timer = 5;
5870 return;
5871 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
5872 (status & BGE_RXSTAT_RCVD_XON) != 0) {
5873 /*
 5874 			 * We received an XOFF followed by an XON, so the pause
 5875 			 * condition has cleared; just rearm the watchdog timer.
5876 */
5877 CSR_WRITE_4(sc, BGE_RX_STS, status);
5878 ifp->if_timer = 5;
5879 return;
5880 }
5881 /*
5882 * Any other condition is unexpected and the controller
5883 * should be reset.
5884 */
5885 }
5886
5887 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
5888
5889 ifp->if_flags &= ~IFF_RUNNING;
5890 bge_init(ifp);
5891
5892 ifp->if_oerrors++;
5893 }
5894
5895 static void
5896 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
5897 {
5898 int i;
5899
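	/*
	 * Clear the block's enable bit and poll for up to 100ms
	 * (1000 * 100us) until the hardware reports it has stopped.
	 */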
5900 BGE_CLRBIT_FLUSH(sc, reg, bit);
5901
5902 for (i = 0; i < 1000; i++) {
5903 delay(100);
5904 if ((CSR_READ_4(sc, reg) & bit) == 0)
5905 return;
5906 }
5907
5908 /*
 5909 	 * Don't print an error when the register is BGE_SRS_MODE; that
 5910 	 * block fails to stop in some environments (and once after boot?).
5911 */
5912 if (reg != BGE_SRS_MODE)
5913 aprint_error_dev(sc->bge_dev,
5914 "block failed to stop: reg 0x%lx, bit 0x%08x\n",
5915 (u_long)reg, bit);
5916 }
5917
5918 /*
5919 * Stop the adapter and free any mbufs allocated to the
5920 * RX and TX lists.
5921 */
5922 static void
5923 bge_stop(struct ifnet *ifp, int disable)
5924 {
5925 struct bge_softc *sc = ifp->if_softc;
5926
5927 if (disable) {
5928 sc->bge_detaching = 1;
5929 callout_halt(&sc->bge_timeout, NULL);
5930 } else
5931 callout_stop(&sc->bge_timeout);
5932
5933 /* Disable host interrupts. */
5934 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5935 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
5936
5937 /*
5938 * Tell firmware we're shutting down.
5939 */
5940 bge_stop_fw(sc);
5941 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
5942
5943 /*
5944 * Disable all of the receiver blocks.
5945 */
5946 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5947 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5948 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5949 if (BGE_IS_5700_FAMILY(sc))
5950 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5951 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5952 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5953 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5954
5955 /*
5956 * Disable all of the transmit blocks.
5957 */
5958 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5959 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5960 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5961 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5962 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5963 if (BGE_IS_5700_FAMILY(sc))
5964 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5965 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5966
5967 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB);
5968 delay(40);
5969
5970 bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
5971
5972 /*
5973 * Shut down all of the memory managers and related
5974 * state machines.
5975 */
5976 /* 5718 step 5a,5b */
5977 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5978 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5979 if (BGE_IS_5700_FAMILY(sc))
5980 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5981
5982 /* 5718 step 5c,5d */
5983 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5984 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5985
5986 if (BGE_IS_5700_FAMILY(sc)) {
5987 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5988 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5989 }
5990
5991 bge_reset(sc);
5992 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
5993 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
5994
5995 /*
5996 * Keep the ASF firmware running if up.
5997 */
5998 if (sc->bge_asf_mode & ASF_STACKUP)
5999 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6000 else
6001 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6002
6003 /* Free the RX lists. */
6004 bge_free_rx_ring_std(sc, disable);
6005
6006 /* Free jumbo RX list. */
6007 if (BGE_IS_JUMBO_CAPABLE(sc))
6008 bge_free_rx_ring_jumbo(sc);
6009
6010 /* Free TX buffers. */
6011 bge_free_tx_ring(sc, disable);
6012
6013 /*
6014 * Isolate/power down the PHY.
6015 */
6016 if (!(sc->bge_flags & BGEF_FIBER_TBI))
6017 mii_down(&sc->bge_mii);
6018
6019 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
6020
6021 /* Clear MAC's link state (PHY may still have link UP). */
6022 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6023
6024 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6025 }
6026
6027 static void
6028 bge_link_upd(struct bge_softc *sc)
6029 {
6030 struct ifnet *ifp = &sc->ethercom.ec_if;
6031 struct mii_data *mii = &sc->bge_mii;
6032 uint32_t status;
6033 uint16_t phyval;
6034 int link;
6035
6036 /* Clear 'pending link event' flag */
6037 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
6038
6039 /*
6040 * Process link state changes.
6041 * Grrr. The link status word in the status block does
6042 * not work correctly on the BCM5700 rev AX and BX chips,
6043 * according to all available information. Hence, we have
6044 * to enable MII interrupts in order to properly obtain
6045 * async link changes. Unfortunately, this also means that
6046 * we have to read the MAC status register to detect link
6047 * changes, thereby adding an additional register access to
6048 * the interrupt handler.
6049 */
6050
6051 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
6052 status = CSR_READ_4(sc, BGE_MAC_STS);
6053 if (status & BGE_MACSTAT_MI_INTERRUPT) {
6054 mii_pollstat(mii);
6055
6056 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
6057 mii->mii_media_status & IFM_ACTIVE &&
6058 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
6059 BGE_STS_SETBIT(sc, BGE_STS_LINK);
6060 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
6061 (!(mii->mii_media_status & IFM_ACTIVE) ||
6062 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
6063 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6064
6065 /* Clear the interrupt */
6066 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
6067 BGE_EVTENB_MI_INTERRUPT);
6068 bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
6069 BRGPHY_MII_ISR, &phyval);
6070 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
6071 BRGPHY_MII_IMR, BRGPHY_INTRS);
6072 }
6073 return;
6074 }
6075
6076 if (sc->bge_flags & BGEF_FIBER_TBI) {
6077 status = CSR_READ_4(sc, BGE_MAC_STS);
6078 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
6079 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
6080 BGE_STS_SETBIT(sc, BGE_STS_LINK);
6081 if (BGE_ASICREV(sc->bge_chipid)
6082 == BGE_ASICREV_BCM5704) {
6083 BGE_CLRBIT(sc, BGE_MAC_MODE,
6084 BGE_MACMODE_TBI_SEND_CFGS);
6085 DELAY(40);
6086 }
6087 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
6088 if_link_state_change(ifp, LINK_STATE_UP);
6089 }
6090 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
6091 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6092 if_link_state_change(ifp, LINK_STATE_DOWN);
6093 }
6094 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
6095 /*
 6096 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
 6097 		 * bit in the status word always set. Work around this bug by
 6098 		 * reading the PHY link status directly.
6099 */
6100 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
6101 BGE_STS_LINK : 0;
6102
6103 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
6104 mii_pollstat(mii);
6105
6106 if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
6107 mii->mii_media_status & IFM_ACTIVE &&
6108 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
6109 BGE_STS_SETBIT(sc, BGE_STS_LINK);
6110 else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
6111 (!(mii->mii_media_status & IFM_ACTIVE) ||
6112 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
6113 BGE_STS_CLRBIT(sc, BGE_STS_LINK);
6114 }
6115 } else {
6116 /*
6117 * For controllers that call mii_tick, we have to poll
6118 * link status.
6119 */
6120 mii_pollstat(mii);
6121 }
6122
6123 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
6124 uint32_t reg, scale;
6125
6126 reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) &
6127 BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK;
6128 if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5)
6129 scale = 65;
6130 else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25)
6131 scale = 6;
6132 else
6133 scale = 12;
6134
6135 reg = CSR_READ_4(sc, BGE_MISC_CFG) &
6136 ~BGE_MISCCFG_TIMER_PRESCALER;
6137 reg |= scale << 1;
6138 CSR_WRITE_4(sc, BGE_MISC_CFG, reg);
6139 }
6140 /* Clear the attention */
6141 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
6142 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
6143 BGE_MACSTAT_LINK_CHANGED);
6144 }
6145
6146 static int
6147 bge_sysctl_verify(SYSCTLFN_ARGS)
6148 {
6149 int error, t;
6150 struct sysctlnode node;
6151
6152 node = *rnode;
6153 t = *(int*)rnode->sysctl_data;
6154 node.sysctl_data = &t;
6155 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6156 if (error || newp == NULL)
6157 return error;
6158
6159 #if 0
6160 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
6161 node.sysctl_num, rnode->sysctl_num));
6162 #endif
6163
6164 if (node.sysctl_num == bge_rxthresh_nodenum) {
6165 if (t < 0 || t >= NBGE_RX_THRESH)
6166 return EINVAL;
6167 bge_update_all_threshes(t);
6168 } else
6169 return EINVAL;
6170
6171 *(int*)rnode->sysctl_data = t;
6172
6173 return 0;
6174 }
6175
6176 /*
6177 * Set up sysctl(3) MIB, hw.bge.*.
6178 */
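/*
 * The "rx_lvl" leaf created below can be changed at run time, e.g.
 * "sysctl -w hw.bge.rx_lvl=1"; bge_sysctl_verify() rejects values
 * outside the range 0 .. NBGE_RX_THRESH - 1.
 */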
6179 static void
6180 bge_sysctl_init(struct bge_softc *sc)
6181 {
6182 int rc, bge_root_num;
6183 const struct sysctlnode *node;
6184
6185 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6186 0, CTLTYPE_NODE, "bge",
6187 SYSCTL_DESCR("BGE interface controls"),
6188 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
6189 goto out;
6190 }
6191
6192 bge_root_num = node->sysctl_num;
6193
6194 /* BGE Rx interrupt mitigation level */
6195 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6196 CTLFLAG_READWRITE,
6197 CTLTYPE_INT, "rx_lvl",
6198 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
6199 bge_sysctl_verify, 0,
6200 &bge_rx_thresh_lvl,
6201 0, CTL_HW, bge_root_num, CTL_CREATE,
6202 CTL_EOL)) != 0) {
6203 goto out;
6204 }
6205
6206 bge_rxthresh_nodenum = node->sysctl_num;
6207
6208 return;
6209
6210 out:
6211 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
6212 }
6213
6214 #ifdef BGE_DEBUG
6215 void
6216 bge_debug_info(struct bge_softc *sc)
6217 {
6218
6219 printf("Hardware Flags:\n");
6220 if (BGE_IS_57765_PLUS(sc))
6221 printf(" - 57765 Plus\n");
6222 if (BGE_IS_5717_PLUS(sc))
6223 printf(" - 5717 Plus\n");
6224 if (BGE_IS_5755_PLUS(sc))
6225 printf(" - 5755 Plus\n");
6226 if (BGE_IS_575X_PLUS(sc))
6227 printf(" - 575X Plus\n");
6228 if (BGE_IS_5705_PLUS(sc))
6229 printf(" - 5705 Plus\n");
6230 if (BGE_IS_5714_FAMILY(sc))
6231 printf(" - 5714 Family\n");
6232 if (BGE_IS_5700_FAMILY(sc))
6233 printf(" - 5700 Family\n");
6234 if (sc->bge_flags & BGEF_IS_5788)
6235 printf(" - 5788\n");
6236 if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
6237 printf(" - Supports Jumbo Frames\n");
6238 if (sc->bge_flags & BGEF_NO_EEPROM)
6239 printf(" - No EEPROM\n");
6240 if (sc->bge_flags & BGEF_PCIX)
6241 printf(" - PCI-X Bus\n");
6242 if (sc->bge_flags & BGEF_PCIE)
6243 printf(" - PCI Express Bus\n");
6244 if (sc->bge_flags & BGEF_RX_ALIGNBUG)
6245 printf(" - RX Alignment Bug\n");
6246 if (sc->bge_flags & BGEF_APE)
6247 printf(" - APE\n");
6248 if (sc->bge_flags & BGEF_CPMU_PRESENT)
6249 printf(" - CPMU\n");
6250 if (sc->bge_flags & BGEF_TSO)
6251 printf(" - TSO\n");
6252 if (sc->bge_flags & BGEF_TAGGED_STATUS)
6253 printf(" - TAGGED_STATUS\n");
6254
6255 /* PHY related */
6256 if (sc->bge_phy_flags & BGEPHYF_NO_3LED)
6257 printf(" - No 3 LEDs\n");
6258 if (sc->bge_phy_flags & BGEPHYF_CRC_BUG)
6259 printf(" - CRC bug\n");
6260 if (sc->bge_phy_flags & BGEPHYF_ADC_BUG)
6261 printf(" - ADC bug\n");
6262 if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG)
6263 printf(" - 5704 A0 bug\n");
6264 if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG)
6265 printf(" - jitter bug\n");
6266 if (sc->bge_phy_flags & BGEPHYF_BER_BUG)
6267 printf(" - BER bug\n");
6268 if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM)
6269 printf(" - adjust trim\n");
6270 if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED)
6271 printf(" - no wirespeed\n");
6272
6273 /* ASF related */
6274 if (sc->bge_asf_mode & ASF_ENABLE)
6275 printf(" - ASF enable\n");
6276 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE)
6277 printf(" - ASF new handshake\n");
6278 if (sc->bge_asf_mode & ASF_STACKUP)
6279 printf(" - ASF stackup\n");
6280 }
6281 #endif /* BGE_DEBUG */
6282
6283 static int
6284 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6285 {
6286 prop_dictionary_t dict;
6287 prop_data_t ea;
6288
6289 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0)
6290 return 1;
6291
6292 dict = device_properties(sc->bge_dev);
6293 ea = prop_dictionary_get(dict, "mac-address");
6294 if (ea != NULL) {
6295 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
6296 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
6297 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
6298 return 0;
6299 }
6300
6301 return 1;
6302 }
6303
6304 static int
6305 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6306 {
6307 uint32_t mac_addr;
6308
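	/*
	 * The upper 16 bits of BGE_SRAM_MAC_ADDR_HIGH_MB apparently hold the
	 * ASCII signature "HK" (0x484b) when the bootcode has stored a valid
	 * MAC address in NIC SRAM; only trust the address if it is present.
	 */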
6309 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6310 if ((mac_addr >> 16) == 0x484b) {
6311 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6312 ether_addr[1] = (uint8_t)mac_addr;
6313 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6314 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6315 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6316 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6317 ether_addr[5] = (uint8_t)mac_addr;
6318 return 0;
6319 }
6320 return 1;
6321 }
6322
6323 static int
6324 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6325 {
6326 int mac_offset = BGE_EE_MAC_OFFSET;
6327
6328 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
6329 mac_offset = BGE_EE_MAC_OFFSET_5906;
6330
6331 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6332 ETHER_ADDR_LEN));
6333 }
6334
6335 static int
6336 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6337 {
6338
6339 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
6340 return 1;
6341
6342 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6343 ETHER_ADDR_LEN));
6344 }
6345
6346 static int
6347 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6348 {
6349 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6350 /* NOTE: Order is critical */
6351 bge_get_eaddr_fw,
6352 bge_get_eaddr_mem,
6353 bge_get_eaddr_nvram,
6354 bge_get_eaddr_eeprom,
6355 NULL
6356 };
6357 const bge_eaddr_fcn_t *func;
6358
6359 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6360 if ((*func)(sc, eaddr) == 0)
6361 break;
6362 }
6363 return (*func == NULL ? ENXIO : 0);
6364 }
6365