/*	$NetBSD: if_bnx.c,v 1.94 2020/02/28 14:57:55 msaitoh Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.101 2013/03/28 17:21:44 brad Exp $	*/

/*-
 * Copyright (c) 2006-2010 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.94 2020/02/28 14:57:55 msaitoh Exp $");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, C0
 *   BCM5709S A1, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1, B2 (pre-production)
 *   BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options */
/****************************************************************************/
#ifdef BNX_DEBUG
uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
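/*
 * I.e. each of the settings below is a 31-bit threshold: a value v
 * makes the corresponding check fail with probability of roughly
 * v / 2^31, as the table above illustrates.
 */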

/* Controls how often the l2_fhdr frame error check will fail. */
int	bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int	bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int	bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int	bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int	bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table */
/* */
/* Used by bnx_probe() to identify the devices supported by this driver. */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};
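/*
 * Note: a zero bp_subvendor above acts as a wildcard; bnx_lookup()
 * then matches on the vendor/product pair alone.  The table is
 * scanned in order, so the subsystem-specific (HP) entries must
 * precede the generic entry for the same chip.
 */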


/****************************************************************************/
/* Supported Flash NVRAM device data. */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* OpenBSD device entry points. */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines */
/****************************************************************************/
uint32_t	bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
void	bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
int	bnx_miibus_read_reg(device_t, int, int, uint16_t *);
int	bnx_miibus_write_reg(device_t, int, int, uint16_t);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
#endif

/****************************************************************************/
/* */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, uint32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t, uint32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

static void bnx_print_adapter_info(struct bnx_softc *);
static void bnx_probe_pci_caps(struct bnx_softc *);
void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, uint32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
	    uint16_t *, uint32_t *);
int	bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_ifmedia_upd(struct ifnet *);
void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	bnx_init(struct ifnet *);
static void bnx_mgmt_init(struct bnx_softc *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);

/****************************************************************************/
/* OpenBSD device dispatch table. */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function. */
/* */
/* Compares the device to the driver's list of supported devices and */
/* reports back to the OS whether this is the right driver for the device. */
/* */
/* Returns: */
/* 1 if the device is supported, 0 otherwise. */
/****************************************************************************/
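/*
 * Scan the product table for an entry matching the PCI vendor and
 * product IDs and, when the entry specifies a subsystem, the
 * subsystem IDs as well.  Returns the matching entry or NULL.
 */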
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return 1;

	return 0;
}

/****************************************************************************/
/* Adapter information print function. */
/* */
/* Prints the adapter's ASIC type and revision, bus type and speed, */
/* firmware version, buffer configuration, feature flags, and interrupt */
/* coalescing settings. */
/* */
/* Returns: */
/* None. */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{
	device_t dev = sc->bnx_dev;
	int i = 0;

	aprint_normal_dev(dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(dev, "PCIe x%d ", sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5GT/s\n"); break;
		case 2: aprint_normal("5GT/s\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	aprint_normal_dev(dev, "B/C (%s); Bufs (RX:%d;TX:%d); Flags (",
	    sc->bnx_bc_ver, RX_PAGES, TX_PAGES);

	if (sc->bnx_phy_flags & BNX_PHY_2_5G_CAPABLE_FLAG) {
		if (i > 0) aprint_normal("|");
		aprint_normal("2.5G"); i++;
	}

	if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
		if (i > 0) aprint_normal("|");
		aprint_normal("MFW); MFW (%s)\n", sc->bnx_mfw_ver);
	} else {
		aprint_normal(")\n");
	}

	aprint_normal_dev(dev, "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}


/****************************************************************************/
/* PCI Capabilities Probe Function. */
/* */
/* Walks the PCI capabilities list for the device to find what features */
/* are supported. */
/* */
/* Returns: */
/* None. */
/****************************************************************************/
static void
bnx_probe_pci_caps(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	pcireg_t reg;

	/* Check if PCI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, &reg,
	    NULL) != 0) {
		sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, &reg,
	    NULL) != 0) {
		pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    reg + PCIE_LCSR);
		DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
		sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
		sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
		sc->bnx_flags |= BNX_PCIE_FLAG;
	}

	/* Check if MSI capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;

	/* Check if MSI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
}


/****************************************************************************/
/* Device attach function. */
/* */
/* Allocates device resources, performs secondary chip identification, */
/* resets and initializes the hardware, and initializes driver instance */
/* variables. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	uint32_t command;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->bnx_mii;
	uint32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;
	char intrbuf[PCI_INTRSTR_LEN];
	int i, j;

	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_WAITOK);
		pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
		    0, 0, 0, "bnxpkts", NULL, IPL_NET);
	}

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Fetch the bootcode revision. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV);
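	/*
	 * The three high-order bytes of BNX_DEV_INFO_BC_REV each hold
	 * one component of the version; the loop below converts each
	 * byte to decimal ASCII without leading zeroes and separates
	 * the components with dots (e.g. 0x01020300 yields "1.2.3").
	 */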
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bnx_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bnx_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is enabled. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
	if (val & BNX_PORT_FEATURE_ASF_ENABLED) {
		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_BC_STATE_CONDITION);
			if (val & BNX_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}

		/* Check if management firmware is running. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base +
		    BNX_BC_STATE_CONDITION);
		val &= BNX_CONDITION_MFW_RUN_MASK;
		if ((val != BNX_CONDITION_MFW_RUN_UNKNOWN) &&
		    (val != BNX_CONDITION_MFW_RUN_NONE)) {
			uint32_t addr = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_MFW_VER_PTR);

			/* Read the management firmware version string. */
			for (i = 0, j = 0; j < 3; j++) {
				val = bnx_reg_rd_ind(sc, addr + j * 4);
				val = bswap32(val);
				memcpy(&sc->bnx_mfw_ver[i], &val, 4);
				i += 4;
			}
		} else {
			/* May cause firmware synchronization timeouts. */
			BNX_PRINTF(sc, "%s(%d): Management firmware enabled "
			    "but not running!\n", __FILE__, __LINE__);
			strcpy(sc->bnx_mfw_ver, "NOT RUNNING!");

			/* ToDo: Any action the driver should take? */
		}
	}

	bnx_probe_pci_caps(sc);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */
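	/*
	 * For example, with the non-debug defaults below a TX interrupt
	 * fires once 20 transmit BDs have completed, or once a completed
	 * BD has waited 80 ticks, whichever happens first.
	 */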

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
	sc->bnx_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* create workqueue to handle packet allocations */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = bnx_miibus_read_reg;
	mii->mii_writereg = bnx_miibus_write_reg;
	mii->mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, bnx_ifmedia_upd, bnx_ifmedia_sts);

	/* set phyflags and chipid before mii_attach() */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);
	prop_dictionary_set_uint32(dict, "shared_hwcfg", sc->bnx_shared_hw_cfg);
	prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg);

	/* Print some useful adapter info */
	bnx_print_adapter_info(sc);

	mii_flags |= MIIF_DOPAUSE;
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, mii, 0xffffffff,
	    sc->bnx_phy_addr, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);
	callout_setfunc(&sc->bnx_timeout, bnx_tick, sc);

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, bnx_intr,
	    sc, device_xname(self));
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function. */
/* */
/* Stops the controller, resets the controller, and releases resources. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	bnx_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->bnx_mii.mii_media);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return 0;
}

/****************************************************************************/
/* Indirect register read. */
/* */
/* Reads NetXtreme II registers using an index/data register pair in PCI */
/* configuration space.  Using this mechanism avoids issues with posted */
/* reads but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/* The value of the register. */
/****************************************************************************/
uint32_t
bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		uint32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return val;
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write. */
/* */
/* Writes NetXtreme II registers using an index/data register pair in PCI */
/* configuration space.  Using this mechanism avoids issues with posted */
/* writes but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write. */
/* */
/* The NetXtreme II controller uses context memory to track connection */
/* information for L2 and higher network protocols. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

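	/*
	 * On the 5709 family context memory is written through the
	 * CTX_CTRL/CTX_DATA register pair and the WRITE_REQ bit must
	 * be polled until the hardware clears it; older chips use a
	 * simple address/data register pair instead.
	 */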
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read. */
/* */
/* Implements register reads on the MII bus. */
/* */
/* Returns: */
/* 0 on success with the register value stored in *val, ETIMEDOUT on */
/* timeout. */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t data;
	int i, rv = 0;

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}
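	/* (With the remap above, e.g., MII_BMCR at 0x00 is accessed at 0x10.) */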

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	data = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, data);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(data & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			data &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (data & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		rv = ETIMEDOUT;
	} else {
		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		*val = data & 0xffff;

		DBPRINT(sc, BNX_EXCESSIVE,
		    "%s(): phy = %d, reg = 0x%04X, val = 0x%04hX\n", __func__,
		    phy, (uint16_t) reg & 0xffff, *val);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return rv;
}

/****************************************************************************/
/* PHY register write. */
/* */
/* Implements register writes on the MII bus. */
/* */
/* Returns: */
/* 0 on success, ETIMEDOUT on timeout. */
/****************************************************************************/
int
bnx_miibus_write_reg(device_t dev, int phy, int reg, uint16_t val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i, rv = 0;

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04hX\n", __func__,
	    phy, (uint16_t) reg & 0xffff, val);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
		rv = ETIMEDOUT;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return rv;
}

/****************************************************************************/
/* MII bus status change. */
/* */
/* Called by the MII bus driver when the PHY establishes link to set the */
/* MAC interface registers. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	uint32_t rx_mode = sc->rx_mode;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
		sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/* Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_HDX) != 0) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");

	REG_WR(sc, BNX_EMAC_MODE, val);

	/*
	 * 802.3x flow control
	 */
	if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
		DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
		rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
	} else {
		DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
		rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
	}

	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
		DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
		BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
	} else {
		DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
		BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);

		bnx_init_rx_context(sc);
	}
}

/****************************************************************************/
/* Acquire NVRAM lock. */
/* */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is used */
/* by the driver. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

/****************************************************************************/
/* Release NVRAM lock. */
/* */
/* When the caller is finished accessing NVRAM the lock must be released. */
/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is used */
/* by the driver. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish nvram interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access. */
/* */
/* Before writing to NVRAM the caller must enable NVRAM writes. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}

	return 0;
}

/****************************************************************************/
/* Disable NVRAM write access. */
/* */
/* When the caller is finished writing to NVRAM write access must be */
/* disabled. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access. */
/* */
/* Before accessing NVRAM for read or write operations the caller must */
/* enable NVRAM access. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access. */
/* */
/* When the caller is finished accessing NVRAM access must be disabled. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing. */
/* */
/* Non-buffered flash parts require that a page be erased before it is */
/* written. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM. */
/* */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already */
/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
/* */
/* Returns: */
/* 0 on success and the 32 bit value read, positive value on failure. */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
    uint8_t *ret_val, uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}
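	/*
	 * E.g. for the Atmel parts, which use 264-byte pages addressed
	 * with 9 page bits (see flash_table above), linear offset 600
	 * maps to page 2, byte 72: (2 << 9) + 72 = 0x448.
	 */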

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return rc;
}
1637
1638 #ifdef BNX_NVRAM_WRITE_SUPPORT
1639 /****************************************************************************/
1640 /* Write a dword (32 bits) to NVRAM. */
1641 /* */
1642 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1643 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1644 /* enabled NVRAM write access. */
1645 /* */
1646 /* Returns: */
1647 /* 0 on success, positive value on failure. */
1648 /****************************************************************************/
1649 int
1650 bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val,
1651 uint32_t cmd_flags)
1652 {
1653 uint32_t cmd, val32;
1654 int j;
1655
1656 /* Build the command word. */
1657 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1658
1659 /* Calculate the offset for buffered flash if translation is used. */
1660 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1661 offset = ((offset / sc->bnx_flash_info->page_size) <<
1662 sc->bnx_flash_info->page_bits) +
1663 (offset % sc->bnx_flash_info->page_size);
1664 }
1665
1666 /*
1667 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
1669 */
1670 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1671 memcpy(&val32, val, 4);
1672 val32 = htobe32(val32);
1673 REG_WR(sc, BNX_NVM_WRITE, val32);
1674 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1675 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1676
1677 /* Wait for completion. */
1678 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1679 DELAY(5);
1680
1681 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1682 break;
1683 }
1684 if (j >= NVRAM_TIMEOUT_COUNT) {
1685 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1686 "offset 0x%08X\n", __FILE__, __LINE__, offset);
1687 return EBUSY;
1688 }
1689
1690 return 0;
1691 }
1692 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1693
1694 /****************************************************************************/
1695 /* Initialize NVRAM access. */
1696 /* */
1697 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1698 /* access that device. */
1699 /* */
1700 /* Returns: */
1701 /* 0 on success, positive value on failure. */
1702 /****************************************************************************/
1703 int
1704 bnx_init_nvram(struct bnx_softc *sc)
1705 {
1706 uint32_t val;
1707 int j, entry_count, rc = 0;
1708 struct flash_spec *flash;
1709
1710 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1711
1712 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1713 sc->bnx_flash_info = &flash_5709;
1714 goto bnx_init_nvram_get_flash_size;
1715 }
1716
1717 /* Determine the selected interface. */
1718 val = REG_RD(sc, BNX_NVM_CFG1);
1719
1720 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1721
1722 /*
1723 * Flash reconfiguration is required to support additional
1724 * NVRAM devices not directly supported in hardware.
1725 * Check if the flash interface was reconfigured
1726 * by the bootcode.
1727 */
1728
1729 if (val & 0x40000000) {
1730 /* Flash interface reconfigured by bootcode. */
1731
1732 DBPRINT(sc, BNX_INFO_LOAD,
1733 "bnx_init_nvram(): Flash WAS reconfigured.\n");
1734
1735 for (j = 0, flash = &flash_table[0]; j < entry_count;
1736 j++, flash++) {
1737 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1738 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1739 sc->bnx_flash_info = flash;
1740 break;
1741 }
1742 }
1743 } else {
1744 /* Flash interface not yet reconfigured. */
1745 uint32_t mask;
1746
1747 DBPRINT(sc, BNX_INFO_LOAD,
1748 "bnx_init_nvram(): Flash was NOT reconfigured.\n");
1749
1750 if (val & (1 << 23))
1751 mask = FLASH_BACKUP_STRAP_MASK;
1752 else
1753 mask = FLASH_STRAP_MASK;
1754
1755 /* Look for the matching NVRAM device configuration data. */
1756 for (j = 0, flash = &flash_table[0]; j < entry_count;
1757 j++, flash++) {
1758 /* Check if the dev matches any of the known devices. */
1759 if ((val & mask) == (flash->strapping & mask)) {
1760 /* Found a device match. */
1761 sc->bnx_flash_info = flash;
1762
1763 /* Request access to the flash interface. */
1764 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1765 return rc;
1766
1767 /* Reconfigure the flash interface. */
1768 bnx_enable_nvram_access(sc);
1769 REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1770 REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1771 REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1772 REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1773 bnx_disable_nvram_access(sc);
1774 bnx_release_nvram_lock(sc);
1775
1776 break;
1777 }
1778 }
1779 }
1780
1781 /* Check if a matching device was found. */
1782 if (j == entry_count) {
1783 sc->bnx_flash_info = NULL;
1784 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1785 __FILE__, __LINE__);
1786 rc = ENODEV;
1787 }
1788
1789 bnx_init_nvram_get_flash_size:
	/* Get the NVRAM size from the shared memory interface, if set there. */
1791 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1792 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1793 if (val)
1794 sc->bnx_flash_size = val;
1795 else
1796 sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1797
1798 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1799 "0x%08X\n", sc->bnx_flash_info->total_size);
1800
1801 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
1802
1803 return rc;
1804 }
1805
1806 /****************************************************************************/
1807 /* Read an arbitrary range of data from NVRAM. */
1808 /* */
1809 /* Prepares the NVRAM interface for access and reads the requested data */
1810 /* into the supplied buffer. */
1811 /* */
1812 /* Returns: */
1813 /* 0 on success and the data read, positive value on failure. */
1814 /****************************************************************************/
1815 int
1816 bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf,
1817 int buf_size)
1818 {
1819 int rc = 0;
1820 uint32_t cmd_flags, offset32, len32, extra;
1821
1822 if (buf_size == 0)
1823 return 0;
1824
1825 /* Request access to the flash interface. */
1826 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1827 return rc;
1828
1829 /* Enable access to flash interface */
1830 bnx_enable_nvram_access(sc);
1831
1832 len32 = buf_size;
1833 offset32 = offset;
1834 extra = 0;
1835
1836 cmd_flags = 0;
1837
1838 if (offset32 & 3) {
1839 uint8_t buf[4];
1840 uint32_t pre_len;
1841
1842 offset32 &= ~3;
1843 pre_len = 4 - (offset & 3);
1844
1845 if (pre_len >= len32) {
1846 pre_len = len32;
1847 cmd_flags =
1848 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1849 } else
1850 cmd_flags = BNX_NVM_COMMAND_FIRST;
1851
1852 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1853
		if (rc)
			goto bnx_nvram_read_exit;
1856
1857 memcpy(ret_buf, buf + (offset & 3), pre_len);
1858
1859 offset32 += 4;
1860 ret_buf += pre_len;
1861 len32 -= pre_len;
1862 }
1863
1864 if (len32 & 3) {
1865 extra = 4 - (len32 & 3);
1866 len32 = (len32 + 4) & ~3;
1867 }
1868
1869 if (len32 == 4) {
1870 uint8_t buf[4];
1871
1872 if (cmd_flags)
1873 cmd_flags = BNX_NVM_COMMAND_LAST;
1874 else
1875 cmd_flags =
1876 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1877
1878 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1879
1880 memcpy(ret_buf, buf, 4 - extra);
1881 } else if (len32 > 0) {
1882 uint8_t buf[4];
1883
1884 /* Read the first word. */
1885 if (cmd_flags)
1886 cmd_flags = 0;
1887 else
1888 cmd_flags = BNX_NVM_COMMAND_FIRST;
1889
1890 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1891
1892 /* Advance to the next dword. */
1893 offset32 += 4;
1894 ret_buf += 4;
1895 len32 -= 4;
1896
1897 while (len32 > 4 && rc == 0) {
1898 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1899
1900 /* Advance to the next dword. */
1901 offset32 += 4;
1902 ret_buf += 4;
1903 len32 -= 4;
1904 }
1905
		if (rc)
			goto bnx_nvram_read_exit;
1908
1909 cmd_flags = BNX_NVM_COMMAND_LAST;
1910 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1911
1912 memcpy(ret_buf, buf, 4 - extra);
1913 }
1914
 bnx_nvram_read_exit:
	/* Disable access to flash interface and release the lock. */
1916 bnx_disable_nvram_access(sc);
1917 bnx_release_nvram_lock(sc);
1918
1919 return rc;
1920 }
1921
1922 #ifdef BNX_NVRAM_WRITE_SUPPORT
1923 /****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
1925 /* */
1926 /* Prepares the NVRAM interface for write access and writes the requested */
1927 /* data from the supplied buffer. The caller is responsible for */
1928 /* calculating any appropriate CRCs. */
1929 /* */
1930 /* Returns: */
1931 /* 0 on success, positive value on failure. */
1932 /****************************************************************************/
1933 int
1934 bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf,
1935 int buf_size)
1936 {
1937 uint32_t written, offset32, len32;
1938 uint8_t *buf, start[4], end[4];
1939 int rc = 0;
1940 int align_start, align_end;
1941
1942 buf = data_buf;
1943 offset32 = offset;
1944 len32 = buf_size;
1945 align_start = align_end = 0;
1946
1947 if ((align_start = (offset32 & 3))) {
1948 offset32 &= ~3;
1949 len32 += align_start;
1950 if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1951 return rc;
1952 }
1953
1954 if (len32 & 3) {
1955 if ((len32 > 4) || !align_start) {
1956 align_end = 4 - (len32 & 3);
1957 len32 += align_end;
1958 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1959 end, 4)))
1960 return rc;
1961 }
1962 }
1963
1964 if (align_start || align_end) {
1965 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1966 if (buf == NULL)
1967 return ENOMEM;
1968
1969 if (align_start)
1970 memcpy(buf, start, 4);
1971
1972 if (align_end)
1973 memcpy(buf + len32 - 4, end, 4);
1974
1975 memcpy(buf + align_start, data_buf, buf_size);
1976 }
1977
1978 written = 0;
1979 while ((written < len32) && (rc == 0)) {
1980 uint32_t page_start, page_end, data_start, data_end;
1981 uint32_t addr, cmd_flags;
1982 int i;
1983 uint8_t flash_buffer[264];
1984
1985 /* Find the page_start addr */
1986 page_start = offset32 + written;
1987 page_start -= (page_start % sc->bnx_flash_info->page_size);
1988 /* Find the page_end addr */
1989 page_end = page_start + sc->bnx_flash_info->page_size;
1990 /* Find the data_start addr */
1991 data_start = (written == 0) ? offset32 : page_start;
1992 /* Find the data_end addr */
1993 data_end = (page_end > offset32 + len32) ?
1994 (offset32 + len32) : page_end;
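		/*
		 * Example (illustrative values only): with a 256 byte
		 * page, an (aligned) 600 byte write at offset 100 is
		 * split into the windows [100,256), [256,512) and
		 * [512,700); only the first and last pages carry
		 * surrounding bytes that must be preserved.
		 */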
1995
1996 /* Request access to the flash interface. */
1997 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1998 goto nvram_write_end;
1999
2000 /* Enable access to flash interface */
2001 bnx_enable_nvram_access(sc);
2002
2003 cmd_flags = BNX_NVM_COMMAND_FIRST;
2004 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2005 int j;
2006
			/*
			 * Read the whole page into the buffer
			 * (non-buffered flash only).
			 */
2009 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
2010 if (j == (sc->bnx_flash_info->page_size - 4))
2011 cmd_flags |= BNX_NVM_COMMAND_LAST;
2012
2013 rc = bnx_nvram_read_dword(sc,
2014 page_start + j,
2015 &flash_buffer[j],
2016 cmd_flags);
2017
2018 if (rc)
2019 goto nvram_write_end;
2020
2021 cmd_flags = 0;
2022 }
2023 }
2024
2025 /* Enable writes to flash interface (unlock write-protect) */
2026 if ((rc = bnx_enable_nvram_write(sc)) != 0)
2027 goto nvram_write_end;
2028
2029 /* Erase the page */
2030 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
2031 goto nvram_write_end;
2032
2033 /* Re-enable the write again for the actual write */
2034 bnx_enable_nvram_write(sc);
2035
		/*
		 * Loop to write back the buffer data from page_start to
		 * data_start.
		 */
2038 i = 0;
2039 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2040 for (addr = page_start; addr < data_start;
2041 addr += 4, i += 4) {
2042
2043 rc = bnx_nvram_write_dword(sc, addr,
2044 &flash_buffer[i], cmd_flags);
2045
2046 if (rc != 0)
2047 goto nvram_write_end;
2048
2049 cmd_flags = 0;
2050 }
2051 }
2052
		/*
		 * Loop to write the new data from data_start to data_end.
		 * Note: 'i' must keep pace with 'addr' (4 bytes per dword)
		 * so that the write-back of flash_buffer below resumes at
		 * the correct byte offset on non-buffered flash.
		 */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
2055 if ((addr == page_end - 4) ||
2056 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
2057 && (addr == data_end - 4))) {
2058
2059 cmd_flags |= BNX_NVM_COMMAND_LAST;
2060 }
2061
2062 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
2063
2064 if (rc != 0)
2065 goto nvram_write_end;
2066
2067 cmd_flags = 0;
2068 buf += 4;
2069 }
2070
		/*
		 * Loop to write back the buffer data from data_end to
		 * page_end.
		 */
2073 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2074 for (addr = data_end; addr < page_end;
2075 addr += 4, i += 4) {
2076
2077 if (addr == page_end-4)
2078 cmd_flags = BNX_NVM_COMMAND_LAST;
2079
2080 rc = bnx_nvram_write_dword(sc, addr,
2081 &flash_buffer[i], cmd_flags);
2082
2083 if (rc != 0)
2084 goto nvram_write_end;
2085
2086 cmd_flags = 0;
2087 }
2088 }
2089
2090 /* Disable writes to flash interface (lock write-protect) */
2091 bnx_disable_nvram_write(sc);
2092
2093 /* Disable access to flash interface */
2094 bnx_disable_nvram_access(sc);
2095 bnx_release_nvram_lock(sc);
2096
2097 /* Increment written */
2098 written += data_end - data_start;
2099 }
2100
2101 nvram_write_end:
2102 if (align_start || align_end)
2103 free(buf, M_DEVBUF);
2104
2105 return rc;
2106 }
2107 #endif /* BNX_NVRAM_WRITE_SUPPORT */
2108
2109 /****************************************************************************/
2110 /* Verifies that NVRAM is accessible and contains valid data. */
2111 /* */
2112 /* Reads the configuration data from NVRAM and verifies that the CRC is */
2113 /* correct. */
2114 /* */
2115 /* Returns: */
2116 /* 0 on success, positive value on failure. */
2117 /****************************************************************************/
2118 int
2119 bnx_nvram_test(struct bnx_softc *sc)
2120 {
2121 uint32_t buf[BNX_NVRAM_SIZE / 4];
2122 uint8_t *data = (uint8_t *) buf;
2123 int rc = 0;
2124 uint32_t magic, csum;
2125
2126 /*
2127 * Check that the device NVRAM is valid by reading
2128 * the magic value at offset 0.
2129 */
2130 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2131 goto bnx_nvram_test_done;
2132
2133 magic = be32toh(buf[0]);
2134 if (magic != BNX_NVRAM_MAGIC) {
2135 rc = ENODEV;
2136 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2137 "Expected: 0x%08X, Found: 0x%08X\n",
2138 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2139 goto bnx_nvram_test_done;
2140 }
2141
2142 /*
2143 * Verify that the device NVRAM includes valid
2144 * configuration data.
2145 */
2146 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2147 goto bnx_nvram_test_done;
2148
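	/*
	 * Each 0x100 byte configuration region is assumed to carry its
	 * own CRC32 within it; running the little-endian CRC over the
	 * entire region, checksum included, should then produce the
	 * fixed CRC residual rather than zero.
	 */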
2149 csum = ether_crc32_le(data, 0x100);
2150 if (csum != BNX_CRC32_RESIDUAL) {
2151 rc = ENODEV;
2152 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2153 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2154 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2155 goto bnx_nvram_test_done;
2156 }
2157
2158 csum = ether_crc32_le(data + 0x100, 0x100);
2159 if (csum != BNX_CRC32_RESIDUAL) {
2160 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2161 "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2162 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2163 rc = ENODEV;
2164 }
2165
2166 bnx_nvram_test_done:
2167 return rc;
2168 }
2169
2170 /****************************************************************************/
2171 /* Identifies the current media type of the controller and sets the PHY */
2172 /* address. */
2173 /* */
2174 /* Returns: */
2175 /* Nothing. */
2176 /****************************************************************************/
2177 void
2178 bnx_get_media(struct bnx_softc *sc)
2179 {
2180 sc->bnx_phy_addr = 1;
2181
2182 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2183 uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2184 uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2185 uint32_t strap;
2186
2187 /*
2188 * The BCM5709S is software configurable
2189 * for Copper or SerDes operation.
2190 */
2191 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2192 DBPRINT(sc, BNX_INFO_LOAD,
2193 "5709 bonded for copper.\n");
2194 goto bnx_get_media_exit;
2195 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2196 DBPRINT(sc, BNX_INFO_LOAD,
2197 "5709 bonded for dual media.\n");
2198 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2199 goto bnx_get_media_exit;
2200 }
2201
2202 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2203 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2204 else {
2205 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
2206 >> 8;
2207 }
2208
2209 if (sc->bnx_pa.pa_function == 0) {
2210 switch (strap) {
2211 case 0x4:
2212 case 0x5:
2213 case 0x6:
2214 DBPRINT(sc, BNX_INFO_LOAD,
2215 "BCM5709 s/w configured for SerDes.\n");
2216 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2217 break;
2218 default:
2219 DBPRINT(sc, BNX_INFO_LOAD,
2220 "BCM5709 s/w configured for Copper.\n");
2221 }
2222 } else {
2223 switch (strap) {
2224 case 0x1:
2225 case 0x2:
2226 case 0x4:
2227 DBPRINT(sc, BNX_INFO_LOAD,
2228 "BCM5709 s/w configured for SerDes.\n");
2229 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2230 break;
2231 default:
2232 DBPRINT(sc, BNX_INFO_LOAD,
2233 "BCM5709 s/w configured for Copper.\n");
2234 }
2235 }
2236
2237 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
2238 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2239
2240 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
2241 uint32_t val;
2242
2243 sc->bnx_flags |= BNX_NO_WOL_FLAG;
2244
2245 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
2246 sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;
2247
2248 /*
2249 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
2250 * separate PHY for SerDes.
2251 */
2252 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
2253 sc->bnx_phy_addr = 2;
2254 val = REG_RD_IND(sc, sc->bnx_shmem_base +
2255 BNX_SHARED_HW_CFG_CONFIG);
2256 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
2257 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
2258 DBPRINT(sc, BNX_INFO_LOAD,
2259 "Found 2.5Gb capable adapter\n");
2260 }
2261 }
2262 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
2263 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
2264 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;
2265
2266 bnx_get_media_exit:
2267 DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
2268 "Using PHY address %d.\n", sc->bnx_phy_addr);
2269 }
2270
2271 /****************************************************************************/
2272 /* Performs PHY initialization required before MII drivers access the */
2273 /* device. */
2274 /* */
2275 /* Returns: */
2276 /* Nothing. */
2277 /****************************************************************************/
2278 void
2279 bnx_init_media(struct bnx_softc *sc)
2280 {
2281 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
2282 /*
2283 * Configure the BCM5709S / BCM5716S PHYs to use traditional
2284 * IEEE Clause 22 method. Otherwise we have no way to attach
2285 * the PHY to the mii(4) layer. PHY specific configuration
2286 * is done by the mii(4) layer.
2287 */
2288
2289 /* Select auto-negotiation MMD of the PHY. */
2290 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2291 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2292
2293 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2294 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2295
2296 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2297 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2298 }
2299 }
2300
2301 /****************************************************************************/
2302 /* Free any DMA memory owned by the driver. */
2303 /* */
/* Scans through each data structure that requires DMA memory and frees    */
2305 /* the memory if allocated. */
2306 /* */
2307 /* Returns: */
2308 /* Nothing. */
2309 /****************************************************************************/
2310 void
2311 bnx_dma_free(struct bnx_softc *sc)
2312 {
2313 int i;
2314
2315 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2316
2317 /* Destroy the status block. */
2318 if (sc->status_block != NULL && sc->status_map != NULL) {
2319 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2320 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2321 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2322 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
2323 BNX_STATUS_BLK_SZ);
2324 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2325 sc->status_rseg);
2326 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2327 sc->status_block = NULL;
2328 sc->status_map = NULL;
2329 }
2330
2331 /* Destroy the statistics block. */
2332 if (sc->stats_block != NULL && sc->stats_map != NULL) {
2333 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2334 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
2335 BNX_STATS_BLK_SZ);
2336 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2337 sc->stats_rseg);
2338 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2339 sc->stats_block = NULL;
2340 sc->stats_map = NULL;
2341 }
2342
2343 /* Free, unmap and destroy all context memory pages. */
2344 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2345 for (i = 0; i < sc->ctx_pages; i++) {
2346 if (sc->ctx_block[i] != NULL) {
2347 bus_dmamap_unload(sc->bnx_dmatag,
2348 sc->ctx_map[i]);
2349 bus_dmamem_unmap(sc->bnx_dmatag,
2350 (void *)sc->ctx_block[i],
2351 BCM_PAGE_SIZE);
2352 bus_dmamem_free(sc->bnx_dmatag,
2353 &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2354 bus_dmamap_destroy(sc->bnx_dmatag,
2355 sc->ctx_map[i]);
2356 sc->ctx_block[i] = NULL;
2357 }
2358 }
2359 }
2360
2361 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2362 for (i = 0; i < TX_PAGES; i++ ) {
2363 if (sc->tx_bd_chain[i] != NULL &&
2364 sc->tx_bd_chain_map[i] != NULL) {
2365 bus_dmamap_unload(sc->bnx_dmatag,
2366 sc->tx_bd_chain_map[i]);
2367 bus_dmamem_unmap(sc->bnx_dmatag,
2368 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2369 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2370 sc->tx_bd_chain_rseg[i]);
2371 bus_dmamap_destroy(sc->bnx_dmatag,
2372 sc->tx_bd_chain_map[i]);
2373 sc->tx_bd_chain[i] = NULL;
2374 sc->tx_bd_chain_map[i] = NULL;
2375 }
2376 }
2377
2378 /* Destroy the TX dmamaps. */
	/* This isn't necessary since we don't allocate them up front. */
2380
2381 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2382 for (i = 0; i < RX_PAGES; i++ ) {
2383 if (sc->rx_bd_chain[i] != NULL &&
2384 sc->rx_bd_chain_map[i] != NULL) {
2385 bus_dmamap_unload(sc->bnx_dmatag,
2386 sc->rx_bd_chain_map[i]);
2387 bus_dmamem_unmap(sc->bnx_dmatag,
2388 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2389 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2390 sc->rx_bd_chain_rseg[i]);
2391
2392 bus_dmamap_destroy(sc->bnx_dmatag,
2393 sc->rx_bd_chain_map[i]);
2394 sc->rx_bd_chain[i] = NULL;
2395 sc->rx_bd_chain_map[i] = NULL;
2396 }
2397 }
2398
2399 /* Unload and destroy the RX mbuf maps. */
2400 for (i = 0; i < TOTAL_RX_BD; i++) {
2401 if (sc->rx_mbuf_map[i] != NULL) {
2402 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2403 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2404 }
2405 }
2406
2407 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2408 }
2409
2410 /****************************************************************************/
2411 /* Allocate any DMA memory needed by the driver. */
2412 /* */
/* Allocates DMA memory needed for the various global structures used by   */
2414 /* hardware. */
2415 /* */
2416 /* Returns: */
2417 /* 0 for success, positive value for failure. */
2418 /****************************************************************************/
2419 int
2420 bnx_dma_alloc(struct bnx_softc *sc)
2421 {
2422 int i, rc = 0;
2423
2424 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2425
2426 /*
2427 * Allocate DMA memory for the status block, map the memory into DMA
2428 * space, and fetch the physical address of the block.
2429 */
2430 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2431 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2432 aprint_error_dev(sc->bnx_dev,
2433 "Could not create status block DMA map!\n");
2434 rc = ENOMEM;
2435 goto bnx_dma_alloc_exit;
2436 }
2437
2438 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2439 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2440 &sc->status_rseg, BUS_DMA_NOWAIT)) {
2441 aprint_error_dev(sc->bnx_dev,
2442 "Could not allocate status block DMA memory!\n");
2443 rc = ENOMEM;
2444 goto bnx_dma_alloc_exit;
2445 }
2446
2447 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2448 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
2449 aprint_error_dev(sc->bnx_dev,
2450 "Could not map status block DMA memory!\n");
2451 rc = ENOMEM;
2452 goto bnx_dma_alloc_exit;
2453 }
2454
2455 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2456 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2457 aprint_error_dev(sc->bnx_dev,
2458 "Could not load status block DMA memory!\n");
2459 rc = ENOMEM;
2460 goto bnx_dma_alloc_exit;
2461 }
2462
2463 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2464 sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2465
2466 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2467 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);
2468
2469 /* DRC - Fix for 64 bit addresses. */
2470 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2471 (uint32_t) sc->status_block_paddr);
2472
2473 /* BCM5709 uses host memory as cache for context memory. */
2474 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2475 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2476 if (sc->ctx_pages == 0)
2477 sc->ctx_pages = 1;
2478 if (sc->ctx_pages > 4) /* XXX */
2479 sc->ctx_pages = 4;
2480
2481 DBRUNIF((sc->ctx_pages > 512),
2482 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
2483 __FILE__, __LINE__, sc->ctx_pages));
2484
2486 for (i = 0; i < sc->ctx_pages; i++) {
2487 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2488 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2489 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2490 &sc->ctx_map[i]) != 0) {
2491 rc = ENOMEM;
2492 goto bnx_dma_alloc_exit;
2493 }
2494
2495 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2496 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2497 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2498 rc = ENOMEM;
2499 goto bnx_dma_alloc_exit;
2500 }
2501
2502 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2503 sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2504 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
2505 rc = ENOMEM;
2506 goto bnx_dma_alloc_exit;
2507 }
2508
2509 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2510 sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2511 BUS_DMA_NOWAIT) != 0) {
2512 rc = ENOMEM;
2513 goto bnx_dma_alloc_exit;
2514 }
2515
			memset(sc->ctx_block[i], 0, BCM_PAGE_SIZE);
2517 }
2518 }
2519
2520 /*
2521 * Allocate DMA memory for the statistics block, map the memory into
2522 * DMA space, and fetch the physical address of the block.
2523 */
2524 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2525 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2526 aprint_error_dev(sc->bnx_dev,
2527 "Could not create stats block DMA map!\n");
2528 rc = ENOMEM;
2529 goto bnx_dma_alloc_exit;
2530 }
2531
2532 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2533 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2534 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2535 aprint_error_dev(sc->bnx_dev,
2536 "Could not allocate stats block DMA memory!\n");
2537 rc = ENOMEM;
2538 goto bnx_dma_alloc_exit;
2539 }
2540
2541 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2542 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2543 aprint_error_dev(sc->bnx_dev,
2544 "Could not map stats block DMA memory!\n");
2545 rc = ENOMEM;
2546 goto bnx_dma_alloc_exit;
2547 }
2548
2549 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2550 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2551 aprint_error_dev(sc->bnx_dev,
2552 "Could not load status block DMA memory!\n");
2553 rc = ENOMEM;
2554 goto bnx_dma_alloc_exit;
2555 }
2556
2557 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2558 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2559
2560 /* DRC - Fix for 64 bit address. */
2561 DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2562 (uint32_t) sc->stats_block_paddr);
2563
2564 /*
2565 * Allocate DMA memory for the TX buffer descriptor chain,
2566 * and fetch the physical address of the block.
2567 */
2568 for (i = 0; i < TX_PAGES; i++) {
2569 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2570 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2571 &sc->tx_bd_chain_map[i])) {
2572 aprint_error_dev(sc->bnx_dev,
2573 "Could not create Tx desc %d DMA map!\n", i);
2574 rc = ENOMEM;
2575 goto bnx_dma_alloc_exit;
2576 }
2577
2578 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2579 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2580 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2581 aprint_error_dev(sc->bnx_dev,
2582 "Could not allocate TX desc %d DMA memory!\n",
2583 i);
2584 rc = ENOMEM;
2585 goto bnx_dma_alloc_exit;
2586 }
2587
2588 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2589 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2590 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2591 aprint_error_dev(sc->bnx_dev,
2592 "Could not map TX desc %d DMA memory!\n", i);
2593 rc = ENOMEM;
2594 goto bnx_dma_alloc_exit;
2595 }
2596
2597 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2598 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2599 BUS_DMA_NOWAIT)) {
2600 aprint_error_dev(sc->bnx_dev,
2601 "Could not load TX desc %d DMA memory!\n", i);
2602 rc = ENOMEM;
2603 goto bnx_dma_alloc_exit;
2604 }
2605
2606 sc->tx_bd_chain_paddr[i] =
2607 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2608
2609 /* DRC - Fix for 64 bit systems. */
2610 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2611 i, (uint32_t) sc->tx_bd_chain_paddr[i]);
2612 }
2613
2614 /*
2615 * Create lists to hold TX mbufs.
2616 */
2617 TAILQ_INIT(&sc->tx_free_pkts);
2618 TAILQ_INIT(&sc->tx_used_pkts);
2619 sc->tx_pkt_count = 0;
2620 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET);
2621
2622 /*
2623 * Allocate DMA memory for the Rx buffer descriptor chain,
2624 * and fetch the physical address of the block.
2625 */
2626 for (i = 0; i < RX_PAGES; i++) {
2627 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2628 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2629 &sc->rx_bd_chain_map[i])) {
2630 aprint_error_dev(sc->bnx_dev,
2631 "Could not create Rx desc %d DMA map!\n", i);
2632 rc = ENOMEM;
2633 goto bnx_dma_alloc_exit;
2634 }
2635
2636 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2637 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2638 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2639 aprint_error_dev(sc->bnx_dev,
2640 "Could not allocate Rx desc %d DMA memory!\n", i);
2641 rc = ENOMEM;
2642 goto bnx_dma_alloc_exit;
2643 }
2644
2645 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2646 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2647 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2648 aprint_error_dev(sc->bnx_dev,
2649 "Could not map Rx desc %d DMA memory!\n", i);
2650 rc = ENOMEM;
2651 goto bnx_dma_alloc_exit;
2652 }
2653
2654 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2655 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2656 BUS_DMA_NOWAIT)) {
2657 aprint_error_dev(sc->bnx_dev,
2658 "Could not load Rx desc %d DMA memory!\n", i);
2659 rc = ENOMEM;
2660 goto bnx_dma_alloc_exit;
2661 }
2662
2663 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
2664 sc->rx_bd_chain_paddr[i] =
2665 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2666
2667 /* DRC - Fix for 64 bit systems. */
2668 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2669 i, (uint32_t) sc->rx_bd_chain_paddr[i]);
2670 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2671 0, BNX_RX_CHAIN_PAGE_SZ,
2672 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2673 }
2674
2675 /*
2676 * Create DMA maps for the Rx buffer mbufs.
2677 */
2678 for (i = 0; i < TOTAL_RX_BD; i++) {
2679 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
2680 BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
2681 &sc->rx_mbuf_map[i])) {
2682 aprint_error_dev(sc->bnx_dev,
2683 "Could not create Rx mbuf %d DMA map!\n", i);
2684 rc = ENOMEM;
2685 goto bnx_dma_alloc_exit;
2686 }
2687 }
2688
2689 bnx_dma_alloc_exit:
2690 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2691
2692 return rc;
2693 }
2694
2695 /****************************************************************************/
2696 /* Release all resources used by the driver. */
2697 /* */
2698 /* Releases all resources acquired by the driver including interrupts, */
2699 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2700 /* */
2701 /* Returns: */
2702 /* Nothing. */
2703 /****************************************************************************/
2704 void
2705 bnx_release_resources(struct bnx_softc *sc)
2706 {
2707 struct pci_attach_args *pa = &(sc->bnx_pa);
2708
2709 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2710
2711 bnx_dma_free(sc);
2712
2713 if (sc->bnx_intrhand != NULL)
2714 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2715
2716 if (sc->bnx_size)
2717 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2718
2719 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2720 }
2721
2722 /****************************************************************************/
2723 /* Firmware synchronization. */
2724 /* */
2725 /* Before performing certain events such as a chip reset, synchronize with */
2726 /* the firmware first. */
2727 /* */
2728 /* Returns: */
2729 /* 0 for success, positive value for failure. */
2730 /****************************************************************************/
2731 int
2732 bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data)
2733 {
2734 int i, rc = 0;
2735 uint32_t val;
2736
2737 /* Don't waste any time if we've timed out before. */
2738 if (sc->bnx_fw_timed_out) {
2739 rc = EBUSY;
2740 goto bnx_fw_sync_exit;
2741 }
2742
2743 /* Increment the message sequence number. */
2744 sc->bnx_fw_wr_seq++;
2745 msg_data |= sc->bnx_fw_wr_seq;
2746
2747 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2748 msg_data);
2749
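	/*
	 * Handshake sketch: the driver posts the command plus sequence
	 * number in the driver mailbox, and the bootcode is expected to
	 * echo the sequence number back in the ACK field of the
	 * firmware mailbox once the request has been processed.
	 */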
2750 /* Send the message to the bootcode driver mailbox. */
2751 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2752
2753 /* Wait for the bootcode to acknowledge the message. */
2754 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2755 /* Check for a response in the bootcode firmware mailbox. */
2756 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2757 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2758 break;
2759 DELAY(1000);
2760 }
2761
2762 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2763 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2764 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2765 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2766 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2767
2768 msg_data &= ~BNX_DRV_MSG_CODE;
2769 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2770
2771 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2772
2773 sc->bnx_fw_timed_out = 1;
2774 rc = EBUSY;
2775 }
2776
2777 bnx_fw_sync_exit:
2778 return rc;
2779 }
2780
2781 /****************************************************************************/
2782 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2783 /* */
2784 /* Returns: */
2785 /* Nothing. */
2786 /****************************************************************************/
2787 void
2788 bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code,
2789 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2790 {
2791 int i;
2792 uint32_t val;
2793
2794 /* Set the page size used by RV2P. */
2795 if (rv2p_proc == RV2P_PROC2) {
2796 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2797 USABLE_RX_BD_PER_PAGE);
2798 }
2799
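	/*
	 * The image is an array of 64-bit instructions; each pair of
	 * 32-bit words is staged in the INSTR_HIGH/INSTR_LOW holding
	 * registers and then committed to instruction slot i / 8 by the
	 * ADDR_CMD write below.
	 */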
2800 for (i = 0; i < rv2p_code_len; i += 8) {
2801 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2802 rv2p_code++;
2803 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2804 rv2p_code++;
2805
2806 if (rv2p_proc == RV2P_PROC1) {
2807 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2808 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2809 } else {
2810 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2811 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2812 }
2813 }
2814
2815 /* Reset the processor, un-stall is done later. */
2816 if (rv2p_proc == RV2P_PROC1)
2817 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2818 else
2819 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2820 }
2821
2822 /****************************************************************************/
2823 /* Load RISC processor firmware. */
2824 /* */
/* Loads firmware from the file bnxfw.h into the scratchpad memory         */
2826 /* associated with a particular processor. */
2827 /* */
2828 /* Returns: */
2829 /* Nothing. */
2830 /****************************************************************************/
2831 void
2832 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2833 struct fw_info *fw)
2834 {
2835 uint32_t offset;
2836 uint32_t val;
2837
2838 /* Halt the CPU. */
2839 val = REG_RD_IND(sc, cpu_reg->mode);
2840 val |= cpu_reg->mode_value_halt;
2841 REG_WR_IND(sc, cpu_reg->mode, val);
2842 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2843
2844 /* Load the Text area. */
2845 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
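	/*
	 * The linked firmware addresses are in the processor's MIPS
	 * view (based at mips_view_base); subtracting that base and
	 * adding spad_base rebases each section onto the scratchpad
	 * window reachable through the indirect register interface.
	 */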
2846 if (fw->text) {
2847 int j;
2848
2849 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2850 REG_WR_IND(sc, offset, fw->text[j]);
2851 }
2852
2853 /* Load the Data area. */
2854 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2855 if (fw->data) {
2856 int j;
2857
2858 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2859 REG_WR_IND(sc, offset, fw->data[j]);
2860 }
2861
2862 /* Load the SBSS area. */
2863 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2864 if (fw->sbss) {
2865 int j;
2866
2867 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2868 REG_WR_IND(sc, offset, fw->sbss[j]);
2869 }
2870
2871 /* Load the BSS area. */
2872 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2873 if (fw->bss) {
2874 int j;
2875
2876 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2877 REG_WR_IND(sc, offset, fw->bss[j]);
2878 }
2879
2880 /* Load the Read-Only area. */
2881 offset = cpu_reg->spad_base +
2882 (fw->rodata_addr - cpu_reg->mips_view_base);
2883 if (fw->rodata) {
2884 int j;
2885
2886 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2887 REG_WR_IND(sc, offset, fw->rodata[j]);
2888 }
2889
2890 /* Clear the pre-fetch instruction. */
2891 REG_WR_IND(sc, cpu_reg->inst, 0);
2892 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2893
2894 /* Start the CPU. */
2895 val = REG_RD_IND(sc, cpu_reg->mode);
2896 val &= ~cpu_reg->mode_value_halt;
2897 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2898 REG_WR_IND(sc, cpu_reg->mode, val);
2899 }
2900
2901 /****************************************************************************/
2902 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2903 /* */
2904 /* Loads the firmware for each CPU and starts the CPU. */
2905 /* */
2906 /* Returns: */
2907 /* Nothing. */
2908 /****************************************************************************/
2909 void
2910 bnx_init_cpus(struct bnx_softc *sc)
2911 {
2912 struct cpu_reg cpu_reg;
2913 struct fw_info fw;
2914
2915 switch (BNX_CHIP_NUM(sc)) {
2916 case BNX_CHIP_NUM_5709:
2917 /* Initialize the RV2P processor. */
2918 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) {
2919 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1,
2920 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1);
2921 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2,
2922 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2);
2923 } else {
2924 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1,
2925 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1);
2926 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2,
2927 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2);
2928 }
2929
2930 /* Initialize the RX Processor. */
2931 cpu_reg.mode = BNX_RXP_CPU_MODE;
2932 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2933 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2934 cpu_reg.state = BNX_RXP_CPU_STATE;
2935 cpu_reg.state_value_clear = 0xffffff;
2936 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2937 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2938 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2939 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2940 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2941 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2942 cpu_reg.mips_view_base = 0x8000000;
2943
2944 fw.ver_major = bnx_RXP_b09FwReleaseMajor;
2945 fw.ver_minor = bnx_RXP_b09FwReleaseMinor;
2946 fw.ver_fix = bnx_RXP_b09FwReleaseFix;
2947 fw.start_addr = bnx_RXP_b09FwStartAddr;
2948
2949 fw.text_addr = bnx_RXP_b09FwTextAddr;
2950 fw.text_len = bnx_RXP_b09FwTextLen;
2951 fw.text_index = 0;
2952 fw.text = bnx_RXP_b09FwText;
2953
2954 fw.data_addr = bnx_RXP_b09FwDataAddr;
2955 fw.data_len = bnx_RXP_b09FwDataLen;
2956 fw.data_index = 0;
2957 fw.data = bnx_RXP_b09FwData;
2958
2959 fw.sbss_addr = bnx_RXP_b09FwSbssAddr;
2960 fw.sbss_len = bnx_RXP_b09FwSbssLen;
2961 fw.sbss_index = 0;
2962 fw.sbss = bnx_RXP_b09FwSbss;
2963
2964 fw.bss_addr = bnx_RXP_b09FwBssAddr;
2965 fw.bss_len = bnx_RXP_b09FwBssLen;
2966 fw.bss_index = 0;
2967 fw.bss = bnx_RXP_b09FwBss;
2968
2969 fw.rodata_addr = bnx_RXP_b09FwRodataAddr;
2970 fw.rodata_len = bnx_RXP_b09FwRodataLen;
2971 fw.rodata_index = 0;
2972 fw.rodata = bnx_RXP_b09FwRodata;
2973
2974 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2975 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2976
2977 /* Initialize the TX Processor. */
2978 cpu_reg.mode = BNX_TXP_CPU_MODE;
2979 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2980 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2981 cpu_reg.state = BNX_TXP_CPU_STATE;
2982 cpu_reg.state_value_clear = 0xffffff;
2983 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2984 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2985 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2986 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2987 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2988 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2989 cpu_reg.mips_view_base = 0x8000000;
2990
2991 fw.ver_major = bnx_TXP_b09FwReleaseMajor;
2992 fw.ver_minor = bnx_TXP_b09FwReleaseMinor;
2993 fw.ver_fix = bnx_TXP_b09FwReleaseFix;
2994 fw.start_addr = bnx_TXP_b09FwStartAddr;
2995
2996 fw.text_addr = bnx_TXP_b09FwTextAddr;
2997 fw.text_len = bnx_TXP_b09FwTextLen;
2998 fw.text_index = 0;
2999 fw.text = bnx_TXP_b09FwText;
3000
3001 fw.data_addr = bnx_TXP_b09FwDataAddr;
3002 fw.data_len = bnx_TXP_b09FwDataLen;
3003 fw.data_index = 0;
3004 fw.data = bnx_TXP_b09FwData;
3005
3006 fw.sbss_addr = bnx_TXP_b09FwSbssAddr;
3007 fw.sbss_len = bnx_TXP_b09FwSbssLen;
3008 fw.sbss_index = 0;
3009 fw.sbss = bnx_TXP_b09FwSbss;
3010
3011 fw.bss_addr = bnx_TXP_b09FwBssAddr;
3012 fw.bss_len = bnx_TXP_b09FwBssLen;
3013 fw.bss_index = 0;
3014 fw.bss = bnx_TXP_b09FwBss;
3015
3016 fw.rodata_addr = bnx_TXP_b09FwRodataAddr;
3017 fw.rodata_len = bnx_TXP_b09FwRodataLen;
3018 fw.rodata_index = 0;
3019 fw.rodata = bnx_TXP_b09FwRodata;
3020
3021 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
3022 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3023
3024 /* Initialize the TX Patch-up Processor. */
3025 cpu_reg.mode = BNX_TPAT_CPU_MODE;
3026 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
3027 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
3028 cpu_reg.state = BNX_TPAT_CPU_STATE;
3029 cpu_reg.state_value_clear = 0xffffff;
3030 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
3031 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
3032 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
3033 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
3034 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
3035 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
3036 cpu_reg.mips_view_base = 0x8000000;
3037
3038 fw.ver_major = bnx_TPAT_b09FwReleaseMajor;
3039 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor;
3040 fw.ver_fix = bnx_TPAT_b09FwReleaseFix;
3041 fw.start_addr = bnx_TPAT_b09FwStartAddr;
3042
3043 fw.text_addr = bnx_TPAT_b09FwTextAddr;
3044 fw.text_len = bnx_TPAT_b09FwTextLen;
3045 fw.text_index = 0;
3046 fw.text = bnx_TPAT_b09FwText;
3047
3048 fw.data_addr = bnx_TPAT_b09FwDataAddr;
3049 fw.data_len = bnx_TPAT_b09FwDataLen;
3050 fw.data_index = 0;
3051 fw.data = bnx_TPAT_b09FwData;
3052
3053 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr;
3054 fw.sbss_len = bnx_TPAT_b09FwSbssLen;
3055 fw.sbss_index = 0;
3056 fw.sbss = bnx_TPAT_b09FwSbss;
3057
3058 fw.bss_addr = bnx_TPAT_b09FwBssAddr;
3059 fw.bss_len = bnx_TPAT_b09FwBssLen;
3060 fw.bss_index = 0;
3061 fw.bss = bnx_TPAT_b09FwBss;
3062
3063 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr;
3064 fw.rodata_len = bnx_TPAT_b09FwRodataLen;
3065 fw.rodata_index = 0;
3066 fw.rodata = bnx_TPAT_b09FwRodata;
3067
3068 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
3069 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3070
3071 /* Initialize the Completion Processor. */
3072 cpu_reg.mode = BNX_COM_CPU_MODE;
3073 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
3074 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
3075 cpu_reg.state = BNX_COM_CPU_STATE;
3076 cpu_reg.state_value_clear = 0xffffff;
3077 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
3078 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
3079 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
3080 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
3081 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
3082 cpu_reg.spad_base = BNX_COM_SCRATCH;
3083 cpu_reg.mips_view_base = 0x8000000;
3084
3085 fw.ver_major = bnx_COM_b09FwReleaseMajor;
3086 fw.ver_minor = bnx_COM_b09FwReleaseMinor;
3087 fw.ver_fix = bnx_COM_b09FwReleaseFix;
3088 fw.start_addr = bnx_COM_b09FwStartAddr;
3089
3090 fw.text_addr = bnx_COM_b09FwTextAddr;
3091 fw.text_len = bnx_COM_b09FwTextLen;
3092 fw.text_index = 0;
3093 fw.text = bnx_COM_b09FwText;
3094
3095 fw.data_addr = bnx_COM_b09FwDataAddr;
3096 fw.data_len = bnx_COM_b09FwDataLen;
3097 fw.data_index = 0;
3098 fw.data = bnx_COM_b09FwData;
3099
3100 fw.sbss_addr = bnx_COM_b09FwSbssAddr;
3101 fw.sbss_len = bnx_COM_b09FwSbssLen;
3102 fw.sbss_index = 0;
3103 fw.sbss = bnx_COM_b09FwSbss;
3104
3105 fw.bss_addr = bnx_COM_b09FwBssAddr;
3106 fw.bss_len = bnx_COM_b09FwBssLen;
3107 fw.bss_index = 0;
3108 fw.bss = bnx_COM_b09FwBss;
3109
3110 fw.rodata_addr = bnx_COM_b09FwRodataAddr;
3111 fw.rodata_len = bnx_COM_b09FwRodataLen;
3112 fw.rodata_index = 0;
3113 fw.rodata = bnx_COM_b09FwRodata;
3114 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
3115 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3116 break;
3117 default:
3118 /* Initialize the RV2P processor. */
3119 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
3120 RV2P_PROC1);
3121 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
3122 RV2P_PROC2);
3123
3124 /* Initialize the RX Processor. */
3125 cpu_reg.mode = BNX_RXP_CPU_MODE;
3126 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
3127 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
3128 cpu_reg.state = BNX_RXP_CPU_STATE;
3129 cpu_reg.state_value_clear = 0xffffff;
3130 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
3131 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
3132 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
3133 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
3134 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
3135 cpu_reg.spad_base = BNX_RXP_SCRATCH;
3136 cpu_reg.mips_view_base = 0x8000000;
3137
3138 fw.ver_major = bnx_RXP_b06FwReleaseMajor;
3139 fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
3140 fw.ver_fix = bnx_RXP_b06FwReleaseFix;
3141 fw.start_addr = bnx_RXP_b06FwStartAddr;
3142
3143 fw.text_addr = bnx_RXP_b06FwTextAddr;
3144 fw.text_len = bnx_RXP_b06FwTextLen;
3145 fw.text_index = 0;
3146 fw.text = bnx_RXP_b06FwText;
3147
3148 fw.data_addr = bnx_RXP_b06FwDataAddr;
3149 fw.data_len = bnx_RXP_b06FwDataLen;
3150 fw.data_index = 0;
3151 fw.data = bnx_RXP_b06FwData;
3152
3153 fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
3154 fw.sbss_len = bnx_RXP_b06FwSbssLen;
3155 fw.sbss_index = 0;
3156 fw.sbss = bnx_RXP_b06FwSbss;
3157
3158 fw.bss_addr = bnx_RXP_b06FwBssAddr;
3159 fw.bss_len = bnx_RXP_b06FwBssLen;
3160 fw.bss_index = 0;
3161 fw.bss = bnx_RXP_b06FwBss;
3162
3163 fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
3164 fw.rodata_len = bnx_RXP_b06FwRodataLen;
3165 fw.rodata_index = 0;
3166 fw.rodata = bnx_RXP_b06FwRodata;
3167
3168 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
3169 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3170
3171 /* Initialize the TX Processor. */
3172 cpu_reg.mode = BNX_TXP_CPU_MODE;
3173 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
3174 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
3175 cpu_reg.state = BNX_TXP_CPU_STATE;
3176 cpu_reg.state_value_clear = 0xffffff;
3177 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
3178 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
3179 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
3180 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
3181 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
3182 cpu_reg.spad_base = BNX_TXP_SCRATCH;
3183 cpu_reg.mips_view_base = 0x8000000;
3184
3185 fw.ver_major = bnx_TXP_b06FwReleaseMajor;
3186 fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
3187 fw.ver_fix = bnx_TXP_b06FwReleaseFix;
3188 fw.start_addr = bnx_TXP_b06FwStartAddr;
3189
3190 fw.text_addr = bnx_TXP_b06FwTextAddr;
3191 fw.text_len = bnx_TXP_b06FwTextLen;
3192 fw.text_index = 0;
3193 fw.text = bnx_TXP_b06FwText;
3194
3195 fw.data_addr = bnx_TXP_b06FwDataAddr;
3196 fw.data_len = bnx_TXP_b06FwDataLen;
3197 fw.data_index = 0;
3198 fw.data = bnx_TXP_b06FwData;
3199
3200 fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
3201 fw.sbss_len = bnx_TXP_b06FwSbssLen;
3202 fw.sbss_index = 0;
3203 fw.sbss = bnx_TXP_b06FwSbss;
3204
3205 fw.bss_addr = bnx_TXP_b06FwBssAddr;
3206 fw.bss_len = bnx_TXP_b06FwBssLen;
3207 fw.bss_index = 0;
3208 fw.bss = bnx_TXP_b06FwBss;
3209
3210 fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
3211 fw.rodata_len = bnx_TXP_b06FwRodataLen;
3212 fw.rodata_index = 0;
3213 fw.rodata = bnx_TXP_b06FwRodata;
3214
3215 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
3216 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3217
3218 /* Initialize the TX Patch-up Processor. */
3219 cpu_reg.mode = BNX_TPAT_CPU_MODE;
3220 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
3221 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
3222 cpu_reg.state = BNX_TPAT_CPU_STATE;
3223 cpu_reg.state_value_clear = 0xffffff;
3224 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
3225 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
3226 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
3227 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
3228 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
3229 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
3230 cpu_reg.mips_view_base = 0x8000000;
3231
3232 fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
3233 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
3234 fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
3235 fw.start_addr = bnx_TPAT_b06FwStartAddr;
3236
3237 fw.text_addr = bnx_TPAT_b06FwTextAddr;
3238 fw.text_len = bnx_TPAT_b06FwTextLen;
3239 fw.text_index = 0;
3240 fw.text = bnx_TPAT_b06FwText;
3241
3242 fw.data_addr = bnx_TPAT_b06FwDataAddr;
3243 fw.data_len = bnx_TPAT_b06FwDataLen;
3244 fw.data_index = 0;
3245 fw.data = bnx_TPAT_b06FwData;
3246
3247 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
3248 fw.sbss_len = bnx_TPAT_b06FwSbssLen;
3249 fw.sbss_index = 0;
3250 fw.sbss = bnx_TPAT_b06FwSbss;
3251
3252 fw.bss_addr = bnx_TPAT_b06FwBssAddr;
3253 fw.bss_len = bnx_TPAT_b06FwBssLen;
3254 fw.bss_index = 0;
3255 fw.bss = bnx_TPAT_b06FwBss;
3256
3257 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
3258 fw.rodata_len = bnx_TPAT_b06FwRodataLen;
3259 fw.rodata_index = 0;
3260 fw.rodata = bnx_TPAT_b06FwRodata;
3261
3262 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
3263 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3264
3265 /* Initialize the Completion Processor. */
3266 cpu_reg.mode = BNX_COM_CPU_MODE;
3267 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
3268 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
3269 cpu_reg.state = BNX_COM_CPU_STATE;
3270 cpu_reg.state_value_clear = 0xffffff;
3271 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
3272 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
3273 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
3274 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
3275 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
3276 cpu_reg.spad_base = BNX_COM_SCRATCH;
3277 cpu_reg.mips_view_base = 0x8000000;
3278
3279 fw.ver_major = bnx_COM_b06FwReleaseMajor;
3280 fw.ver_minor = bnx_COM_b06FwReleaseMinor;
3281 fw.ver_fix = bnx_COM_b06FwReleaseFix;
3282 fw.start_addr = bnx_COM_b06FwStartAddr;
3283
3284 fw.text_addr = bnx_COM_b06FwTextAddr;
3285 fw.text_len = bnx_COM_b06FwTextLen;
3286 fw.text_index = 0;
3287 fw.text = bnx_COM_b06FwText;
3288
3289 fw.data_addr = bnx_COM_b06FwDataAddr;
3290 fw.data_len = bnx_COM_b06FwDataLen;
3291 fw.data_index = 0;
3292 fw.data = bnx_COM_b06FwData;
3293
3294 fw.sbss_addr = bnx_COM_b06FwSbssAddr;
3295 fw.sbss_len = bnx_COM_b06FwSbssLen;
3296 fw.sbss_index = 0;
3297 fw.sbss = bnx_COM_b06FwSbss;
3298
3299 fw.bss_addr = bnx_COM_b06FwBssAddr;
3300 fw.bss_len = bnx_COM_b06FwBssLen;
3301 fw.bss_index = 0;
3302 fw.bss = bnx_COM_b06FwBss;
3303
3304 fw.rodata_addr = bnx_COM_b06FwRodataAddr;
3305 fw.rodata_len = bnx_COM_b06FwRodataLen;
3306 fw.rodata_index = 0;
3307 fw.rodata = bnx_COM_b06FwRodata;
3308 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
3309 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3310 break;
3311 }
3312 }
3313
3314 /****************************************************************************/
3315 /* Initialize context memory. */
3316 /* */
3317 /* Clears the memory associated with each Context ID (CID). */
3318 /* */
3319 /* Returns: */
3320 /* Nothing. */
3321 /****************************************************************************/
3322 void
3323 bnx_init_context(struct bnx_softc *sc)
3324 {
3325 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3326 /* DRC: Replace this constant value with a #define. */
3327 int i, retry_cnt = 10;
3328 uint32_t val;
3329
3330 /*
3331 * BCM5709 context memory may be cached
3332 * in host memory so prepare the host memory
3333 * for access.
3334 */
3335 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
3336 | (1 << 12);
3337 val |= (BCM_PAGE_BITS - 8) << 16;
3338 REG_WR(sc, BNX_CTX_COMMAND, val);
3339
3340 /* Wait for mem init command to complete. */
3341 for (i = 0; i < retry_cnt; i++) {
3342 val = REG_RD(sc, BNX_CTX_COMMAND);
3343 if (!(val & BNX_CTX_COMMAND_MEM_INIT))
3344 break;
3345 DELAY(2);
3346 }
3347
3348 /* ToDo: Consider returning an error here. */
3349
3350 for (i = 0; i < sc->ctx_pages; i++) {
3351 int j;
3352
3353 /* Set the physaddr of the context memory cache. */
3354 val = (uint32_t)(sc->ctx_segs[i].ds_addr);
3355 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
3356 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
3357 val = (uint32_t)
3358 ((uint64_t)sc->ctx_segs[i].ds_addr >> 32);
3359 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
3360 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
3361 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3362
3363 /* Verify that the context memory write was successful. */
3364 for (j = 0; j < retry_cnt; j++) {
3365 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
3366 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3367 break;
3368 DELAY(5);
3369 }
3370
3371 /* ToDo: Consider returning an error here. */
3372 }
3373 } else {
3374 uint32_t vcid_addr, offset;
3375
3376 /*
3377 * For the 5706/5708, context memory is local to the
3378 * controller, so initialize the controller context memory.
3379 */
3380
3381 vcid_addr = GET_CID_ADDR(96);
3382 while (vcid_addr) {
3383
3384 vcid_addr -= BNX_PHY_CTX_SIZE;
3385
3386 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
3387 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
3388
3389 for (offset = 0; offset < BNX_PHY_CTX_SIZE;
3390 offset += 4)
3391 CTX_WR(sc, 0x00, offset, 0);
3392
3393 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
3394 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
3395 }
3396 }
3397 }
3398
3399 /****************************************************************************/
3400 /* Fetch the permanent MAC address of the controller. */
3401 /* */
3402 /* Returns: */
3403 /* Nothing. */
3404 /****************************************************************************/
3405 void
3406 bnx_get_mac_addr(struct bnx_softc *sc)
3407 {
3408 uint32_t mac_lo = 0, mac_hi = 0;
3409
3410 /*
3411 * The NetXtreme II bootcode populates various NIC
3412 * power-on and runtime configuration items in a
3413 * shared memory area. The factory configured MAC
3414 * address is available from both NVRAM and the
3415 * shared memory area so we'll read the value from
3416 * shared memory for speed.
3417 */
3418
3419 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3420 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3421
3422 if ((mac_lo == 0) && (mac_hi == 0)) {
3423 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3424 __FILE__, __LINE__);
3425 } else {
3426 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3427 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3428 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3429 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3430 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3431 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3432 }
3433
3434 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3435 "%s\n", ether_sprintf(sc->eaddr));
3436 }
3437
3438 /****************************************************************************/
3439 /* Program the MAC address. */
3440 /* */
3441 /* Returns: */
3442 /* Nothing. */
3443 /****************************************************************************/
3444 void
3445 bnx_set_mac_addr(struct bnx_softc *sc)
3446 {
3447 uint32_t val;
3448 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);
3449
3450 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3451 "%s\n", ether_sprintf(sc->eaddr));
3452
3453 val = (mac_addr[0] << 8) | mac_addr[1];
3454
3455 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3456
3457 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3458 (mac_addr[4] << 8) | mac_addr[5];
3459
3460 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3461 }
3462
3463 /****************************************************************************/
3464 /* Stop the controller. */
3465 /* */
3466 /* Returns: */
3467 /* Nothing. */
3468 /****************************************************************************/
3469 void
3470 bnx_stop(struct ifnet *ifp, int disable)
3471 {
3472 struct bnx_softc *sc = ifp->if_softc;
3473
3474 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3475
3476 if (disable) {
3477 sc->bnx_detaching = 1;
3478 callout_halt(&sc->bnx_timeout, NULL);
3479 } else
3480 callout_stop(&sc->bnx_timeout);
3481
3482 mii_down(&sc->bnx_mii);
3483
3484 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3485
3486 /* Disable the transmit/receive blocks. */
3487 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
3488 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
3489 DELAY(20);
3490
3491 bnx_disable_intr(sc);
3492
3493 /* Tell firmware that the driver is going away. */
3494 if (disable)
3495 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
3496 else
3497 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
3498
3499 /* Free RX buffers. */
3500 bnx_free_rx_chain(sc);
3501
3502 /* Free TX buffers. */
3503 bnx_free_tx_chain(sc);
3504
3505 ifp->if_timer = 0;
3506
3507 sc->bnx_link = 0;
3508
3509 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3510
3511 bnx_mgmt_init(sc);
3512 }
3513
3514 int
3515 bnx_reset(struct bnx_softc *sc, uint32_t reset_code)
3516 {
3517 struct pci_attach_args *pa = &(sc->bnx_pa);
3518 uint32_t val;
3519 int i, rc = 0;
3520
3521 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3522
3523 /* Wait for pending PCI transactions to complete. */
3524 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
3525 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) {
3526 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
3527 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3528 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3529 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3530 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3531 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
3532 DELAY(5);
3533 } else {
3534 /* Disable DMA */
3535 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3536 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3537 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3538 REG_RD(sc, BNX_MISC_NEW_CORE_CTL); /* barrier */
3539
3540 for (i = 0; i < 100; i++) {
3541 delay(1 * 1000);
3542 val = REG_RD(sc, BNX_PCICFG_DEVICE_CONTROL);
3543 if ((val & PCIE_DCSR_TRANSACTION_PND) == 0)
3544 break;
3545 }
3546 }
3547
3548 /* Assume bootcode is running. */
3549 sc->bnx_fw_timed_out = 0;
3550
3551 /* Give the firmware a chance to prepare for the reset. */
3552 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
3553 if (rc)
3554 goto bnx_reset_exit;
3555
3556 /* Set a firmware reminder that this is a soft reset. */
3557 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
3558 BNX_DRV_RESET_SIGNATURE_MAGIC);
3559
3560 /* Dummy read to force the chip to complete all current transactions. */
3561 val = REG_RD(sc, BNX_MISC_ID);
3562
3563 /* Chip reset. */
3564 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3565 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
3566 REG_RD(sc, BNX_MISC_COMMAND);
3567 DELAY(5);
3568
3569 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3570 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3571
3572 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
3573 val);
3574 } else {
3575 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3576 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3577 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3578 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);
3579
3580 /* Allow up to 100 us (10 x 10 us) for the reset to complete. */
3581 for (i = 0; i < 10; i++) {
3582 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
3583 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3584 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3585 break;
3586 }
3587 DELAY(10);
3588 }
3589
3590 /* Check that reset completed successfully. */
3591 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3592 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3593 BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
3594 __FILE__, __LINE__);
3595 rc = EBUSY;
3596 goto bnx_reset_exit;
3597 }
3598 }
3599
3600 /* Make sure byte swapping is properly configured. */
3601 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
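/* A correctly swapped configuration reads the diagnostic word back as 0x01020304. */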
3602 if (val != 0x01020304) {
3603 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
3604 __FILE__, __LINE__);
3605 rc = ENODEV;
3606 goto bnx_reset_exit;
3607 }
3608
3609 /* Just completed a reset, assume that firmware is running again. */
3610 sc->bnx_fw_timed_out = 0;
3611
3612 /* Wait for the firmware to finish its initialization. */
3613 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3614 if (rc)
3615 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3616 "initialization!\n", __FILE__, __LINE__);
3617
3618 bnx_reset_exit:
3619 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3620
3621 return rc;
3622 }
3623
3624 int
3625 bnx_chipinit(struct bnx_softc *sc)
3626 {
3627 struct pci_attach_args *pa = &(sc->bnx_pa);
3628 uint32_t val;
3629 int rc = 0;
3630
3631 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3632
3633 /* Make sure the interrupt is not active. */
3634 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3635
3636 /* Initialize DMA byte/word swapping, configure the number of DMA */
3637 /* channels and PCI clock compensation delay. */
3638 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3639 BNX_DMA_CONFIG_DATA_WORD_SWAP |
3640 #if BYTE_ORDER == BIG_ENDIAN
3641 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3642 #endif
3643 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3644 DMA_READ_CHANS << 12 |
3645 DMA_WRITE_CHANS << 16;
3646
3647 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3648
3649 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3650 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3651
3652 /*
3653 * This setting resolves a problem observed on certain Intel PCI
3654 * chipsets that cannot handle multiple outstanding DMA operations.
3655 * See errata E9_5706A1_65.
3656 */
3657 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3658 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3659 !(sc->bnx_flags & BNX_PCIX_FLAG))
3660 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3661
3662 REG_WR(sc, BNX_DMA_CONFIG, val);
3663
3664 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3665 if (sc->bnx_flags & BNX_PCIX_FLAG) {
3666 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3667 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3668 val & ~0x20000);
3669 }
3670
3671 /* Enable the RX_V2P and Context state machines before access. */
3672 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3673 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3674 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3675 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3676
3677 /* Initialize context mapping and zero out the quick contexts. */
3678 bnx_init_context(sc);
3679
3680 /* Initialize the on-board CPUs. */
3681 bnx_init_cpus(sc);
3682
3683 /* Enable management frames (NC-SI) to flow to the MCP. */
3684 if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
3685 val = REG_RD(sc, BNX_RPM_MGMT_PKT_CTRL) |
3686 BNX_RPM_MGMT_PKT_CTRL_MGMT_EN;
3687 REG_WR(sc, BNX_RPM_MGMT_PKT_CTRL, val);
3688 }
3689
3690 /* Prepare NVRAM for access. */
3691 if (bnx_init_nvram(sc)) {
3692 rc = ENODEV;
3693 goto bnx_chipinit_exit;
3694 }
3695
3696 /* Set the kernel bypass block size */
3697 val = REG_RD(sc, BNX_MQ_CONFIG);
3698 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3699 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3700
3701 /* Enable bins used on the 5709. */
3702 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3703 val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
3704 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
3705 val |= BNX_MQ_CONFIG_HALT_DIS;
3706 }
3707
3708 REG_WR(sc, BNX_MQ_CONFIG, val);
3709
3710 val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE);
3711 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3712 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3713
3714 val = (BCM_PAGE_BITS - 8) << 24;
3715 REG_WR(sc, BNX_RV2P_CONFIG, val);
3716
3717 /* Configure page size. */
3718 val = REG_RD(sc, BNX_TBDR_CONFIG);
3719 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3720 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3721 REG_WR(sc, BNX_TBDR_CONFIG, val);
3722
3723 #if 0
3724 /* Set the perfect match control register to default. */
3725 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
3726 #endif
3727
3728 bnx_chipinit_exit:
3729 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3730
3731 return rc;
3732 }
3733
3734 /****************************************************************************/
3735 /* Initialize the controller in preparation to send/receive traffic. */
3736 /* */
3737 /* Returns: */
3738 /* 0 for success, positive value for failure. */
3739 /****************************************************************************/
3740 int
3741 bnx_blockinit(struct bnx_softc *sc)
3742 {
3743 uint32_t reg, val;
3744 int rc = 0;
3745
3746 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3747
3748 /* Load the hardware default MAC address. */
3749 bnx_set_mac_addr(sc);
3750
3751 /* Set the Ethernet backoff seed value */
3752 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3753 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3754 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3755
3756 sc->last_status_idx = 0;
3757 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3758
3759 /* Set up link change interrupt generation. */
3760 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3761 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3762
3763 /* Program the physical address of the status block. */
3764 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr));
3765 REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3766 (uint32_t)((uint64_t)sc->status_block_paddr >> 32));
3767
3768 /* Program the physical address of the statistics block. */
3769 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3770 (uint32_t)(sc->stats_block_paddr));
3771 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3772 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32));
3773
3774 /* Program various host coalescing parameters. */
3775 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3776 << 16) | sc->bnx_tx_quick_cons_trip);
3777 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3778 << 16) | sc->bnx_rx_quick_cons_trip);
3779 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3780 sc->bnx_comp_prod_trip);
3781 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3782 sc->bnx_tx_ticks);
3783 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3784 sc->bnx_rx_ticks);
3785 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3786 sc->bnx_com_ticks);
3787 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3788 sc->bnx_cmd_ticks);
3789 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3790 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3791 REG_WR(sc, BNX_HC_CONFIG,
3792 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3793 BNX_HC_CONFIG_COLLECT_STATS));
3794
3795 /* Clear the internal statistics counters. */
3796 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3797
3798 /* Verify that bootcode is running. */
3799 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3800
3801 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3802 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3803 __FILE__, __LINE__); reg = 0);
3804
3805 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3806 BNX_DEV_INFO_SIGNATURE_MAGIC) {
3807 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3808 "Expected: 08%08X\n", __FILE__, __LINE__,
3809 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3810 BNX_DEV_INFO_SIGNATURE_MAGIC);
3811 rc = ENODEV;
3812 goto bnx_blockinit_exit;
3813 }
3814
3815 /* Enable DMA */
3816 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3817 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3818 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3819 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3820 }
3821
3822 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3823 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3824
3825 /* Disable management frames (NC-SI) from flowing to the MCP. */
3826 if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
3827 val = REG_RD(sc, BNX_RPM_MGMT_PKT_CTRL) &
3828 ~BNX_RPM_MGMT_PKT_CTRL_MGMT_EN;
3829 REG_WR(sc, BNX_RPM_MGMT_PKT_CTRL, val);
3830 }
3831
3832 /* Enable all remaining blocks in the MAC. */
3833 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3834 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3835 BNX_MISC_ENABLE_DEFAULT_XI);
3836 } else
3837 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3838
3839 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3840 DELAY(20);
3841
3842 bnx_blockinit_exit:
3843 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3844
3845 return rc;
3846 }
3847
3848 static int
3849 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod,
3850 uint16_t *chain_prod, uint32_t *prod_bseq)
3851 {
3852 bus_dmamap_t map;
3853 struct rx_bd *rxbd;
3854 uint32_t addr;
3855 int i;
3856 #ifdef BNX_DEBUG
3857 uint16_t debug_chain_prod = *chain_prod;
3858 #endif
3859 uint16_t first_chain_prod;
3860
3861 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3862
3863 /* Map the mbuf cluster into device memory. */
3864 map = sc->rx_mbuf_map[*chain_prod];
3865 first_chain_prod = *chain_prod;
3866 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3867 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3868 __FILE__, __LINE__);
3869
3870 m_freem(m_new);
3871
3872 DBRUNIF(1, sc->rx_mbuf_alloc--);
3873
3874 return ENOBUFS;
3875 }
3876 /* Make sure there is room in the receive chain. */
3877 if (map->dm_nsegs > sc->free_rx_bd) {
3878 bus_dmamap_unload(sc->bnx_dmatag, map);
3879 m_freem(m_new);
3880 return EFBIG;
3881 }
3882 #ifdef BNX_DEBUG
3883 /* Track the distribution of buffer segments. */
3884 sc->rx_mbuf_segs[map->dm_nsegs]++;
3885 #endif
3886
3887 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3888 BUS_DMASYNC_PREREAD);
3889
3890 /* Update some debug statistics counters */
3891 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3892 sc->rx_low_watermark = sc->free_rx_bd);
3893 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3894
3895 /*
3896 * Setup the rx_bd for the first segment
3897 */
3898 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3899
3900 addr = (uint32_t)map->dm_segs[0].ds_addr;
3901 rxbd->rx_bd_haddr_lo = addr;
3902 addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32);
3903 rxbd->rx_bd_haddr_hi = addr;
3904 rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3905 rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3906 *prod_bseq += map->dm_segs[0].ds_len;
3907 bus_dmamap_sync(sc->bnx_dmatag,
3908 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3909 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3910 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3911
3912 for (i = 1; i < map->dm_nsegs; i++) {
3913 *prod = NEXT_RX_BD(*prod);
3914 *chain_prod = RX_CHAIN_IDX(*prod);
3915
3916 rxbd =
3917 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3918
3919 addr = (uint32_t)map->dm_segs[i].ds_addr;
3920 rxbd->rx_bd_haddr_lo = addr;
3921 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
3922 rxbd->rx_bd_haddr_hi = addr;
3923 rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3924 rxbd->rx_bd_flags = 0;
3925 *prod_bseq += map->dm_segs[i].ds_len;
3926 bus_dmamap_sync(sc->bnx_dmatag,
3927 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3928 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3929 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3930 }
3931
3932 rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3933 bus_dmamap_sync(sc->bnx_dmatag,
3934 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3935 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3936 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3937
3938 /*
3939 * Save the mbuf, adjust the map pointer (swap map for first and
3940 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map match)
3941 * and update our counter.
3942 */
3943 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3944 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3945 sc->rx_mbuf_map[*chain_prod] = map;
3946 sc->free_rx_bd -= map->dm_nsegs;
3947
3948 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3949 map->dm_nsegs));
3950 *prod = NEXT_RX_BD(*prod);
3951 *chain_prod = RX_CHAIN_IDX(*prod);
3952
3953 return 0;
3954 }
3955
3956 /****************************************************************************/
3957 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3958 /* */
3959 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3960 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3961 /* necessary. */
3962 /* */
3963 /* Returns: */
3964 /* 0 for success, positive value for failure. */
3965 /****************************************************************************/
3966 int
3967 bnx_get_buf(struct bnx_softc *sc, uint16_t *prod,
3968 uint16_t *chain_prod, uint32_t *prod_bseq)
3969 {
3970 struct mbuf *m_new = NULL;
3971 int rc = 0;
3972 uint16_t min_free_bd;
3973
3974 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3975 __func__);
3976
3977 /* Make sure the inputs are valid. */
3978 DBRUNIF((*chain_prod > MAX_RX_BD),
3979 aprint_error_dev(sc->bnx_dev,
3980 "RX producer out of range: 0x%04X > 0x%04X\n",
3981 *chain_prod, (uint16_t)MAX_RX_BD));
3982
3983 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3984 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
3985 *prod_bseq);
3986
3987 /* try to get in as many mbufs as possible */
3988 if (sc->mbuf_alloc_size == MCLBYTES)
3989 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
3990 else
3991 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
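/* min_free_bd is the worst-case number of rx_bd entries one buffer can consume: one per page spanned, since each DMA segment gets its own rx_bd. */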
3992 while (sc->free_rx_bd >= min_free_bd) {
3993 /* Simulate an mbuf allocation failure. */
3994 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3995 aprint_error_dev(sc->bnx_dev,
3996 "Simulating mbuf allocation failure.\n");
3997 sc->mbuf_sim_alloc_failed++;
3998 rc = ENOBUFS;
3999 goto bnx_get_buf_exit);
4000
4001 /* This is a new mbuf allocation. */
4002 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
4003 if (m_new == NULL) {
4004 DBPRINT(sc, BNX_WARN,
4005 "%s(%d): RX mbuf header allocation failed!\n",
4006 __FILE__, __LINE__);
4007
4008 sc->mbuf_alloc_failed++;
4009
4010 rc = ENOBUFS;
4011 goto bnx_get_buf_exit;
4012 }
4013
4014 DBRUNIF(1, sc->rx_mbuf_alloc++);
4015
4016 /* Simulate an mbuf cluster allocation failure. */
4017 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
4018 m_freem(m_new);
4019 sc->rx_mbuf_alloc--;
4020 sc->mbuf_alloc_failed++;
4021 sc->mbuf_sim_alloc_failed++;
4022 rc = ENOBUFS;
4023 goto bnx_get_buf_exit);
4024
4025 if (sc->mbuf_alloc_size == MCLBYTES)
4026 MCLGET(m_new, M_DONTWAIT);
4027 else
4028 MEXTMALLOC(m_new, sc->mbuf_alloc_size,
4029 M_DONTWAIT);
4030 if (!(m_new->m_flags & M_EXT)) {
4031 DBPRINT(sc, BNX_WARN,
4032 "%s(%d): RX mbuf chain allocation failed!\n",
4033 __FILE__, __LINE__);
4034
4035 m_freem(m_new);
4036
4037 DBRUNIF(1, sc->rx_mbuf_alloc--);
4038 sc->mbuf_alloc_failed++;
4039
4040 rc = ENOBUFS;
4041 goto bnx_get_buf_exit;
4042 }
4043
4044 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq);
4045 if (rc != 0)
4046 goto bnx_get_buf_exit;
4047 }
4048
4049 bnx_get_buf_exit:
4050 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
4051 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
4052 *chain_prod, *prod_bseq);
4053
4054 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
4055 __func__);
4056
4057 return rc;
4058 }
4059
4060 void
4061 bnx_alloc_pkts(struct work *unused, void *arg)
4062 {
4063 struct bnx_softc *sc = arg;
4064 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4065 struct bnx_pkt *pkt;
4066 int i, s;
4067
4068 for (i = 0; i < 4; i++) { /* arbitrary batch size */
4069 pkt = pool_get(bnx_tx_pool, PR_WAITOK);
4070 if (pkt == NULL)
4071 break;
4072
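/* Create a DMA map sized for BNX_MAX_SEGMENTS clusters, allowing up to USABLE_TX_BD segments of at most MCLBYTES each. */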
4073 if (bus_dmamap_create(sc->bnx_dmatag,
4074 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
4075 MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
4076 &pkt->pkt_dmamap) != 0)
4077 goto put;
4078
4079 if (!ISSET(ifp->if_flags, IFF_UP))
4080 goto stopping;
4081
4082 mutex_enter(&sc->tx_pkt_mtx);
4083 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
4084 sc->tx_pkt_count++;
4085 mutex_exit(&sc->tx_pkt_mtx);
4086 }
4087
4088 mutex_enter(&sc->tx_pkt_mtx);
4089 CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
4090 mutex_exit(&sc->tx_pkt_mtx);
4091
4092 /* Fire up TX now that allocations have been done. */
4093 s = splnet();
4094 if (!IFQ_IS_EMPTY(&ifp->if_snd))
4095 bnx_start(ifp);
4096 splx(s);
4097
4098 return;
4099
4100 stopping:
4101 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
4102 put:
4103 pool_put(bnx_tx_pool, pkt);
4104 return;
4105 }
4106
4107 /****************************************************************************/
4108 /* Initialize the TX context memory. */
4109 /* */
4110 /* Returns: */
4111 /* Nothing */
4112 /****************************************************************************/
4113 void
4114 bnx_init_tx_context(struct bnx_softc *sc)
4115 {
4116 uint32_t val;
4117
4118 /* Initialize the context ID for an L2 TX chain. */
4119 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
4120 /* Set the CID type to support an L2 connection. */
4121 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
4122 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
4123 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
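/* The literal 8 in bits 16-23 is inherited unchanged from the reference FreeBSD bce(4) code this driver derives from. */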
4124 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
4125
4126 /* Point the hardware to the first page in the chain. */
4127 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32);
4128 CTX_WR(sc, GET_CID_ADDR(TX_CID),
4129 BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
4130 val = (uint32_t)(sc->tx_bd_chain_paddr[0]);
4131 CTX_WR(sc, GET_CID_ADDR(TX_CID),
4132 BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
4133 } else {
4134 /* Set the CID type to support an L2 connection. */
4135 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
4136 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
4137 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4138 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
4139
4140 /* Point the hardware to the first page in the chain. */
4141 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32);
4142 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
4143 val = (uint32_t)(sc->tx_bd_chain_paddr[0]);
4144 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
4145 }
4146 }
4147
4149 /****************************************************************************/
4150 /* Allocate memory and initialize the TX data structures. */
4151 /* */
4152 /* Returns: */
4153 /* 0 for success, positive value for failure. */
4154 /****************************************************************************/
4155 int
4156 bnx_init_tx_chain(struct bnx_softc *sc)
4157 {
4158 struct tx_bd *txbd;
4159 uint32_t addr;
4160 int i, rc = 0;
4161
4162 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4163
4164 /* Force an allocation of some dmamaps for tx up front */
4165 bnx_alloc_pkts(NULL, sc);
4166
4167 /* Set the initial TX producer/consumer indices. */
4168 sc->tx_prod = 0;
4169 sc->tx_cons = 0;
4170 sc->tx_prod_bseq = 0;
4171 sc->used_tx_bd = 0;
4172 sc->max_tx_bd = USABLE_TX_BD;
4173 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
4174 DBRUNIF(1, sc->tx_full_count = 0);
4175
4176 /*
4177 * The NetXtreme II supports a linked-list structure called
4178 * a Buffer Descriptor Chain (or BD chain). A BD chain
4179 * consists of a series of 1 or more chain pages, each of which
4180 * consists of a fixed number of BD entries.
4181 * The last BD entry on each page is a pointer to the next page
4182 * in the chain, and the last pointer in the BD chain
4183 * points back to the beginning of the chain.
4184 */
4185
4186 /* Set the TX next pointer chain entries. */
4187 for (i = 0; i < TX_PAGES; i++) {
4188 int j;
4189
4190 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4191
4192 /* Check if we've reached the last page. */
4193 if (i == (TX_PAGES - 1))
4194 j = 0;
4195 else
4196 j = i + 1;
4197
4198 addr = (uint32_t)sc->tx_bd_chain_paddr[j];
4199 txbd->tx_bd_haddr_lo = addr;
4200 addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32);
4201 txbd->tx_bd_haddr_hi = addr;
4202 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
4203 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
4204 }
4205
4206 /*
4207 * Initialize the context ID for an L2 TX chain.
4208 */
4209 bnx_init_tx_context(sc);
4210
4211 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4212
4213 return rc;
4214 }
4215
4216 /****************************************************************************/
4217 /* Free memory and clear the TX data structures. */
4218 /* */
4219 /* Returns: */
4220 /* Nothing. */
4221 /****************************************************************************/
4222 void
4223 bnx_free_tx_chain(struct bnx_softc *sc)
4224 {
4225 struct bnx_pkt *pkt;
4226 int i;
4227
4228 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4229
4230 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4231 mutex_enter(&sc->tx_pkt_mtx);
4232 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
4233 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
4234 mutex_exit(&sc->tx_pkt_mtx);
4235
4236 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
4237 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4238 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);
4239
4240 m_freem(pkt->pkt_mbuf);
4241 DBRUNIF(1, sc->tx_mbuf_alloc--);
4242
4243 mutex_enter(&sc->tx_pkt_mtx);
4244 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
4245 }
4246
4247 /* Destroy all the dmamaps we allocated for TX */
4248 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
4249 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
4250 sc->tx_pkt_count--;
4251 mutex_exit(&sc->tx_pkt_mtx);
4252
4253 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
4254 pool_put(bnx_tx_pool, pkt);
4255
4256 mutex_enter(&sc->tx_pkt_mtx);
4257 }
4258 mutex_exit(&sc->tx_pkt_mtx);
4259
4262 /* Clear each TX chain page. */
4263 for (i = 0; i < TX_PAGES; i++) {
4264 memset(sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ);
4265 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
4266 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
4267 }
4268
4269 sc->used_tx_bd = 0;
4270
4271 /* Check if we lost any mbufs in the process. */
4272 DBRUNIF((sc->tx_mbuf_alloc),
4273 aprint_error_dev(sc->bnx_dev,
4274 "Memory leak! Lost %d mbufs from tx chain!\n",
4275 sc->tx_mbuf_alloc));
4276
4277 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4278 }
4279
4280 /****************************************************************************/
4281 /* Initialize the RX context memory. */
4282 /* */
4283 /* Returns: */
4284 /* Nothing */
4285 /****************************************************************************/
4286 void
4287 bnx_init_rx_context(struct bnx_softc *sc)
4288 {
4289 uint32_t val;
4290
4291 /* Initialize the context ID for an L2 RX chain. */
4292 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4293 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4294
4295 if (sc->bnx_flowflags & IFM_ETH_TXPAUSE)
4296 val |= 0x000000ff;
4297
4298 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
4299
4300 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4301 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
4302 val = REG_RD(sc, BNX_MQ_MAP_L2_5);
4303 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
4304 }
4305
4306 /* Point the hardware to the first page in the chain. */
4307 val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32);
4308 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
4309 val = (uint32_t)(sc->rx_bd_chain_paddr[0]);
4310 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
4311 }
4312
4313 /****************************************************************************/
4314 /* Allocate memory and initialize the RX data structures. */
4315 /* */
4316 /* Returns: */
4317 /* 0 for success, positive value for failure. */
4318 /****************************************************************************/
4319 int
4320 bnx_init_rx_chain(struct bnx_softc *sc)
4321 {
4322 struct rx_bd *rxbd;
4323 int i, rc = 0;
4324 uint16_t prod, chain_prod;
4325 uint32_t prod_bseq, addr;
4326
4327 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4328
4329 /* Initialize the RX producer and consumer indices. */
4330 sc->rx_prod = 0;
4331 sc->rx_cons = 0;
4332 sc->rx_prod_bseq = 0;
4333 sc->free_rx_bd = USABLE_RX_BD;
4334 sc->max_rx_bd = USABLE_RX_BD;
4335 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4336 DBRUNIF(1, sc->rx_empty_count = 0);
4337
4338 /* Initialize the RX next pointer chain entries. */
4339 for (i = 0; i < RX_PAGES; i++) {
4340 int j;
4341
4342 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4343
4344 /* Check if we've reached the last page. */
4345 if (i == (RX_PAGES - 1))
4346 j = 0;
4347 else
4348 j = i + 1;
4349
4350 /* Setup the chain page pointers. */
4351 addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32);
4352 rxbd->rx_bd_haddr_hi = addr;
4353 addr = (uint32_t)sc->rx_bd_chain_paddr[j];
4354 rxbd->rx_bd_haddr_lo = addr;
4355 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
4356 0, BNX_RX_CHAIN_PAGE_SZ,
4357 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4358 }
4359
4360 /* Allocate mbuf clusters for the rx_bd chain. */
4361 prod = prod_bseq = 0;
4362 chain_prod = RX_CHAIN_IDX(prod);
4363 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
4364 BNX_PRINTF(sc,
4365 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
4366 }
4367
4368 /* Save the RX chain producer index. */
4369 sc->rx_prod = prod;
4370 sc->rx_prod_bseq = prod_bseq;
4371
4372 for (i = 0; i < RX_PAGES; i++)
4373 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4374 sc->rx_bd_chain_map[i]->dm_mapsize,
4375 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4376
4377 /* Tell the chip about the waiting rx_bd's. */
4378 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4379 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4380
4381 bnx_init_rx_context(sc);
4382
4383 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4384
4385 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4386
4387 return rc;
4388 }
4389
4390 /****************************************************************************/
4391 /* Free memory and clear the RX data structures. */
4392 /* */
4393 /* Returns: */
4394 /* Nothing. */
4395 /****************************************************************************/
4396 void
4397 bnx_free_rx_chain(struct bnx_softc *sc)
4398 {
4399 int i;
4400
4401 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4402
4403 /* Free any mbufs still in the RX mbuf chain. */
4404 for (i = 0; i < TOTAL_RX_BD; i++) {
4405 if (sc->rx_mbuf_ptr[i] != NULL) {
4406 if (sc->rx_mbuf_map[i] != NULL) {
4407 bus_dmamap_sync(sc->bnx_dmatag,
4408 sc->rx_mbuf_map[i], 0,
4409 sc->rx_mbuf_map[i]->dm_mapsize,
4410 BUS_DMASYNC_POSTREAD);
4411 bus_dmamap_unload(sc->bnx_dmatag,
4412 sc->rx_mbuf_map[i]);
4413 }
4414 m_freem(sc->rx_mbuf_ptr[i]);
4415 sc->rx_mbuf_ptr[i] = NULL;
4416 DBRUNIF(1, sc->rx_mbuf_alloc--);
4417 }
4418 }
4419
4420 /* Clear each RX chain page. */
4421 for (i = 0; i < RX_PAGES; i++)
4422 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
4423
4424 sc->free_rx_bd = sc->max_rx_bd;
4425
4426 /* Check if we lost any mbufs in the process. */
4427 DBRUNIF((sc->rx_mbuf_alloc),
4428 aprint_error_dev(sc->bnx_dev,
4429 "Memory leak! Lost %d mbufs from rx chain!\n",
4430 sc->rx_mbuf_alloc));
4431
4432 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4433 }
4434
4435 /****************************************************************************/
4436 /* Set media options. */
4437 /* */
4438 /* Returns: */
4439 /* 0 for success, positive value for failure. */
4440 /****************************************************************************/
4441 int
4442 bnx_ifmedia_upd(struct ifnet *ifp)
4443 {
4444 struct bnx_softc *sc;
4445 struct mii_data *mii;
4446 int rc = 0;
4447
4448 sc = ifp->if_softc;
4449
4450 mii = &sc->bnx_mii;
4451 sc->bnx_link = 0;
4452 if (mii->mii_instance) {
4453 struct mii_softc *miisc;
4454 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4455 mii_phy_reset(miisc);
4456 }
4457 mii_mediachg(mii);
4458
4459 return rc;
4460 }
4461
4462 /****************************************************************************/
4463 /* Reports current media status. */
4464 /* */
4465 /* Returns: */
4466 /* Nothing. */
4467 /****************************************************************************/
4468 void
4469 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4470 {
4471 struct bnx_softc *sc;
4472 struct mii_data *mii;
4473 int s;
4474
4475 sc = ifp->if_softc;
4476
4477 s = splnet();
4478
4479 mii = &sc->bnx_mii;
4480
4481 mii_pollstat(mii);
4482 ifmr->ifm_status = mii->mii_media_status;
4483 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4484 sc->bnx_flowflags;
4485
4486 splx(s);
4487 }
4488
4489 /****************************************************************************/
4490 /* Handles PHY generated interrupt events. */
4491 /* */
4492 /* Returns: */
4493 /* Nothing. */
4494 /****************************************************************************/
4495 void
4496 bnx_phy_intr(struct bnx_softc *sc)
4497 {
4498 uint32_t new_link_state, old_link_state;
4499
4500 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4501 BUS_DMASYNC_POSTREAD);
4502 new_link_state = sc->status_block->status_attn_bits &
4503 STATUS_ATTN_BITS_LINK_STATE;
4504 old_link_state = sc->status_block->status_attn_bits_ack &
4505 STATUS_ATTN_BITS_LINK_STATE;
4506
4507 /* Handle any changes if the link state has changed. */
4508 if (new_link_state != old_link_state) {
4509 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
4510
4511 sc->bnx_link = 0;
4512 callout_stop(&sc->bnx_timeout);
4513 bnx_tick(sc);
4514
4515 /* Update the status_attn_bits_ack field in the status block. */
4516 if (new_link_state) {
4517 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
4518 STATUS_ATTN_BITS_LINK_STATE);
4519 DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
4520 } else {
4521 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
4522 STATUS_ATTN_BITS_LINK_STATE);
4523 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
4524 }
4525 }
4526
4527 /* Acknowledge the link change interrupt. */
4528 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
4529 }
4530
4531 /****************************************************************************/
4532 /* Handles received frame interrupt events. */
4533 /* */
4534 /* Returns: */
4535 /* Nothing. */
4536 /****************************************************************************/
4537 void
4538 bnx_rx_intr(struct bnx_softc *sc)
4539 {
4540 struct status_block *sblk = sc->status_block;
4541 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4542 uint16_t hw_cons, sw_cons, sw_chain_cons;
4543 uint16_t sw_prod, sw_chain_prod;
4544 uint32_t sw_prod_bseq;
4545 struct l2_fhdr *l2fhdr;
4546 int i;
4547
4548 DBRUNIF(1, sc->rx_interrupts++);
4549 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4550 BUS_DMASYNC_POSTREAD);
4551
4552 /* Prepare the RX chain pages to be accessed by the host CPU. */
4553 for (i = 0; i < RX_PAGES; i++)
4554 bus_dmamap_sync(sc->bnx_dmatag,
4555 sc->rx_bd_chain_map[i], 0,
4556 sc->rx_bd_chain_map[i]->dm_mapsize,
4557 BUS_DMASYNC_POSTWRITE);
4558
4559 /* Get the hardware's view of the RX consumer index. */
4560 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
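/* Skip to the next entry if this index points at a chain page pointer. */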
4561 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4562 hw_cons++;
4563
4564 /* Get working copies of the driver's view of the RX indices. */
4565 sw_cons = sc->rx_cons;
4566 sw_prod = sc->rx_prod;
4567 sw_prod_bseq = sc->rx_prod_bseq;
4568
4569 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4570 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4571 __func__, sw_prod, sw_cons, sw_prod_bseq);
4572
4573 /* Prevent speculative reads from getting ahead of the status block. */
4574 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4575 BUS_SPACE_BARRIER_READ);
4576
4577 /* Update some debug statistics counters */
4578 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4579 sc->rx_low_watermark = sc->free_rx_bd);
4580 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
4581
4582 /*
4583 * Scan through the receive chain as long
4584 * as there is work to do.
4585 */
4586 while (sw_cons != hw_cons) {
4587 struct mbuf *m;
4588 struct rx_bd *rxbd __diagused;
4589 unsigned int len;
4590 uint32_t status;
4591
4592 /* Convert the producer/consumer indices to an actual
4593 * rx_bd index.
4594 */
4595 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4596 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4597
4598 /* Get the used rx_bd. */
4599 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4600 sc->free_rx_bd++;
4601
4602 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
4603 bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
4604
4605 /* The mbuf is stored with the last rx_bd entry of a packet. */
4606 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4607 #ifdef DIAGNOSTIC
4608 /* Validate that this is the last rx_bd. */
4609 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4610 printf("%s: Unexpected mbuf found in "
4611 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4612 sw_chain_cons);
4613 }
4614 #endif
4615
4616 /* DRC - ToDo: If the received packet is small, say
4617 * less than 128 bytes, allocate a new mbuf
4618 * here, copy the data to that mbuf, and
4619 * recycle the mapped jumbo frame.
4620 */
4621
4622 /* Unmap the mbuf from DMA space. */
4623 #ifdef DIAGNOSTIC
4624 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4625 printf("invalid map sw_cons 0x%x "
4626 "sw_prod 0x%x "
4627 "sw_chain_cons 0x%x "
4628 "sw_chain_prod 0x%x "
4629 "hw_cons 0x%x "
4630 "TOTAL_RX_BD_PER_PAGE 0x%x "
4631 "TOTAL_RX_BD 0x%x\n",
4632 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4633 hw_cons,
4634 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4635 }
4636 #endif
4637 bus_dmamap_sync(sc->bnx_dmatag,
4638 sc->rx_mbuf_map[sw_chain_cons], 0,
4639 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4640 BUS_DMASYNC_POSTREAD);
4641 bus_dmamap_unload(sc->bnx_dmatag,
4642 sc->rx_mbuf_map[sw_chain_cons]);
4643
4644 /* Remove the mbuf from the driver's chain. */
4645 m = sc->rx_mbuf_ptr[sw_chain_cons];
4646 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4647
4648 /*
4649 * Frames received on the NetXtreme II are prepended
4650 * with the l2_fhdr structure which provides status
4651 * information about the received frame (including
4652 * VLAN tags and checksum info) and are also
4653 * automatically adjusted to align the IP header
4654 * (i.e. two null bytes are inserted before the
4655 * Ethernet header).
4656 */
4657 l2fhdr = mtod(m, struct l2_fhdr *);
4658
4659 len = l2fhdr->l2_fhdr_pkt_len;
4660 status = l2fhdr->l2_fhdr_status;
4661
4662 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4663 aprint_error("Simulating l2_fhdr status error.\n");
4664 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4665
4666 /* Watch for unusual sized frames. */
4667 DBRUNIF(((len < BNX_MIN_MTU) ||
4668 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4669 aprint_error_dev(sc->bnx_dev,
4670 "Unusual frame size found. "
4671 "Min(%d), Actual(%d), Max(%d)\n",
4672 (int)BNX_MIN_MTU, len,
4673 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4674
4675 bnx_dump_mbuf(sc, m);
4676 bnx_breakpoint(sc));
4677
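/* The reported packet length includes the trailing FCS; trim it off. */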
4678 len -= ETHER_CRC_LEN;
4679
4680 /* Check the received frame for errors. */
4681 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4682 L2_FHDR_ERRORS_PHY_DECODE |
4683 L2_FHDR_ERRORS_ALIGNMENT |
4684 L2_FHDR_ERRORS_TOO_SHORT |
4685 L2_FHDR_ERRORS_GIANT_FRAME)) ||
4686 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4687 len >
4688 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4689 if_statinc(ifp, if_ierrors);
4690 DBRUNIF(1, sc->l2fhdr_status_errors++);
4691
4692 /* Reuse the mbuf for a new frame. */
4693 if (bnx_add_buf(sc, m, &sw_prod,
4694 &sw_chain_prod, &sw_prod_bseq)) {
4695 DBRUNIF(1, bnx_breakpoint(sc));
4696 panic("%s: Can't reuse RX mbuf!\n",
4697 device_xname(sc->bnx_dev));
4698 }
4699 continue;
4700 }
4701
4702 /*
4703 * Get a new mbuf for the rx_bd. If no new mbufs are
4704 * available then reuse the current mbuf, count an input
4705 * error on the interface, and log an error in the
4706 * system log.
4707 */
4708 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4709 &sw_prod_bseq)) {
4710 DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4711 "Failed to allocate "
4712 "new mbuf, incoming frame dropped!\n"));
4713
4714 if_statinc(ifp, if_ierrors);
4715
4716 /* Try to reuse the existing mbuf. */
4717 if (bnx_add_buf(sc, m, &sw_prod,
4718 &sw_chain_prod, &sw_prod_bseq)) {
4719 DBRUNIF(1, bnx_breakpoint(sc));
4720 panic("%s: Double mbuf allocation "
4721 "failure!",
4722 device_xname(sc->bnx_dev));
4723 }
4724 continue;
4725 }
4726
4727 /* Skip over the l2_fhdr when passing the data up
4728 * the stack.
4729 */
4730 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4731
4732 /* Adjust the packet length to match the received data. */
4733 m->m_pkthdr.len = m->m_len = len;
4734
4735 /* Send the packet to the appropriate interface. */
4736 m_set_rcvif(m, ifp);
4737
4738 DBRUN(BNX_VERBOSE_RECV,
4739 struct ether_header *eh;
4740 eh = mtod(m, struct ether_header *);
4741 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
4742 __func__, ether_sprintf(eh->ether_dhost),
4743 ether_sprintf(eh->ether_shost),
4744 htons(eh->ether_type)));
4745
4746 /* Validate the checksum. */
4747
4748 /* Check for an IP datagram. */
4749 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4750 /* Check if the IP checksum is valid. */
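/* (A valid ones-complement sum reads back as 0xffff, so XOR with 0xffff yields zero.) */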
4751 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4752 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
4753 #ifdef BNX_DEBUG
4754 else
4755 DBPRINT(sc, BNX_WARN_SEND,
4756 "%s(): Invalid IP checksum "
4757 "= 0x%04X!\n",
4758 __func__,
4759 l2fhdr->l2_fhdr_ip_xsum
4760 );
4761 #endif
4762 }
4763
4764 /* Check for a valid TCP/UDP frame. */
4765 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4766 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4767 /* Check for a good TCP/UDP checksum. */
4768 if ((status &
4769 (L2_FHDR_ERRORS_TCP_XSUM |
4770 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4771 m->m_pkthdr.csum_flags |=
4772 M_CSUM_TCPv4 |
4773 M_CSUM_UDPv4;
4774 } else {
4775 DBPRINT(sc, BNX_WARN_SEND,
4776 "%s(): Invalid TCP/UDP "
4777 "checksum = 0x%04X!\n",
4778 __func__,
4779 l2fhdr->l2_fhdr_tcp_udp_xsum);
4780 }
4781 }
4782
4783 /*
4784 * If we received a packet with a vlan tag,
4785 * attach that information to the packet.
4786 */
4787 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4788 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4789 vlan_set_tag(m, l2fhdr->l2_fhdr_vlan_tag);
4790 }
4791
4792 /* Pass the mbuf off to the upper layers. */
4793
4794 DBPRINT(sc, BNX_VERBOSE_RECV,
4795 "%s(): Passing received frame up.\n", __func__);
4796 if_percpuq_enqueue(ifp->if_percpuq, m);
4797 DBRUNIF(1, sc->rx_mbuf_alloc--);
4799 }
4800
4801 sw_cons = NEXT_RX_BD(sw_cons);
4802
4803 /* Refresh hw_cons to see if there's new work */
4804 if (sw_cons == hw_cons) {
4805 hw_cons = sc->hw_rx_cons =
4806 sblk->status_rx_quick_consumer_index0;
4807 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4808 USABLE_RX_BD_PER_PAGE)
4809 hw_cons++;
4810 }
4811
4812 /* Prevent speculative reads from getting ahead of
4813 * the status block.
4814 */
4815 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4816 BUS_SPACE_BARRIER_READ);
4817 }
4818
4819 for (i = 0; i < RX_PAGES; i++)
4820 bus_dmamap_sync(sc->bnx_dmatag,
4821 sc->rx_bd_chain_map[i], 0,
4822 sc->rx_bd_chain_map[i]->dm_mapsize,
4823 BUS_DMASYNC_PREWRITE);
4824
4825 sc->rx_cons = sw_cons;
4826 sc->rx_prod = sw_prod;
4827 sc->rx_prod_bseq = sw_prod_bseq;
4828
4829 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4830 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4831
4832 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4833 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4834 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4835 }
4836
4837 /****************************************************************************/
4838 /* Handles transmit completion interrupt events. */
4839 /* */
4840 /* Returns: */
4841 /* Nothing. */
4842 /****************************************************************************/
4843 void
4844 bnx_tx_intr(struct bnx_softc *sc)
4845 {
4846 struct status_block *sblk = sc->status_block;
4847 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4848 struct bnx_pkt *pkt;
4849 bus_dmamap_t map;
4850 uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4851
4852 DBRUNIF(1, sc->tx_interrupts++);
4853 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4854 BUS_DMASYNC_POSTREAD);
4855
4856 /* Get the hardware's view of the TX consumer index. */
4857 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4858
4859 /* Skip to the next entry if this is a chain page pointer. */
4860 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4861 hw_tx_cons++;
4862
4863 sw_tx_cons = sc->tx_cons;
4864
4865 /* Prevent speculative reads from getting ahead of the status block. */
4866 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4867 BUS_SPACE_BARRIER_READ);
4868
4869 /* Cycle through any completed TX chain page entries. */
4870 while (sw_tx_cons != hw_tx_cons) {
4871 #ifdef BNX_DEBUG
4872 struct tx_bd *txbd = NULL;
4873 #endif
4874 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4875
4876 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4877 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4878 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4879
4880 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4881 aprint_error_dev(sc->bnx_dev,
4882 "TX chain consumer out of range! 0x%04X > 0x%04X\n",
4883 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));
4884
4885 DBRUNIF(1, txbd = &sc->tx_bd_chain
4886 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4887
4888 DBRUNIF((txbd == NULL),
4889 aprint_error_dev(sc->bnx_dev,
4890 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
4891 bnx_breakpoint(sc));
4892
4893 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
4894 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4895
4897 mutex_enter(&sc->tx_pkt_mtx);
4898 pkt = TAILQ_FIRST(&sc->tx_used_pkts);
4899 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
4900 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
4901 mutex_exit(&sc->tx_pkt_mtx);
4902 /*
4903 * Free the associated mbuf. Remember
4904 * that only the last tx_bd of a packet
4905 * has an mbuf pointer and DMA map.
4906 */
4907 map = pkt->pkt_dmamap;
4908 bus_dmamap_sync(sc->bnx_dmatag, map, 0,
4909 map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4910 bus_dmamap_unload(sc->bnx_dmatag, map);
4911
4912 m_freem(pkt->pkt_mbuf);
4913 DBRUNIF(1, sc->tx_mbuf_alloc--);
4914
4915 if_statinc(ifp, if_opackets);
4916
4917 mutex_enter(&sc->tx_pkt_mtx);
4918 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
4919 }
4920 mutex_exit(&sc->tx_pkt_mtx);
4921
4922 sc->used_tx_bd--;
4923 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
4924 __FILE__, __LINE__, sc->used_tx_bd);
4925
4926 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4927
4928 /* Refresh hw_tx_cons to see if there's new work. */
4929 hw_tx_cons = sc->hw_tx_cons =
4930 sblk->status_tx_quick_consumer_index0;
4931 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4932 USABLE_TX_BD_PER_PAGE)
4933 hw_tx_cons++;
4934
4935 /* Prevent speculative reads from getting ahead of
4936 * the status block.
4937 */
4938 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4939 BUS_SPACE_BARRIER_READ);
4940 }
4941
4942 /* Clear the TX timeout timer. */
4943 ifp->if_timer = 0;
4944
4945 /* Clear the tx hardware queue full flag. */
4946 if (sc->used_tx_bd < sc->max_tx_bd) {
4947 DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4948 aprint_debug_dev(sc->bnx_dev,
4949 "Open TX chain! %d/%d (used/total)\n",
4950 sc->used_tx_bd, sc->max_tx_bd));
4951 ifp->if_flags &= ~IFF_OACTIVE;
4952 }
4953
4954 sc->tx_cons = sw_tx_cons;
4955 }
4956
4957 /****************************************************************************/
4958 /* Disables interrupt generation. */
4959 /* */
4960 /* Returns: */
4961 /* Nothing. */
4962 /****************************************************************************/
4963 void
4964 bnx_disable_intr(struct bnx_softc *sc)
4965 {
4966 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4967 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
4968 }
4969
4970 /****************************************************************************/
4971 /* Enables interrupt generation. */
4972 /* */
4973 /* Returns: */
4974 /* Nothing. */
4975 /****************************************************************************/
4976 void
4977 bnx_enable_intr(struct bnx_softc *sc)
4978 {
4979 uint32_t val;
4980
4981 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4982 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4983
4984 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4985 sc->last_status_idx);
4986
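/* Force a coalescing pass now so any status block update that arrived while interrupts were masked is serviced immediately. */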
4987 val = REG_RD(sc, BNX_HC_COMMAND);
4988 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4989 }
4990
4991 /****************************************************************************/
4992 /* Handles controller initialization. */
4993 /* Returns: 0 for success, positive value for failure. */
4994 /****************************************************************************/
4995 int
4996 bnx_init(struct ifnet *ifp)
4997 {
4998 struct bnx_softc *sc = ifp->if_softc;
4999 uint32_t ether_mtu;
5000 int s, error = 0;
5001
5002 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
5003
5004 s = splnet();
5005
5006 bnx_stop(ifp, 0);
5007
5008 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
5009 aprint_error_dev(sc->bnx_dev,
5010 "Controller reset failed!\n");
5011 goto bnx_init_exit;
5012 }
5013
5014 if ((error = bnx_chipinit(sc)) != 0) {
5015 aprint_error_dev(sc->bnx_dev,
5016 "Controller initialization failed!\n");
5017 goto bnx_init_exit;
5018 }
5019
5020 if ((error = bnx_blockinit(sc)) != 0) {
5021 aprint_error_dev(sc->bnx_dev,
5022 "Block initialization failed!\n");
5023 goto bnx_init_exit;
5024 }
5025
5026 /* Calculate and program the Ethernet MRU size. */
5027 if (ifp->if_mtu <= ETHERMTU) {
5028 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
5029 sc->mbuf_alloc_size = MCLBYTES;
5030 } else {
5031 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
5032 sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
5033 }
5034
5036 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n", __func__, ether_mtu);
5037
5038 /*
5039 * Program the MRU and enable Jumbo frame support.
5040 */
5042 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
5043 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
5044
5045 /* Calculate the RX Ethernet frame size for rx_bd's. */
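/* (2 bytes of pre-header alignment padding plus 8 bytes of slack, presumably for the FCS.) */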
5046 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
5047
5048 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
5049 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
5050 sc->mbuf_alloc_size, sc->max_frame_size);
5051
5052 /* Program appropriate promiscuous/multicast filtering. */
5053 bnx_iff(sc);
5054
5055 /* Init RX buffer descriptor chain. */
5056 bnx_init_rx_chain(sc);
5057
5058 /* Init TX buffer descriptor chain. */
5059 bnx_init_tx_chain(sc);
5060
5061 /* Enable host interrupts. */
5062 bnx_enable_intr(sc);
5063
5064 mii_ifmedia_change(&sc->bnx_mii);
5065
5066 SET(ifp->if_flags, IFF_RUNNING);
5067 CLR(ifp->if_flags, IFF_OACTIVE);
5068
5069 callout_schedule(&sc->bnx_timeout, hz);
5070
5071 bnx_init_exit:
5072 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
5073
5074 splx(s);
5075
5076 return error;
5077 }
5078
5079 void
5080 bnx_mgmt_init(struct bnx_softc *sc)
5081 {
5082 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5083 uint32_t val;
5084
5085 /* Check if the driver is still running and bail out if it is. */
5086 if (ifp->if_flags & IFF_RUNNING)
5087 goto bnx_mgmt_init_exit;
5088
5089 /* Initialize the on-board CPUs. */
5090 bnx_init_cpus(sc);
5091
5092 val = (BCM_PAGE_BITS - 8) << 24;
5093 REG_WR(sc, BNX_RV2P_CONFIG, val);
5094
5095 /* Enable all critical blocks in the MAC. */
5096 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
5097 BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
5098 BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
5099 BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
5100 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
5101 DELAY(20);
5102
5103 mii_ifmedia_change(&sc->bnx_mii);
5104
5105 bnx_mgmt_init_exit:
5106 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
5107 }
5108
5109 /****************************************************************************/
5110 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
5111 /* memory visible to the controller. */
5112 /* */
5113 /* Returns: */
5114 /* 0 for success, positive value for failure. */
5115 /****************************************************************************/
5116 int
5117 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
5118 {
5119 struct bnx_pkt *pkt;
5120 bus_dmamap_t map;
5121 struct tx_bd *txbd = NULL;
5122 uint16_t vlan_tag = 0, flags = 0;
5123 uint16_t chain_prod, prod;
5124 #ifdef BNX_DEBUG
5125 uint16_t debug_prod;
5126 #endif
5127 uint32_t addr, prod_bseq;
5128 int i, error;
5129 static struct work bnx_wk; /* Dummy work. Statically allocated. */
5130 bool remap = true;
5131
5132 mutex_enter(&sc->tx_pkt_mtx);
5133 pkt = TAILQ_FIRST(&sc->tx_free_pkts);
5134 if (pkt == NULL) {
5135 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) {
5136 mutex_exit(&sc->tx_pkt_mtx);
5137 return ENETDOWN;
5138 }
5139
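/* No free packets: schedule the workqueue to allocate more, since allocation may sleep and cannot be done in the transmit path. */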
5140 if (sc->tx_pkt_count <= TOTAL_TX_BD &&
5141 !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) {
5142 workqueue_enqueue(sc->bnx_wq, &bnx_wk, NULL);
5143 SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
5144 }
5145
5146 mutex_exit(&sc->tx_pkt_mtx);
5147 return ENOMEM;
5148 }
5149 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
5150 mutex_exit(&sc->tx_pkt_mtx);
5151
5152 /* Transfer any checksum offload flags to the bd. */
5153 if (m->m_pkthdr.csum_flags) {
5154 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
5155 flags |= TX_BD_FLAGS_IP_CKSUM;
5156 if (m->m_pkthdr.csum_flags &
5157 (M_CSUM_TCPv4 | M_CSUM_UDPv4))
5158 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5159 }
5160
5161 /* Transfer any VLAN tags to the bd. */
5162 if (vlan_has_tag(m)) {
5163 flags |= TX_BD_FLAGS_VLAN_TAG;
5164 vlan_tag = vlan_get_tag(m);
5165 }
5166
5167 /* Gather the producer indices and the packet's DMA map. */
5168 prod = sc->tx_prod;
5169 chain_prod = TX_CHAIN_IDX(prod);
5170 map = pkt->pkt_dmamap;
5171
5172 /* Map the mbuf into our DMA address space. */
5173 retry:
5174 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT);
5175 if (__predict_false(error)) {
5176 if (error == EFBIG) {
5177 if (remap == true) {
5178 struct mbuf *newm;
5179
5180 remap = false;
5181 newm = m_defrag(m, M_NOWAIT);
5182 if (newm != NULL) {
5183 m = newm;
5184 goto retry;
5185 }
5186 }
5187 }
5188 sc->tx_dma_map_failures++;
5189 goto maperr;
5190 }
5191 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
5192 BUS_DMASYNC_PREWRITE);
5193 /* Make sure there's room in the chain */
5194 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
5195 goto nospace;
5196
5197 /* prod points to an empty tx_bd at this point. */
5198 prod_bseq = sc->tx_prod_bseq;
5199 #ifdef BNX_DEBUG
5200 debug_prod = chain_prod;
5201 #endif
5202 DBPRINT(sc, BNX_INFO_SEND,
5203 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
5204 "prod_bseq = 0x%08X\n",
5205 __func__, prod, chain_prod, prod_bseq);
5206
5207 /*
5208 * Cycle through each mbuf segment that makes up
5209 * the outgoing frame, gathering the mapping info
5210 * for that segment and creating a tx_bd for the
5211 * mbuf.
5212 */
5213 for (i = 0; i < map->dm_nsegs ; i++) {
5214 chain_prod = TX_CHAIN_IDX(prod);
5215 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
5216
5217 addr = (uint32_t)map->dm_segs[i].ds_addr;
5218 txbd->tx_bd_haddr_lo = addr;
5219 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
5220 txbd->tx_bd_haddr_hi = addr;
5221 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
5222 txbd->tx_bd_vlan_tag = vlan_tag;
5223 txbd->tx_bd_flags = flags;
5224 prod_bseq += map->dm_segs[i].ds_len;
5225 if (i == 0)
5226 txbd->tx_bd_flags |= TX_BD_FLAGS_START;
5227 prod = NEXT_TX_BD(prod);
5228 }
5229
5230 /* Set the END flag on the last TX buffer descriptor. */
5231 txbd->tx_bd_flags |= TX_BD_FLAGS_END;
5232
5233 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));
5234
5235 DBPRINT(sc, BNX_INFO_SEND,
5236 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
5237 "prod_bseq = 0x%08X\n",
5238 __func__, prod, chain_prod, prod_bseq);
5239
5240 pkt->pkt_mbuf = m;
5241 pkt->pkt_end_desc = chain_prod;
5242
5243 mutex_enter(&sc->tx_pkt_mtx);
5244 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
5245 mutex_exit(&sc->tx_pkt_mtx);
5246
5247 sc->used_tx_bd += map->dm_nsegs;
5248 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
5249 __FILE__, __LINE__, sc->used_tx_bd);
5250
5251 /* Update some debug statistics counters */
5252 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
5253 sc->tx_hi_watermark = sc->used_tx_bd);
5254 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
5255 DBRUNIF(1, sc->tx_mbuf_alloc++);
5256
5257 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
5258 map->dm_nsegs));
5259
5260 /* prod points to the next free tx_bd at this point. */
5261 sc->tx_prod = prod;
5262 sc->tx_prod_bseq = prod_bseq;
5263
5264 return 0;
5265
5266
5267 nospace:
5268 bus_dmamap_unload(sc->bnx_dmatag, map);
5269 maperr:
5270 mutex_enter(&sc->tx_pkt_mtx);
5271 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
5272 mutex_exit(&sc->tx_pkt_mtx);
5273
5274 return ENOMEM;
5275 }
5276
5277 /****************************************************************************/
5278 /* Main transmit routine. */
5279 /* */
5280 /* Returns: */
5281 /* Nothing. */
5282 /****************************************************************************/
5283 void
5284 bnx_start(struct ifnet *ifp)
5285 {
5286 struct bnx_softc *sc = ifp->if_softc;
5287 struct mbuf *m_head = NULL;
5288 int count = 0;
5289 #ifdef BNX_DEBUG
5290 uint16_t tx_chain_prod;
5291 #endif
5292
5293 /* If there's no link or the transmit queue is empty then just exit. */
5294 	if (!sc->bnx_link ||
5295 	    (ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) {
5296 DBPRINT(sc, BNX_INFO_SEND,
5297 "%s(): output active or device not running.\n", __func__);
5298 goto bnx_start_exit;
5299 }
5300
5301 /* prod points to the next free tx_bd. */
5302 #ifdef BNX_DEBUG
5303 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5304 #endif
5305
5306 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
5307 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, "
5308 "used_tx %d max_tx %d\n",
5309 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq,
5310 sc->used_tx_bd, sc->max_tx_bd);
5311
5312 /*
5313 * Keep adding entries while there is space in the ring.
5314 */
5315 while (sc->used_tx_bd < sc->max_tx_bd) {
5316 /* Check for any frames to send. */
5317 IFQ_POLL(&ifp->if_snd, m_head);
5318 if (m_head == NULL)
5319 break;
5320
5321 /*
5322 * Pack the data into the transmit ring. If we
5323 * don't have room, set the OACTIVE flag to wait
5324 * for the NIC to drain the chain.
5325 */
5326 if (bnx_tx_encap(sc, m_head)) {
5327 ifp->if_flags |= IFF_OACTIVE;
5328 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
5329 "business! Total tx_bd used = %d\n",
5330 sc->used_tx_bd);
5331 break;
5332 }
5333
5334 IFQ_DEQUEUE(&ifp->if_snd, m_head);
5335 count++;
5336
5337 /* Send a copy of the frame to any BPF listeners. */
5338 bpf_mtap(ifp, m_head, BPF_D_OUT);
5339 }
5340
5341 if (count == 0) {
5342 /* no packets were dequeued */
5343 DBPRINT(sc, BNX_VERBOSE_SEND,
5344 "%s(): No packets were dequeued\n", __func__);
5345 goto bnx_start_exit;
5346 }
5347
5348 /* Update the driver's counters. */
5349 #ifdef BNX_DEBUG
5350 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5351 #endif
5352
5353 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, "
5354 "tx_chain_prod = 0x%04X, tx_prod_bseq = 0x%08X\n",
5355 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5356
5357 	/* Start the transmit: post the new producer index and byte sequence. */
5358 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5359 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5360
5361 /* Set the tx timeout. */
5362 ifp->if_timer = BNX_TX_TIMEOUT;
5363
5364 bnx_start_exit:
5365 return;
5366 }
5367
5368 /****************************************************************************/
5369 /* Handles any IOCTL calls from the operating system. */
5370 /* */
5371 /* Returns: */
5372 /* 0 for success, positive value for failure. */
5373 /****************************************************************************/
5374 int
5375 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5376 {
5377 struct bnx_softc *sc = ifp->if_softc;
5378 struct ifreq *ifr = (struct ifreq *) data;
5379 struct mii_data *mii = &sc->bnx_mii;
5380 int s, error = 0;
5381
5382 s = splnet();
5383
5384 switch (command) {
5385 case SIOCSIFFLAGS:
5386 if ((error = ifioctl_common(ifp, command, data)) != 0)
5387 break;
5388 /* XXX set an ifflags callback and let ether_ioctl
5389 * handle all of this.
5390 */
5391 if (ISSET(ifp->if_flags, IFF_UP)) {
5392 if (ifp->if_flags & IFF_RUNNING)
5393 error = ENETRESET;
5394 else
5395 bnx_init(ifp);
5396 } else if (ifp->if_flags & IFF_RUNNING)
5397 bnx_stop(ifp, 1);
5398 break;
5399
5400 case SIOCSIFMEDIA:
5401 /* Flow control requires full-duplex mode. */
5402 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
5403 (ifr->ifr_media & IFM_FDX) == 0)
5404 ifr->ifr_media &= ~IFM_ETH_FMASK;
5405
5406 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
5407 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
5408 /* We can do both TXPAUSE and RXPAUSE. */
5409 ifr->ifr_media |=
5410 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
5411 }
5412 sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
5413 }
5414 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5415 sc->bnx_phy_flags);
5416
5417 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5418 break;
5419
5420 default:
5421 error = ether_ioctl(ifp, command, data);
5422 }
5423
5424 if (error == ENETRESET) {
5425 if (ifp->if_flags & IFF_RUNNING)
5426 bnx_iff(sc);
5427 error = 0;
5428 }
5429
5430 splx(s);
5431 return error;
5432 }
5433
5434 /****************************************************************************/
5435 /* Transmit timeout handler. */
5436 /* */
5437 /* Returns: */
5438 /* Nothing. */
5439 /****************************************************************************/
5440 void
5441 bnx_watchdog(struct ifnet *ifp)
5442 {
5443 struct bnx_softc *sc = ifp->if_softc;
5444
5445 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5446 bnx_dump_status_block(sc));
5447 /*
5448 * If we are in this routine because of pause frames, then
5449 * don't reset the hardware.
5450 */
5451 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5452 return;
5453
5454 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
5455
5456 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5457
5458 bnx_init(ifp);
5459
5460 if_statinc(ifp, if_oerrors);
5461 }
5462
5463 /*
5464 * Interrupt handler.
5465 */
5466 /****************************************************************************/
5467 /* Main interrupt entry point. Verifies that the controller generated the */
5468 /* interrupt and then calls separate routines to handle the various */
5469 /* interrupt causes (PHY, TX, RX). */
5470 /* */
5471 /* Returns: */
5472 /* 0 for success, positive value for failure. */
5473 /****************************************************************************/
5474 int
5475 bnx_intr(void *xsc)
5476 {
5477 struct bnx_softc *sc = xsc;
5478 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5479 uint32_t status_attn_bits;
5480 uint16_t status_idx;
5481 const struct status_block *sblk;
5482 int rv = 0;
5483
5484 if (!device_is_active(sc->bnx_dev) ||
5485 (ifp->if_flags & IFF_RUNNING) == 0)
5486 return 0;
5487
5488 DBRUNIF(1, sc->interrupts_generated++);
5489
5490 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5491 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
5492
5493 sblk = sc->status_block;
5494 /*
5495 * If the hardware status block index
5496 * matches the last value read by the
5497 * driver and we haven't asserted our
5498 * interrupt then there's nothing to do.
5499 */
5500 status_idx = sblk->status_idx;
5501 if ((status_idx != sc->last_status_idx) ||
5502 !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS),
5503 BNX_PCICFG_MISC_STATUS_INTA_VALUE)) {
5504 rv = 1;
5505
5506 		/* Ack the interrupt by reporting the status index just seen. */
5507 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5508 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx);
5509
5510 status_attn_bits = sblk->status_attn_bits;
5511
5512 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
5513 		    aprint_debug("Simulating unexpected status attention bit set.\n");
5514 status_attn_bits = status_attn_bits |
5515 STATUS_ATTN_BITS_PARITY_ERROR);
5516
5517 /* Was it a link change interrupt? */
5518 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5519 (sblk->status_attn_bits_ack &
5520 STATUS_ATTN_BITS_LINK_STATE))
5521 bnx_phy_intr(sc);
5522
5523 /* If any other attention is asserted then the chip is toast. */
5524 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5525 (sblk->status_attn_bits_ack &
5526 ~STATUS_ATTN_BITS_LINK_STATE))) {
5527 DBRUN(sc->unexpected_attentions++);
5528
5529 BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
5530 sblk->status_attn_bits);
5531
5532 DBRUNIF((bnx_debug_unexpected_attention == 0),
5533 bnx_breakpoint(sc));
5534
5535 bnx_init(ifp);
5536 goto out;
5537 }
5538
5539 /* Check for any completed RX frames. */
5540 if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
5541 bnx_rx_intr(sc);
5542
5543 /* Check for any completed TX frames. */
5544 if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
5545 bnx_tx_intr(sc);
5546
5547 /*
5548 * Save the status block index value for use during the
5549 * next interrupt.
5550 */
5551 sc->last_status_idx = status_idx;
5552
5553 /* Start moving packets again */
5554 if (ifp->if_flags & IFF_RUNNING)
5555 if_schedule_deferred_start(ifp);
5556 }
5557
5558 out:
5559 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5560 sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5561
5562 return rv;
5563 }
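#if 0
/*
 * Minimal sketch, assuming the usual NetBSD PCI attach flow: bnx_intr()
 * is hooked up once at attach time roughly like this. The real hookup
 * lives in bnx_attach(); "pc", "ih" and the intrhand member name are
 * illustrative assumptions.
 */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
#endif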
5564
5565 /****************************************************************************/
5566 /* Programs the various packet receive modes (broadcast and multicast). */
5567 /* */
5568 /* Returns: */
5569 /* Nothing. */
5570 /****************************************************************************/
5571 void
5572 bnx_iff(struct bnx_softc *sc)
5573 {
5574 struct ethercom *ec = &sc->bnx_ec;
5575 struct ifnet *ifp = &ec->ec_if;
5576 struct ether_multi *enm;
5577 struct ether_multistep step;
5578 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5579 uint32_t rx_mode, sort_mode;
5580 int h, i;
5581
5582 /* Initialize receive mode default settings. */
5583 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
5584 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
5585 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
5586 ifp->if_flags &= ~IFF_ALLMULTI;
5587
5588 /*
5589 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5590 	 * be enabled.
5591 */
5592 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
5593 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
5594
5595 /*
5596 * Check for promiscuous, all multicast, or selected
5597 * multicast address filtering.
5598 */
5599 if (ifp->if_flags & IFF_PROMISC) {
5600 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
5601
5602 ifp->if_flags |= IFF_ALLMULTI;
5603 /* Enable promiscuous mode. */
5604 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
5605 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
5606 } else if (ifp->if_flags & IFF_ALLMULTI) {
5607 allmulti:
5608 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
5609
5610 ifp->if_flags |= IFF_ALLMULTI;
5611 /* Enable all multicast addresses. */
5612 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5613 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5614 0xffffffff);
5615 sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
5616 } else {
5617 /* Accept one or more multicast(s). */
5618 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
5619
5620 ETHER_LOCK(ec);
5621 ETHER_FIRST_MULTI(step, ec, enm);
5622 while (enm != NULL) {
5623 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
5624 ETHER_ADDR_LEN)) {
5625 ETHER_UNLOCK(ec);
5626 goto allmulti;
5627 }
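			/*
			 * The low 8 CRC bits select one of 256 hash bins:
			 * bits 7:5 pick one of the eight 32-bit hash
			 * registers, bits 4:0 the bit within it; e.g.
			 * h == 0x5A sets bit 26 of hashes[2].
			 */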
5628 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
5629 0xFF;
5630 hashes[(h & 0xE0) >> 5] |= __BIT(h & 0x1F);
5631 ETHER_NEXT_MULTI(step, enm);
5632 }
5633 ETHER_UNLOCK(ec);
5634
5635 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5636 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5637 hashes[i]);
5638
5639 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
5640 }
5641
5642 /* Only make changes if the receive mode has actually changed. */
5643 if (rx_mode != sc->rx_mode) {
5644 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5645 rx_mode);
5646
5647 sc->rx_mode = rx_mode;
5648 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
5649 }
5650
5651 	/* Disable and clear the existing sort before enabling a new sort. */
5652 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
5653 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
5654 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
5655 }
5656
5657 /****************************************************************************/
5658 /* Called periodically to update statistics from the controller's */
5659 /* statistics block. */
5660 /* */
5661 /* Returns: */
5662 /* Nothing. */
5663 /****************************************************************************/
5664 void
5665 bnx_stats_update(struct bnx_softc *sc)
5666 {
5667 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5668 struct statistics_block *stats;
5669
5670 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
5671 	bus_dmamap_sync(sc->bnx_dmatag, sc->stats_map, 0, BNX_STATS_BLK_SZ,
5672 	    BUS_DMASYNC_POSTREAD);
5673
5674 stats = (struct statistics_block *)sc->stats_block;
5675
5676 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
5677 uint64_t value;
5678
5679 	/*
5680 	 * Update the interface statistics from the free-running hardware
5681 	 * counters, crediting only the delta since the last snapshot.
5682 	 */
5683 value = (u_long)stats->stat_EtherStatsCollisions;
5684 if_statadd_ref(nsr, if_collisions, value - sc->if_stat_collisions);
5685 sc->if_stat_collisions = value;
5686
5687 value = (u_long)stats->stat_EtherStatsUndersizePkts +
5688 (u_long)stats->stat_EtherStatsOverrsizePkts +
5689 (u_long)stats->stat_IfInMBUFDiscards +
5690 (u_long)stats->stat_Dot3StatsAlignmentErrors +
5691 (u_long)stats->stat_Dot3StatsFCSErrors;
5692 if_statadd_ref(nsr, if_ierrors, value - sc->if_stat_ierrors);
5693 sc->if_stat_ierrors = value;
5694
5695 value = (u_long)
5696 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5697 (u_long)stats->stat_Dot3StatsExcessiveCollisions +
5698 (u_long)stats->stat_Dot3StatsLateCollisions;
5699 if_statadd_ref(nsr, if_oerrors, value - sc->if_stat_oerrors);
5700 sc->if_stat_oerrors = value;
5701
5702 /*
5703 * Certain controllers don't report
5704 * carrier sense errors correctly.
5705 * See errata E11_5708CA0_1165.
5706 */
5707 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
5708 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0)) {
5709 if_statadd_ref(nsr, if_oerrors,
5710 (u_long) stats->stat_Dot3StatsCarrierSenseErrors);
5711 }
5712
5713 IF_STAT_PUTREF(ifp);
5714
5715 /*
5716 * Update the sysctl statistics from the
5717 * hardware statistics.
5718 */
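	/*
	 * Each 64-bit counter is exported by the chip as a hi:lo pair of
	 * 32-bit words. A minimal sketch of the combine repeated below;
	 * BNX_STAT64 is a hypothetical helper, not part of this driver:
	 */
#if 0
#define BNX_STAT64(blk, name)						\
	(((uint64_t)(blk)->stat_ ## name ## _hi << 32) +		\
	    (uint64_t)(blk)->stat_ ## name ## _lo)

	sc->stat_IfHCInOctets = BNX_STAT64(stats, IfHCInOctets);
#endif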
5719 sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5720 (uint64_t) stats->stat_IfHCInOctets_lo;
5721
5722 sc->stat_IfHCInBadOctets =
5723 ((uint64_t) stats->stat_IfHCInBadOctets_hi << 32) +
5724 (uint64_t) stats->stat_IfHCInBadOctets_lo;
5725
5726 sc->stat_IfHCOutOctets =
5727 ((uint64_t) stats->stat_IfHCOutOctets_hi << 32) +
5728 (uint64_t) stats->stat_IfHCOutOctets_lo;
5729
5730 sc->stat_IfHCOutBadOctets =
5731 ((uint64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
5732 (uint64_t) stats->stat_IfHCOutBadOctets_lo;
5733
5734 sc->stat_IfHCInUcastPkts =
5735 ((uint64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
5736 (uint64_t) stats->stat_IfHCInUcastPkts_lo;
5737
5738 sc->stat_IfHCInMulticastPkts =
5739 ((uint64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
5740 (uint64_t) stats->stat_IfHCInMulticastPkts_lo;
5741
5742 sc->stat_IfHCInBroadcastPkts =
5743 ((uint64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5744 (uint64_t) stats->stat_IfHCInBroadcastPkts_lo;
5745
5746 sc->stat_IfHCOutUcastPkts =
5747 ((uint64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
5748 (uint64_t) stats->stat_IfHCOutUcastPkts_lo;
5749
5750 sc->stat_IfHCOutMulticastPkts =
5751 ((uint64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5752 (uint64_t) stats->stat_IfHCOutMulticastPkts_lo;
5753
5754 sc->stat_IfHCOutBroadcastPkts =
5755 ((uint64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5756 (uint64_t) stats->stat_IfHCOutBroadcastPkts_lo;
5757
5758 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5759 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5760
5761 sc->stat_Dot3StatsCarrierSenseErrors =
5762 stats->stat_Dot3StatsCarrierSenseErrors;
5763
5764 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
5765
5766 sc->stat_Dot3StatsAlignmentErrors =
5767 stats->stat_Dot3StatsAlignmentErrors;
5768
5769 sc->stat_Dot3StatsSingleCollisionFrames =
5770 stats->stat_Dot3StatsSingleCollisionFrames;
5771
5772 sc->stat_Dot3StatsMultipleCollisionFrames =
5773 stats->stat_Dot3StatsMultipleCollisionFrames;
5774
5775 sc->stat_Dot3StatsDeferredTransmissions =
5776 stats->stat_Dot3StatsDeferredTransmissions;
5777
5778 sc->stat_Dot3StatsExcessiveCollisions =
5779 stats->stat_Dot3StatsExcessiveCollisions;
5780
5781 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
5782
5783 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
5784
5785 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
5786
5787 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
5788
5789 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
5790
5791 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
5792
5793 sc->stat_EtherStatsPktsRx64Octets =
5794 stats->stat_EtherStatsPktsRx64Octets;
5795
5796 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5797 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5798
5799 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5800 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5801
5802 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5803 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5804
5805 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5806 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5807
5808 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5809 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5810
5811 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5812 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5813
5814 sc->stat_EtherStatsPktsTx64Octets =
5815 stats->stat_EtherStatsPktsTx64Octets;
5816
5817 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5818 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5819
5820 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5821 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5822
5823 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5824 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5825
5826 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5827 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5828
5829 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5830 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5831
5832 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5833 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5834
5835 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
5836
5837 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
5838
5839 sc->stat_OutXonSent = stats->stat_OutXonSent;
5840
5841 sc->stat_OutXoffSent = stats->stat_OutXoffSent;
5842
5843 sc->stat_FlowControlDone = stats->stat_FlowControlDone;
5844
5845 sc->stat_MacControlFramesReceived =
5846 stats->stat_MacControlFramesReceived;
5847
5848 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
5849
5850 sc->stat_IfInFramesL2FilterDiscards =
5851 stats->stat_IfInFramesL2FilterDiscards;
5852
5853 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
5854
5855 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
5856
5857 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
5858
5859 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
5860
5861 sc->stat_CatchupInRuleCheckerDiscards =
5862 stats->stat_CatchupInRuleCheckerDiscards;
5863
5864 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
5865
5866 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
5867
5868 sc->stat_CatchupInRuleCheckerP4Hit =
5869 stats->stat_CatchupInRuleCheckerP4Hit;
5870
5871 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
5872 }
5873
5874 void
5875 bnx_tick(void *xsc)
5876 {
5877 struct bnx_softc *sc = xsc;
5878 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5879 struct mii_data *mii;
5880 uint32_t msg;
5881 uint16_t prod, chain_prod;
5882 uint32_t prod_bseq;
5883 int s = splnet();
5884
5885 /* Tell the firmware that the driver is still running. */
5886 #ifdef BNX_DEBUG
5887 msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5888 #else
5889 msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5890 #endif
5891 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5892
5893 /* Update the statistics from the hardware statistics block. */
5894 bnx_stats_update(sc);
5895
5896 /* Schedule the next tick. */
5897 if (!sc->bnx_detaching)
5898 callout_schedule(&sc->bnx_timeout, hz);
5899
5900 if (sc->bnx_link)
5901 goto bnx_tick_exit;
5902
5903 mii = &sc->bnx_mii;
5904 mii_tick(mii);
5905
5906 /* Check if the link has come up. */
5907 if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5908 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5909 sc->bnx_link++;
5910 /* Now that link is up, handle any outstanding TX traffic. */
5911 if_schedule_deferred_start(ifp);
5912 }
5913
5914 bnx_tick_exit:
5915 /* try to get more RX buffers, just in case */
5916 prod = sc->rx_prod;
5917 prod_bseq = sc->rx_prod_bseq;
5918 chain_prod = RX_CHAIN_IDX(prod);
5919 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
5920 sc->rx_prod = prod;
5921 sc->rx_prod_bseq = prod_bseq;
5922
5923 splx(s);
5924 return;
5925 }
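#if 0
/*
 * Minimal sketch, assuming the standard callout(9) setup performed once
 * at attach time; bnx_tick() then reschedules itself above until
 * bnx_detaching is set.
 */
	callout_init(&sc->bnx_timeout, 0);
	callout_setfunc(&sc->bnx_timeout, bnx_tick, sc);
	callout_schedule(&sc->bnx_timeout, hz);
#endif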
5926
5927 /****************************************************************************/
5928 /* BNX Debug Routines */
5929 /****************************************************************************/
5930 #ifdef BNX_DEBUG
5931
5932 /****************************************************************************/
5933 /* Prints out information about an mbuf. */
5934 /* */
5935 /* Returns: */
5936 /* Nothing. */
5937 /****************************************************************************/
5938 void
5939 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5940 {
5941 struct mbuf *mp = m;
5942
5943 if (m == NULL) {
5944 		/* NULL mbuf pointer. */
5945 aprint_error("mbuf ptr is null!\n");
5946 return;
5947 }
5948
5949 while (mp) {
5950 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5951 mp, mp->m_len);
5952
5953 if (mp->m_flags & M_EXT)
5954 aprint_debug("M_EXT ");
5955 if (mp->m_flags & M_PKTHDR)
5956 aprint_debug("M_PKTHDR ");
5957 aprint_debug("\n");
5958
5959 if (mp->m_flags & M_EXT)
5960 aprint_debug("- m_ext: vaddr = %p, "
5961 			    "ext_size = 0x%04zX\n", mp->m_ext.ext_buf, mp->m_ext.ext_size);
5962
5963 mp = mp->m_next;
5964 }
5965 }
5966
5967 /****************************************************************************/
5968 /* Prints out the mbufs in the TX mbuf chain. */
5969 /* */
5970 /* Returns: */
5971 /* Nothing. */
5972 /****************************************************************************/
5973 void
5974 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5975 {
5976 #if 0
5977 struct mbuf *m;
5978 int i;
5979
5980 aprint_debug_dev(sc->bnx_dev,
5981 "----------------------------"
5982 " tx mbuf data "
5983 "----------------------------\n");
5984
5985 for (i = 0; i < count; i++) {
5986 m = sc->tx_mbuf_ptr[chain_prod];
5987 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5988 bnx_dump_mbuf(sc, m);
5989 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5990 }
5991
5992 aprint_debug_dev(sc->bnx_dev,
5993 "--------------------------------------------"
5994 "----------------------------\n");
5995 #endif
5996 }
5997
5998 /*
5999 * This routine prints the RX mbuf chain.
6000 */
6001 void
6002 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
6003 {
6004 struct mbuf *m;
6005 int i;
6006
6007 aprint_debug_dev(sc->bnx_dev,
6008 "----------------------------"
6009 " rx mbuf data "
6010 "----------------------------\n");
6011
6012 for (i = 0; i < count; i++) {
6013 m = sc->rx_mbuf_ptr[chain_prod];
6014 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6015 bnx_dump_mbuf(sc, m);
6016 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6017 }
6018
6019
6020 aprint_debug_dev(sc->bnx_dev,
6021 "--------------------------------------------"
6022 "----------------------------\n");
6023 }
6024
6025 void
6026 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
6027 {
6028 if (idx > MAX_TX_BD)
6029 /* Index out of range. */
6030 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6031 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6032 /* TX Chain page pointer. */
6033 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
6034 "page pointer\n", idx, txbd->tx_bd_haddr_hi,
6035 txbd->tx_bd_haddr_lo);
6036 else
6037 /* Normal tx_bd entry. */
6038 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
6039 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
6040 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6041 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
6042 txbd->tx_bd_flags);
6043 }
6044
6045 void
6046 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
6047 {
6048 if (idx > MAX_RX_BD)
6049 /* Index out of range. */
6050 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6051 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
6052 		/* RX chain page pointer. */
6053 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
6054 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
6055 rxbd->rx_bd_haddr_lo);
6056 else
6057 		/* Normal rx_bd entry. */
6058 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
6059 "0x%08X, flags = 0x%08X\n", idx,
6060 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6061 rxbd->rx_bd_len, rxbd->rx_bd_flags);
6062 }
6063
6064 void
6065 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6066 {
6067 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6068 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6069 "tcp_udp_xsum = 0x%04X\n", idx,
6070 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6071 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6072 l2fhdr->l2_fhdr_tcp_udp_xsum);
6073 }
6074
6075 /*
6076 * This routine prints the TX chain.
6077 */
6078 void
6079 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
6080 {
6081 struct tx_bd *txbd;
6082 int i;
6083
6084 /* First some info about the tx_bd chain structure. */
6085 aprint_debug_dev(sc->bnx_dev,
6086 "----------------------------"
6087 " tx_bd chain "
6088 "----------------------------\n");
6089
6090 BNX_PRINTF(sc,
6091 "page size = 0x%08X, tx chain pages = 0x%08X\n",
6092 (uint32_t)BCM_PAGE_SIZE, (uint32_t) TX_PAGES);
6093
6094 BNX_PRINTF(sc,
6095 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6096 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);
6097
6098 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
6099
6100 	aprint_debug_dev(sc->bnx_dev,
6101 "-----------------------------"
6102 " tx_bd data "
6103 "-----------------------------\n");
6104
6105 /* Now print out the tx_bd's themselves. */
6106 for (i = 0; i < count; i++) {
6107 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6108 bnx_dump_txbd(sc, tx_prod, txbd);
6109 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6110 }
6111
6112 aprint_debug_dev(sc->bnx_dev,
6113 "-----------------------------"
6114 "--------------"
6115 "-----------------------------\n");
6116 }
6117
6118 /*
6119 * This routine prints the RX chain.
6120 */
6121 void
6122 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
6123 {
6124 struct rx_bd *rxbd;
6125 int i;
6126
6127 	/* First some info about the rx_bd chain structure. */
6128 aprint_debug_dev(sc->bnx_dev,
6129 "----------------------------"
6130 " rx_bd chain "
6131 "----------------------------\n");
6132
6133 aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n");
6134
6135 BNX_PRINTF(sc,
6136 "page size = 0x%08X, rx chain pages = 0x%08X\n",
6137 (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
6138
6139 BNX_PRINTF(sc,
6140 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6141 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE);
6142
6143 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
6144
6145 	aprint_debug_dev(sc->bnx_dev,
6146 "----------------------------"
6147 " rx_bd data "
6148 "----------------------------\n");
6149
6150 /* Now print out the rx_bd's themselves. */
6151 for (i = 0; i < count; i++) {
6152 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6153 bnx_dump_rxbd(sc, rx_prod, rxbd);
6154 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6155 }
6156
6157 aprint_debug_dev(sc->bnx_dev,
6158 "----------------------------"
6159 "--------------"
6160 "----------------------------\n");
6161 }
6162
6163 /*
6164 * This routine prints the status block.
6165 */
6166 void
6167 bnx_dump_status_block(struct bnx_softc *sc)
6168 {
6169 struct status_block *sblk;
6170 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
6171 BUS_DMASYNC_POSTREAD);
6172
6173 sblk = sc->status_block;
6174
6175 aprint_debug_dev(sc->bnx_dev, "----------------------------- "
6176 "Status Block -----------------------------\n");
6177
6178 BNX_PRINTF(sc,
6179 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6180 sblk->status_attn_bits, sblk->status_attn_bits_ack,
6181 sblk->status_idx);
6182
6183 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
6184 sblk->status_rx_quick_consumer_index0,
6185 sblk->status_tx_quick_consumer_index0);
6186
6187 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6188
6189 	/* These indices are not used by normal L2 drivers. */
6190 if (sblk->status_rx_quick_consumer_index1 ||
6191 sblk->status_tx_quick_consumer_index1)
6192 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
6193 sblk->status_rx_quick_consumer_index1,
6194 sblk->status_tx_quick_consumer_index1);
6195
6196 if (sblk->status_rx_quick_consumer_index2 ||
6197 sblk->status_tx_quick_consumer_index2)
6198 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
6199 sblk->status_rx_quick_consumer_index2,
6200 sblk->status_tx_quick_consumer_index2);
6201
6202 if (sblk->status_rx_quick_consumer_index3 ||
6203 sblk->status_tx_quick_consumer_index3)
6204 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
6205 sblk->status_rx_quick_consumer_index3,
6206 sblk->status_tx_quick_consumer_index3);
6207
6208 if (sblk->status_rx_quick_consumer_index4 ||
6209 sblk->status_rx_quick_consumer_index5)
6210 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
6211 sblk->status_rx_quick_consumer_index4,
6212 sblk->status_rx_quick_consumer_index5);
6213
6214 if (sblk->status_rx_quick_consumer_index6 ||
6215 sblk->status_rx_quick_consumer_index7)
6216 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
6217 sblk->status_rx_quick_consumer_index6,
6218 sblk->status_rx_quick_consumer_index7);
6219
6220 if (sblk->status_rx_quick_consumer_index8 ||
6221 sblk->status_rx_quick_consumer_index9)
6222 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
6223 sblk->status_rx_quick_consumer_index8,
6224 sblk->status_rx_quick_consumer_index9);
6225
6226 if (sblk->status_rx_quick_consumer_index10 ||
6227 sblk->status_rx_quick_consumer_index11)
6228 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
6229 sblk->status_rx_quick_consumer_index10,
6230 sblk->status_rx_quick_consumer_index11);
6231
6232 if (sblk->status_rx_quick_consumer_index12 ||
6233 sblk->status_rx_quick_consumer_index13)
6234 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
6235 sblk->status_rx_quick_consumer_index12,
6236 sblk->status_rx_quick_consumer_index13);
6237
6238 if (sblk->status_rx_quick_consumer_index14 ||
6239 sblk->status_rx_quick_consumer_index15)
6240 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
6241 sblk->status_rx_quick_consumer_index14,
6242 sblk->status_rx_quick_consumer_index15);
6243
6244 if (sblk->status_completion_producer_index ||
6245 sblk->status_cmd_consumer_index)
6246 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
6247 sblk->status_completion_producer_index,
6248 sblk->status_cmd_consumer_index);
6249
6250 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
6251 "-----------------------------\n");
6252 }
6253
6254 /*
6255 * This routine prints the statistics block.
6256 */
6257 void
6258 bnx_dump_stats_block(struct bnx_softc *sc)
6259 {
6260 struct statistics_block *sblk;
6261 	bus_dmamap_sync(sc->bnx_dmatag, sc->stats_map, 0, BNX_STATS_BLK_SZ,
6262 	    BUS_DMASYNC_POSTREAD);
6263
6264 sblk = sc->stats_block;
6265
6266 aprint_debug_dev(sc->bnx_dev, ""
6267 "-----------------------------"
6268 " Stats Block "
6269 "-----------------------------\n");
6270
6271 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
6272 "IfHcInBadOctets = 0x%08X:%08X\n",
6273 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6274 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6275
6276 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
6277 "IfHcOutBadOctets = 0x%08X:%08X\n",
6278 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6279 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6280
6281 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
6282 "IfHcInMulticastPkts = 0x%08X:%08X\n",
6283 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6284 sblk->stat_IfHCInMulticastPkts_hi,
6285 sblk->stat_IfHCInMulticastPkts_lo);
6286
6287 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
6288 "IfHcOutUcastPkts = 0x%08X:%08X\n",
6289 sblk->stat_IfHCInBroadcastPkts_hi,
6290 sblk->stat_IfHCInBroadcastPkts_lo,
6291 sblk->stat_IfHCOutUcastPkts_hi,
6292 sblk->stat_IfHCOutUcastPkts_lo);
6293
6294 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
6295 "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6296 sblk->stat_IfHCOutMulticastPkts_hi,
6297 sblk->stat_IfHCOutMulticastPkts_lo,
6298 sblk->stat_IfHCOutBroadcastPkts_hi,
6299 sblk->stat_IfHCOutBroadcastPkts_lo);
6300
6301 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6302 BNX_PRINTF(sc, "0x%08X : "
6303 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6304 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6305
6306 if (sblk->stat_Dot3StatsCarrierSenseErrors)
6307 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6308 sblk->stat_Dot3StatsCarrierSenseErrors);
6309
6310 if (sblk->stat_Dot3StatsFCSErrors)
6311 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6312 sblk->stat_Dot3StatsFCSErrors);
6313
6314 if (sblk->stat_Dot3StatsAlignmentErrors)
6315 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6316 sblk->stat_Dot3StatsAlignmentErrors);
6317
6318 if (sblk->stat_Dot3StatsSingleCollisionFrames)
6319 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6320 sblk->stat_Dot3StatsSingleCollisionFrames);
6321
6322 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6323 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6324 sblk->stat_Dot3StatsMultipleCollisionFrames);
6325
6326 if (sblk->stat_Dot3StatsDeferredTransmissions)
6327 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6328 sblk->stat_Dot3StatsDeferredTransmissions);
6329
6330 if (sblk->stat_Dot3StatsExcessiveCollisions)
6331 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6332 sblk->stat_Dot3StatsExcessiveCollisions);
6333
6334 if (sblk->stat_Dot3StatsLateCollisions)
6335 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6336 sblk->stat_Dot3StatsLateCollisions);
6337
6338 if (sblk->stat_EtherStatsCollisions)
6339 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6340 sblk->stat_EtherStatsCollisions);
6341
6342 if (sblk->stat_EtherStatsFragments)
6343 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6344 sblk->stat_EtherStatsFragments);
6345
6346 if (sblk->stat_EtherStatsJabbers)
6347 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6348 sblk->stat_EtherStatsJabbers);
6349
6350 if (sblk->stat_EtherStatsUndersizePkts)
6351 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6352 sblk->stat_EtherStatsUndersizePkts);
6353
6354 if (sblk->stat_EtherStatsOverrsizePkts)
6355 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6356 sblk->stat_EtherStatsOverrsizePkts);
6357
6358 if (sblk->stat_EtherStatsPktsRx64Octets)
6359 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6360 sblk->stat_EtherStatsPktsRx64Octets);
6361
6362 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6363 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6364 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6365
6366 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6367 BNX_PRINTF(sc, "0x%08X : "
6368 "EtherStatsPktsRx128Octetsto255Octets\n",
6369 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6370
6371 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6372 BNX_PRINTF(sc, "0x%08X : "
6373 "EtherStatsPktsRx256Octetsto511Octets\n",
6374 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6375
6376 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6377 BNX_PRINTF(sc, "0x%08X : "
6378 "EtherStatsPktsRx512Octetsto1023Octets\n",
6379 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6380
6381 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6382 BNX_PRINTF(sc, "0x%08X : "
6383 "EtherStatsPktsRx1024Octetsto1522Octets\n",
6384 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6385
6386 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6387 BNX_PRINTF(sc, "0x%08X : "
6388 "EtherStatsPktsRx1523Octetsto9022Octets\n",
6389 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6390
6391 if (sblk->stat_EtherStatsPktsTx64Octets)
6392 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6393 sblk->stat_EtherStatsPktsTx64Octets);
6394
6395 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6396 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6397 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6398
6399 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6400 BNX_PRINTF(sc, "0x%08X : "
6401 "EtherStatsPktsTx128Octetsto255Octets\n",
6402 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6403
6404 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6405 BNX_PRINTF(sc, "0x%08X : "
6406 "EtherStatsPktsTx256Octetsto511Octets\n",
6407 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6408
6409 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6410 BNX_PRINTF(sc, "0x%08X : "
6411 "EtherStatsPktsTx512Octetsto1023Octets\n",
6412 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6413
6414 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6415 BNX_PRINTF(sc, "0x%08X : "
6416 "EtherStatsPktsTx1024Octetsto1522Octets\n",
6417 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6418
6419 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6420 BNX_PRINTF(sc, "0x%08X : "
6421 "EtherStatsPktsTx1523Octetsto9022Octets\n",
6422 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6423
6424 if (sblk->stat_XonPauseFramesReceived)
6425 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6426 sblk->stat_XonPauseFramesReceived);
6427
6428 if (sblk->stat_XoffPauseFramesReceived)
6429 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6430 sblk->stat_XoffPauseFramesReceived);
6431
6432 if (sblk->stat_OutXonSent)
6433 BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
6434 sblk->stat_OutXonSent);
6435
6436 if (sblk->stat_OutXoffSent)
6437 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
6438 sblk->stat_OutXoffSent);
6439
6440 if (sblk->stat_FlowControlDone)
6441 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
6442 sblk->stat_FlowControlDone);
6443
6444 if (sblk->stat_MacControlFramesReceived)
6445 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6446 sblk->stat_MacControlFramesReceived);
6447
6448 if (sblk->stat_XoffStateEntered)
6449 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6450 sblk->stat_XoffStateEntered);
6451
6452 if (sblk->stat_IfInFramesL2FilterDiscards)
6453 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6454 sblk->stat_IfInFramesL2FilterDiscards);
6455
6456 if (sblk->stat_IfInRuleCheckerDiscards)
6457 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6458 sblk->stat_IfInRuleCheckerDiscards);
6459
6460 if (sblk->stat_IfInFTQDiscards)
6461 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6462 sblk->stat_IfInFTQDiscards);
6463
6464 if (sblk->stat_IfInMBUFDiscards)
6465 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6466 sblk->stat_IfInMBUFDiscards);
6467
6468 if (sblk->stat_IfInRuleCheckerP4Hit)
6469 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6470 sblk->stat_IfInRuleCheckerP4Hit);
6471
6472 if (sblk->stat_CatchupInRuleCheckerDiscards)
6473 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6474 sblk->stat_CatchupInRuleCheckerDiscards);
6475
6476 if (sblk->stat_CatchupInFTQDiscards)
6477 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6478 sblk->stat_CatchupInFTQDiscards);
6479
6480 if (sblk->stat_CatchupInMBUFDiscards)
6481 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6482 sblk->stat_CatchupInMBUFDiscards);
6483
6484 if (sblk->stat_CatchupInRuleCheckerP4Hit)
6485 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6486 sblk->stat_CatchupInRuleCheckerP4Hit);
6487
6488 aprint_debug_dev(sc->bnx_dev,
6489 "-----------------------------"
6490 "--------------"
6491 "-----------------------------\n");
6492 }
6493
6494 void
6495 bnx_dump_driver_state(struct bnx_softc *sc)
6496 {
6497 aprint_debug_dev(sc->bnx_dev,
6498 "-----------------------------"
6499 " Driver State "
6500 "-----------------------------\n");
6501
6502 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6503 "address\n", sc);
6504
6505 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6506 sc->status_block);
6507
6508 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6509 "address\n", sc->stats_block);
6510
6511 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6512 "address\n", sc->tx_bd_chain);
6513
6514 #if 0
6515 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6516 sc->rx_bd_chain);
6517
6518 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6519 sc->tx_mbuf_ptr);
6520 #endif
6521
6522 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6523 sc->rx_mbuf_ptr);
6524
6525 BNX_PRINTF(sc,
6526 " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
6527 sc->interrupts_generated);
6528
6529 BNX_PRINTF(sc,
6530 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6531 sc->rx_interrupts);
6532
6533 BNX_PRINTF(sc,
6534 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6535 sc->tx_interrupts);
6536
6537 BNX_PRINTF(sc,
6538 " 0x%08X - (sc->last_status_idx) status block index\n",
6539 sc->last_status_idx);
6540
6541 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
6542 sc->tx_prod);
6543
6544 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
6545 sc->tx_cons);
6546
6547 BNX_PRINTF(sc,
6548 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6549 sc->tx_prod_bseq);
6550 BNX_PRINTF(sc,
6551 " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6552 sc->tx_mbuf_alloc);
6553
6554 BNX_PRINTF(sc,
6555 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6556 sc->used_tx_bd);
6557
6558 BNX_PRINTF(sc,
6559 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6560 sc->tx_hi_watermark, sc->max_tx_bd);
6561
6562
6563 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
6564 sc->rx_prod);
6565
6566 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
6567 sc->rx_cons);
6568
6569 BNX_PRINTF(sc,
6570 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6571 sc->rx_prod_bseq);
6572
6573 BNX_PRINTF(sc,
6574 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6575 sc->rx_mbuf_alloc);
6576
6577 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6578 sc->free_rx_bd);
6579
6580 BNX_PRINTF(sc,
6581 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6582 sc->rx_low_watermark, sc->max_rx_bd);
6583
6584 BNX_PRINTF(sc,
6585 " 0x%08X - (sc->mbuf_alloc_failed) "
6586 "mbuf alloc failures\n",
6587 sc->mbuf_alloc_failed);
6588
6589 BNX_PRINTF(sc,
6590 	    "         0x%08X - (sc->mbuf_sim_alloc_failed) "
6591 "simulated mbuf alloc failures\n",
6592 sc->mbuf_sim_alloc_failed);
6593
6594 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
6595 "-----------------------------\n");
6596 }
6597
6598 void
6599 bnx_dump_hw_state(struct bnx_softc *sc)
6600 {
6601 uint32_t val1;
6602 int i;
6603
6604 aprint_debug_dev(sc->bnx_dev,
6605 "----------------------------"
6606 " Hardware State "
6607 "----------------------------\n");
6608
6609 val1 = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV);
6610 BNX_PRINTF(sc, "0x%08X : bootcode version\n", val1);
6611
6612 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6613 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6614 val1, BNX_MISC_ENABLE_STATUS_BITS);
6615
6616 val1 = REG_RD(sc, BNX_DMA_STATUS);
6617 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6618
6619 val1 = REG_RD(sc, BNX_CTX_STATUS);
6620 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6621
6622 val1 = REG_RD(sc, BNX_EMAC_STATUS);
6623 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6624 BNX_EMAC_STATUS);
6625
6626 val1 = REG_RD(sc, BNX_RPM_STATUS);
6627 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6628
6629 val1 = REG_RD(sc, BNX_TBDR_STATUS);
6630 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6631 BNX_TBDR_STATUS);
6632
6633 val1 = REG_RD(sc, BNX_TDMA_STATUS);
6634 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6635 BNX_TDMA_STATUS);
6636
6637 val1 = REG_RD(sc, BNX_HC_STATUS);
6638 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6639
6640 aprint_debug_dev(sc->bnx_dev,
6641 "----------------------------"
6642 "----------------"
6643 "----------------------------\n");
6644
6645 aprint_debug_dev(sc->bnx_dev,
6646 "----------------------------"
6647 " Register Dump "
6648 "----------------------------\n");
6649
6650 for (i = 0x400; i < 0x8000; i += 0x10)
6651 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6652 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6653 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6654
6655 aprint_debug_dev(sc->bnx_dev,
6656 "----------------------------"
6657 "----------------"
6658 "----------------------------\n");
6659 }
6660
6661 void
6662 bnx_breakpoint(struct bnx_softc *sc)
6663 {
6664 /* Unreachable code to shut the compiler up about unused functions. */
6665 if (0) {
6666 bnx_dump_txbd(sc, 0, NULL);
6667 bnx_dump_rxbd(sc, 0, NULL);
6668 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6669 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6670 bnx_dump_l2fhdr(sc, 0, NULL);
6671 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6672 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6673 bnx_dump_status_block(sc);
6674 bnx_dump_stats_block(sc);
6675 bnx_dump_driver_state(sc);
6676 bnx_dump_hw_state(sc);
6677 }
6678
6679 bnx_dump_driver_state(sc);
6680 /* Print the important status block fields. */
6681 bnx_dump_status_block(sc);
6682
6683 #if 0
6684 /* Call the debugger. */
6685 breakpoint();
6686 #endif
6687
6688 return;
6689 }
6690 #endif
6691