if_bnx.c revision 1.68 1 /* $NetBSD: if_bnx.c,v 1.68 2019/01/22 03:42:27 msaitoh Exp $ */
2 /* $OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $ */
3
4 /*-
5 * Copyright (c) 2006-2010 Broadcom Corporation
6 * David Christensen <davidch (at) broadcom.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written consent.
20 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 #if 0
36 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
37 #endif
38 __KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.68 2019/01/22 03:42:27 msaitoh Exp $");
39
40 /*
41 * The following controllers are supported by this driver:
42 * BCM5706C A2, A3
43 * BCM5706S A2, A3
44 * BCM5708C B1, B2
45 * BCM5708S B1, B2
46 * BCM5709C A1, C0
47 * BCM5709S A1, C0
48 * BCM5716 C0
49 *
50 * The following controllers are not supported by this driver:
51 * BCM5706C A0, A1
52 * BCM5706S A0, A1
53 * BCM5708C A0, B0
54 * BCM5708S A0, B0
55 * BCM5709C A0 B0, B1, B2 (pre-production)
56 * BCM5709S A0, B0, B1, B2 (pre-production)
57 */
58
59 #include <sys/callout.h>
60 #include <sys/mutex.h>
61
62 #include <dev/pci/if_bnxreg.h>
63 #include <dev/pci/if_bnxvar.h>
64
65 #include <dev/microcode/bnx/bnxfw.h>
66
67 /****************************************************************************/
68 /* BNX Driver Version */
69 /****************************************************************************/
70 #define BNX_DRIVER_VERSION "v0.9.6"
71
72 /****************************************************************************/
73 /* BNX Debug Options */
74 /****************************************************************************/
75 #ifdef BNX_DEBUG
76 uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;
77
78 /* 0 = Never */
79 /* 1 = 1 in 2,147,483,648 */
80 /* 256 = 1 in 8,388,608 */
81 /* 2048 = 1 in 1,048,576 */
82 /* 65536 = 1 in 32,768 */
83 /* 1048576 = 1 in 2,048 */
84 /* 268435456 = 1 in 8 */
85 /* 536870912 = 1 in 4 */
86 /* 1073741824 = 1 in 2 */
87
88 /* Controls how often the l2_fhdr frame error check will fail. */
89 int bnx_debug_l2fhdr_status_check = 0;
90
91 /* Controls how often the unexpected attention check will fail. */
92 int bnx_debug_unexpected_attention = 0;
93
94 /* Controls how often to simulate an mbuf allocation failure. */
95 int bnx_debug_mbuf_allocation_failure = 0;
96
97 /* Controls how often to simulate a DMA mapping failure. */
98 int bnx_debug_dma_map_addr_failure = 0;
99
100 /* Controls how often to simulate a bootcode failure. */
101 int bnx_debug_bootcode_running_failure = 0;
102 #endif
103
104 /****************************************************************************/
105 /* PCI Device ID Table */
106 /* */
107 /* Used by bnx_probe() to identify the devices supported by this driver. */
108 /****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t bp_vendor;	/* PCI vendor ID to match */
	pci_product_id_t bp_product;	/* PCI product ID to match */
	/*
	 * Subsystem (vendor, product) pair.  A zero bp_subvendor acts as a
	 * wildcard: bnx_lookup() accepts the device without reading the
	 * subsystem ID register.  Place subsystem-specific entries before
	 * their wildcard counterparts so OEM names win.
	 */
	pci_vendor_id_t bp_subvendor;
	pci_product_id_t bp_subproduct;
	const char *bp_name;		/* name printed at attach time */
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
	  "HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
	  "HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  0, 0,
	  "Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
	  "HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
	  0, 0,
	  "Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
	  0, 0,
	  "Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
	  0, 0,
	  "Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};
178
179 /****************************************************************************/
180 /* Supported Flash NVRAM device data. */
181 /****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)
	/*
	 * Each entry describes one supported NVRAM part; the leading hex
	 * words are controller-specific access/command values selected by
	 * the strapping read in bnx_init_nvram() — see struct flash_spec
	 * in if_bnxreg.h for the field layout.  "Expansion" entries are
	 * placeholders for strapping codes with no known part.
	 */
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
270
271 /*
272 * The BCM5709 controllers transparently handle the
273 * differences between Atmel 264 byte pages and all
274 * flash devices which use 256 byte pages, so no
275 * logical-to-physical mapping is required in the
276 * driver.
277 */
/* Single flash description used for all BCM5709/5716 parts. */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	/* Buffered access model, so total size is in BUFFERED_* units. */
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};
286
287 /****************************************************************************/
288 /* OpenBSD device entry points. */
289 /****************************************************************************/
290 static int bnx_probe(device_t, cfdata_t, void *);
291 void bnx_attach(device_t, device_t, void *);
292 int bnx_detach(device_t, int);
293
294 /****************************************************************************/
295 /* BNX Debug Data Structure Dump Routines */
296 /****************************************************************************/
297 #ifdef BNX_DEBUG
298 void bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
299 void bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
300 void bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
301 void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
302 void bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
303 void bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
304 void bnx_dump_tx_chain(struct bnx_softc *, int, int);
305 void bnx_dump_rx_chain(struct bnx_softc *, int, int);
306 void bnx_dump_status_block(struct bnx_softc *);
307 void bnx_dump_stats_block(struct bnx_softc *);
308 void bnx_dump_driver_state(struct bnx_softc *);
309 void bnx_dump_hw_state(struct bnx_softc *);
310 void bnx_breakpoint(struct bnx_softc *);
311 #endif
312
313 /****************************************************************************/
314 /* BNX Register/Memory Access Routines */
315 /****************************************************************************/
316 uint32_t bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
317 void bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
318 void bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
319 int bnx_miibus_read_reg(device_t, int, int, uint16_t *);
320 int bnx_miibus_write_reg(device_t, int, int, uint16_t);
321 void bnx_miibus_statchg(struct ifnet *);
322
323 /****************************************************************************/
324 /* BNX NVRAM Access Routines */
325 /****************************************************************************/
326 int bnx_acquire_nvram_lock(struct bnx_softc *);
327 int bnx_release_nvram_lock(struct bnx_softc *);
328 void bnx_enable_nvram_access(struct bnx_softc *);
329 void bnx_disable_nvram_access(struct bnx_softc *);
330 int bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
331 uint32_t);
332 int bnx_init_nvram(struct bnx_softc *);
333 int bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
334 int bnx_nvram_test(struct bnx_softc *);
335 #ifdef BNX_NVRAM_WRITE_SUPPORT
336 int bnx_enable_nvram_write(struct bnx_softc *);
337 void bnx_disable_nvram_write(struct bnx_softc *);
338 int bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
339 int bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
340 uint32_t);
341 int bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
342 #endif
343
344 /****************************************************************************/
345 /* */
346 /****************************************************************************/
347 void bnx_get_media(struct bnx_softc *);
348 void bnx_init_media(struct bnx_softc *);
349 int bnx_dma_alloc(struct bnx_softc *);
350 void bnx_dma_free(struct bnx_softc *);
351 void bnx_release_resources(struct bnx_softc *);
352
353 /****************************************************************************/
354 /* BNX Firmware Synchronization and Load */
355 /****************************************************************************/
356 int bnx_fw_sync(struct bnx_softc *, uint32_t);
357 void bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t, uint32_t);
358 void bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
359 struct fw_info *);
360 void bnx_init_cpus(struct bnx_softc *);
361
362 static void bnx_print_adapter_info(struct bnx_softc *);
363 static void bnx_probe_pci_caps(struct bnx_softc *);
364 void bnx_stop(struct ifnet *, int);
365 int bnx_reset(struct bnx_softc *, uint32_t);
366 int bnx_chipinit(struct bnx_softc *);
367 int bnx_blockinit(struct bnx_softc *);
368 static int bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
369 uint16_t *, uint32_t *);
370 int bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);
371
372 int bnx_init_tx_chain(struct bnx_softc *);
373 void bnx_init_tx_context(struct bnx_softc *);
374 int bnx_init_rx_chain(struct bnx_softc *);
375 void bnx_init_rx_context(struct bnx_softc *);
376 void bnx_free_rx_chain(struct bnx_softc *);
377 void bnx_free_tx_chain(struct bnx_softc *);
378
379 int bnx_tx_encap(struct bnx_softc *, struct mbuf *);
380 void bnx_start(struct ifnet *);
381 int bnx_ioctl(struct ifnet *, u_long, void *);
382 void bnx_watchdog(struct ifnet *);
383 int bnx_init(struct ifnet *);
384
385 void bnx_init_context(struct bnx_softc *);
386 void bnx_get_mac_addr(struct bnx_softc *);
387 void bnx_set_mac_addr(struct bnx_softc *);
388 void bnx_phy_intr(struct bnx_softc *);
389 void bnx_rx_intr(struct bnx_softc *);
390 void bnx_tx_intr(struct bnx_softc *);
391 void bnx_disable_intr(struct bnx_softc *);
392 void bnx_enable_intr(struct bnx_softc *);
393
394 int bnx_intr(void *);
395 void bnx_iff(struct bnx_softc *);
396 void bnx_stats_update(struct bnx_softc *);
397 void bnx_tick(void *);
398
399 struct pool *bnx_tx_pool = NULL;
400 void bnx_alloc_pkts(struct work *, void *);
401
402 /****************************************************************************/
403 /* OpenBSD device dispatch table. */
404 /****************************************************************************/
405 CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
406 bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
407
408 /****************************************************************************/
409 /* Device probe function. */
410 /* */
411 /* Compares the device to the driver's list of supported devices and */
412 /* reports back to the OS whether this is the right driver for the device. */
413 /* */
414 /* Returns: */
415 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
416 /****************************************************************************/
417 static const struct bnx_product *
418 bnx_lookup(const struct pci_attach_args *pa)
419 {
420 int i;
421 pcireg_t subid;
422
423 for (i = 0; i < __arraycount(bnx_devices); i++) {
424 if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
425 PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
426 continue;
427 if (!bnx_devices[i].bp_subvendor)
428 return &bnx_devices[i];
429 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
430 if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
431 PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
432 return &bnx_devices[i];
433 }
434
435 return NULL;
436 }
437 static int
438 bnx_probe(device_t parent, cfdata_t match, void *aux)
439 {
440 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
441
442 if (bnx_lookup(pa) != NULL)
443 return 1;
444
445 return 0;
446 }
447
/****************************************************************************/
/* Adapter Information Print Function.                                      */
/*                                                                          */
/* Prints the ASIC type/revision, the bus type/width/speed and the          */
/* interrupt coalescing parameters for this controller.                     */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{

	/* ASIC number, revision letter/metal and bond id. */
	aprint_normal_dev(sc->bnx_dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(sc->bnx_dev, "PCIe x%d ",
		    sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5Gbps\n"); break;
		case 2:	aprint_normal("5Gbps\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(sc->bnx_dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	/* Interrupt coalescing trip points and tick values (RX then TX). */
	aprint_normal_dev(sc->bnx_dev,
	    "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}
493
494
495 /****************************************************************************/
496 /* PCI Capabilities Probe Function. */
497 /* */
498 /* Walks the PCI capabiites list for the device to find what features are */
499 /* supported. */
500 /* */
501 /* Returns: */
502 /* None. */
503 /****************************************************************************/
504 static void
505 bnx_probe_pci_caps(struct bnx_softc *sc)
506 {
507 struct pci_attach_args *pa = &(sc->bnx_pa);
508 pcireg_t reg;
509
510 /* Check if PCI-X capability is enabled. */
511 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, ®,
512 NULL) != 0) {
513 sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
514 }
515
516 /* Check if PCIe capability is enabled. */
517 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, ®,
518 NULL) != 0) {
519 pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
520 reg + PCIE_LCSR);
521 DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
522 "0x%08X\n", link_status);
523 sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
524 sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
525 sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
526 sc->bnx_flags |= BNX_PCIE_FLAG;
527 }
528
529 /* Check if MSI capability is enabled. */
530 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, ®,
531 NULL) != 0)
532 sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;
533
534 /* Check if MSI-X capability is enabled. */
535 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, ®,
536 NULL) != 0)
537 sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
538 }
539
540
541 /****************************************************************************/
542 /* Device attach function. */
543 /* */
544 /* Allocates device resources, performs secondary chip identification, */
545 /* resets and initializes the hardware, and initializes driver instance */
546 /* variables. */
547 /* */
548 /* Returns: */
549 /* 0 on success, positive value on failure. */
550 /****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	uint32_t command;
	struct ifnet *ifp;
	uint32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;
	char intrbuf[PCI_INTRSTR_LEN];

	/*
	 * Lazily create the global TX packet pool shared by all bnx
	 * instances; only the first attach allocates it.
	 */
	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", NULL, IPL_NET);
		} else {
			aprint_error(": can't alloc bnx_tx_pool\n");
			return;
		}
	}

	/* bnx_probe() already accepted this device, so a miss is a bug. */
	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	/* Re-read to confirm the bits actually stuck. */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		/* Per-function shared memory base (one word per function). */
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	bnx_probe_pci_caps(sc);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* Derive the PCI-X bus speed from the detected clock. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/* Hardware checksum offload for IPv4/TCP/UDP, both directions. */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, bnx_intr,
	    sc, device_xname(self));
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	/* create workqueue to handle packet allocations */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	/* Hook up the MII access and link-state-change callbacks. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* set phyflags and chipid before mii_attach() */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);
	prop_dictionary_set_uint32(dict, "shared_hwcfg",sc->bnx_shared_hw_cfg);
	prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg);

	/* Print some useful adapter info */
	bnx_print_adapter_info(sc);

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	/* Fall back to manual media if no PHY was found. */
	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp,sc->eaddr);

	/* Periodic tick callout; armed later by bnx_init(). */
	callout_init(&sc->bnx_timeout, 0);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	/* Common error exit: undo whatever was allocated above. */
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
870
871 /****************************************************************************/
872 /* Device detach function. */
873 /* */
874 /* Stops the controller, resets the controller, and releases resources. */
875 /* */
876 /* Returns: */
877 /* 0 on success, positive value on failure. */
878 /****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	/* 'flags' (detach flags) is currently unused. */
	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	bnx_stop(ifp, 1);
	splx(s);

	/* Undo the power-management and timer setup done at attach time. */
	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY);

	/* Detach the network interface, then the PHYs behind it. */
	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources (DMA maps, mapped BARs, intr). */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	/* Always succeeds. */
	return 0;
}
914
915 /****************************************************************************/
916 /* Indirect register read. */
917 /* */
918 /* Reads NetXtreme II registers using an index/data register pair in PCI */
919 /* configuration space. Using this mechanism avoids issues with posted */
920 /* reads but is much slower than memory-mapped I/O. */
921 /* */
922 /* Returns: */
923 /* The value of the register. */
924 /****************************************************************************/
925 uint32_t
926 bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
927 {
928 struct pci_attach_args *pa = &(sc->bnx_pa);
929
930 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
931 offset);
932 #ifdef BNX_DEBUG
933 {
934 uint32_t val;
935 val = pci_conf_read(pa->pa_pc, pa->pa_tag,
936 BNX_PCICFG_REG_WINDOW);
937 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
938 "val = 0x%08X\n", __func__, offset, val);
939 return val;
940 }
941 #else
942 return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
943 #endif
944 }
945
946 /****************************************************************************/
947 /* Indirect register write. */
948 /* */
949 /* Writes NetXtreme II registers using an index/data register pair in PCI */
950 /* configuration space. Using this mechanism avoids issues with posted */
/* writes but is much slower than memory-mapped I/O.                        */
952 /* */
953 /* Returns: */
954 /* Nothing. */
955 /****************************************************************************/
956 void
957 bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
958 {
959 struct pci_attach_args *pa = &(sc->bnx_pa);
960
961 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
962 __func__, offset, val);
963
964 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
965 offset);
966 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
967 }
968
969 /****************************************************************************/
970 /* Context memory write. */
971 /* */
972 /* The NetXtreme II controller uses context memory to track connection */
973 /* information for L2 and higher network protocols. */
974 /* */
975 /* Returns: */
976 /* Nothing. */
977 /****************************************************************************/
978 void
979 bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
980 uint32_t ctx_val)
981 {
982 uint32_t idx, offset = ctx_offset + cid_addr;
983 uint32_t val, retry_cnt = 5;
984
985 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
986 REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
987 REG_WR(sc, BNX_CTX_CTX_CTRL,
988 (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));
989
990 for (idx = 0; idx < retry_cnt; idx++) {
991 val = REG_RD(sc, BNX_CTX_CTX_CTRL);
992 if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
993 break;
994 DELAY(5);
995 }
996
997 #if 0
998 if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
999 BNX_PRINTF("%s(%d); Unable to write CTX memory: "
1000 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1001 __FILE__, __LINE__, cid_addr, ctx_offset);
1002 #endif
1003
1004 } else {
1005 REG_WR(sc, BNX_CTX_DATA_ADR, offset);
1006 REG_WR(sc, BNX_CTX_DATA, ctx_val);
1007 }
1008 }
1009
1010 /****************************************************************************/
1011 /* PHY register read. */
1012 /* */
1013 /* Implements register reads on the MII bus. */
1014 /* */
1015 /* Returns: */
1016 /* The value of the register. */
1017 /****************************************************************************/
/* Reads one PHY register through the EMAC MDIO interface. */
int
bnx_miibus_read_reg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t data;
	int i, rv = 0;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return -1;
	}

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/*
	 * Temporarily turn hardware autopolling off for the duration of
	 * the manual MDIO access.  The read-back of BNX_EMAC_MDIO_MODE
	 * presumably flushes the posted write before the DELAY.
	 */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and launch the read transaction. */
	data = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, data);

	/* Poll until the controller clears START_BUSY, or time out. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(data & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			/* Re-read to pick up the returned data bits. */
			data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			data &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (data & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		rv = ETIMEDOUT;
	} else {
		/* Success: low 16 bits of the COMM register are the value. */
		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		*val = data & 0xffff;

		DBPRINT(sc, BNX_EXCESSIVE,
		    "%s(): phy = %d, reg = 0x%04X, val = 0x%04hX\n", __func__,
		    phy, (uint16_t) reg & 0xffff, *val);
	}

	/* Restore autopolling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* 0 on success, ETIMEDOUT on MDIO timeout, -1 on bad PHY address. */
	return rv;
}
1096
1097 /****************************************************************************/
1098 /* PHY register write. */
1099 /* */
1100 /* Implements register writes on the MII bus. */
1101 /* */
1102 /* Returns: */
/*   0 on success, positive value on failure.                               */
1104 /****************************************************************************/
/* Writes one PHY register through the EMAC MDIO interface. */
int
bnx_miibus_write_reg(device_t dev, int phy, int reg, uint16_t val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i, rv = 0;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN,
		    "Invalid PHY address %d for PHY write!\n", phy);
		return -1;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04hX\n", __func__,
	    phy, (uint16_t) reg & 0xffff, val);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/*
	 * Temporarily turn hardware autopolling off for the duration of
	 * the manual MDIO access (same dance as bnx_miibus_read_reg()).
	 */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and launch the write transaction. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll until the controller clears START_BUSY, or time out. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
		rv = ETIMEDOUT;
	}

	/* Restore autopolling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* 0 on success, ETIMEDOUT on MDIO timeout, -1 on bad PHY address. */
	return rv;
}
1176
1177 /****************************************************************************/
1178 /* MII bus status change. */
1179 /* */
1180 /* Called by the MII bus driver when the PHY establishes link to set the */
1181 /* MAC interface registers. */
1182 /* */
1183 /* Returns: */
1184 /* Nothing. */
1185 /****************************************************************************/
/*
 * Reprogram BNX_EMAC_MODE to match the speed/duplex the PHY negotiated.
 * Invoked by the MII layer on link-state changes.
 */
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	/* Start from the current mode, clearing every field set below. */
	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/* Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		/*
		 * Only non-5706 chips use the dedicated 10Mb port mode
		 * here; the 5706 falls through to plain MII.
		 */
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		/* 2.5G sets the 25G mode bit and falls through to GMII. */
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		/* Unknown media subtype: default to GMII. */
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/* Set half or full duplex based on the duplicity
	 * negotiated by the PHY.
	 * NOTE(review): anything that is not explicitly IFM_HDX is
	 * treated as full duplex.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}
1239
1240 /****************************************************************************/
1241 /* Acquire NVRAM lock. */
1242 /* */
1243 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Locks 0 and 1 are reserved (lock 1 is used by the firmware); lock 2 is   */
/* the one used by this driver (BNX_NVM_SW_ARB_ARB_REQ_SET2).               */
1246 /* */
1247 /* Returns: */
1248 /* 0 on success, positive value on failure. */
1249 /****************************************************************************/
1250 int
1251 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1252 {
1253 uint32_t val;
1254 int j;
1255
1256 DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1257
1258 /* Request access to the flash interface. */
1259 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1260 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1261 val = REG_RD(sc, BNX_NVM_SW_ARB);
1262 if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1263 break;
1264
1265 DELAY(5);
1266 }
1267
1268 if (j >= NVRAM_TIMEOUT_COUNT) {
1269 DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1270 return EBUSY;
1271 }
1272
1273 return 0;
1274 }
1275
1276 /****************************************************************************/
1277 /* Release NVRAM lock. */
1278 /* */
1279 /* When the caller is finished accessing NVRAM the lock must be released. */
/* Locks 0 and 1 are reserved (lock 1 is used by the firmware); lock 2 is   */
/* the one used by this driver (BNX_NVM_SW_ARB_ARB_REQ_CLR2).               */
1282 /* */
1283 /* Returns: */
1284 /* 0 on success, positive value on failure. */
1285 /****************************************************************************/
1286 int
1287 bnx_release_nvram_lock(struct bnx_softc *sc)
1288 {
1289 int j;
1290 uint32_t val;
1291
1292 DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1293
1294 /* Relinquish nvram interface. */
1295 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1296
1297 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1298 val = REG_RD(sc, BNX_NVM_SW_ARB);
1299 if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1300 break;
1301
1302 DELAY(5);
1303 }
1304
1305 if (j >= NVRAM_TIMEOUT_COUNT) {
1306 DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1307 return EBUSY;
1308 }
1309
1310 return 0;
1311 }
1312
1313 #ifdef BNX_NVRAM_WRITE_SUPPORT
1314 /****************************************************************************/
1315 /* Enable NVRAM write access. */
1316 /* */
1317 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1318 /* */
1319 /* Returns: */
1320 /* 0 on success, positive value on failure. */
1321 /****************************************************************************/
1322 int
1323 bnx_enable_nvram_write(struct bnx_softc *sc)
1324 {
1325 uint32_t val;
1326
1327 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1328
1329 val = REG_RD(sc, BNX_MISC_CFG);
1330 REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1331
1332 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1333 int j;
1334
1335 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1336 REG_WR(sc, BNX_NVM_COMMAND,
1337 BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1338
1339 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1340 DELAY(5);
1341
1342 val = REG_RD(sc, BNX_NVM_COMMAND);
1343 if (val & BNX_NVM_COMMAND_DONE)
1344 break;
1345 }
1346
1347 if (j >= NVRAM_TIMEOUT_COUNT) {
1348 DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1349 return EBUSY;
1350 }
1351 }
1352
1353 return 0;
1354 }
1355
1356 /****************************************************************************/
1357 /* Disable NVRAM write access. */
1358 /* */
1359 /* When the caller is finished writing to NVRAM write access must be */
1360 /* disabled. */
1361 /* */
1362 /* Returns: */
1363 /* Nothing. */
1364 /****************************************************************************/
1365 void
1366 bnx_disable_nvram_write(struct bnx_softc *sc)
1367 {
1368 uint32_t val;
1369
1370 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");
1371
1372 val = REG_RD(sc, BNX_MISC_CFG);
1373 REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1374 }
1375 #endif
1376
1377 /****************************************************************************/
1378 /* Enable NVRAM access. */
1379 /* */
/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1382 /* */
1383 /* Returns: */
1384 /* Nothing. */
1385 /****************************************************************************/
1386 void
1387 bnx_enable_nvram_access(struct bnx_softc *sc)
1388 {
1389 uint32_t val;
1390
1391 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1392
1393 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1394 /* Enable both bits, even on read. */
1395 REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1396 val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1397 }
1398
1399 /****************************************************************************/
1400 /* Disable NVRAM access. */
1401 /* */
1402 /* When the caller is finished accessing NVRAM access must be disabled. */
1403 /* */
1404 /* Returns: */
1405 /* Nothing. */
1406 /****************************************************************************/
1407 void
1408 bnx_disable_nvram_access(struct bnx_softc *sc)
1409 {
1410 uint32_t val;
1411
1412 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1413
1414 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1415
1416 /* Disable both bits, even after read. */
1417 REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1418 val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1419 }
1420
1421 #ifdef BNX_NVRAM_WRITE_SUPPORT
1422 /****************************************************************************/
1423 /* Erase NVRAM page before writing. */
1424 /* */
1425 /* Non-buffered flash parts require that a page be erased before it is */
1426 /* written. */
1427 /* */
1428 /* Returns: */
1429 /* 0 on success, positive value on failure. */
1430 /****************************************************************************/
1431 int
1432 bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
1433 {
1434 uint32_t cmd;
1435 int j;
1436
1437 /* Buffered flash doesn't require an erase. */
1438 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
1439 return 0;
1440
1441 DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");
1442
1443 /* Build an erase command. */
1444 cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
1445 BNX_NVM_COMMAND_DOIT;
1446
1447 /*
1448 * Clear the DONE bit separately, set the NVRAM address to erase,
1449 * and issue the erase command.
1450 */
1451 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1452 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1453 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1454
1455 /* Wait for completion. */
1456 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1457 uint32_t val;
1458
1459 DELAY(5);
1460
1461 val = REG_RD(sc, BNX_NVM_COMMAND);
1462 if (val & BNX_NVM_COMMAND_DONE)
1463 break;
1464 }
1465
1466 if (j >= NVRAM_TIMEOUT_COUNT) {
1467 DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
1468 return EBUSY;
1469 }
1470
1471 return 0;
1472 }
1473 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1474
1475 /****************************************************************************/
1476 /* Read a dword (32 bits) from NVRAM. */
1477 /* */
1478 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1479 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1480 /* */
1481 /* Returns: */
1482 /* 0 on success and the 32 bit value read, positive value on failure. */
1483 /****************************************************************************/
1484 int
1485 bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
1486 uint8_t *ret_val, uint32_t cmd_flags)
1487 {
1488 uint32_t cmd;
1489 int i, rc = 0;
1490
1491 /* Build the command word. */
1492 cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;
1493
1494 /* Calculate the offset for buffered flash if translation is used. */
1495 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1496 offset = ((offset / sc->bnx_flash_info->page_size) <<
1497 sc->bnx_flash_info->page_bits) +
1498 (offset % sc->bnx_flash_info->page_size);
1499 }
1500
1501 /*
1502 * Clear the DONE bit separately, set the address to read,
1503 * and issue the read.
1504 */
1505 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1506 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1507 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1508
1509 /* Wait for completion. */
1510 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1511 uint32_t val;
1512
1513 DELAY(5);
1514
1515 val = REG_RD(sc, BNX_NVM_COMMAND);
1516 if (val & BNX_NVM_COMMAND_DONE) {
1517 val = REG_RD(sc, BNX_NVM_READ);
1518
1519 val = bnx_be32toh(val);
1520 memcpy(ret_val, &val, 4);
1521 break;
1522 }
1523 }
1524
1525 /* Check for errors. */
1526 if (i >= NVRAM_TIMEOUT_COUNT) {
1527 BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
1528 "offset 0x%08X!\n", __FILE__, __LINE__, offset);
1529 rc = EBUSY;
1530 }
1531
1532 return rc;
1533 }
1534
1535 #ifdef BNX_NVRAM_WRITE_SUPPORT
1536 /****************************************************************************/
1537 /* Write a dword (32 bits) to NVRAM. */
1538 /* */
1539 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1540 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1541 /* enabled NVRAM write access. */
1542 /* */
1543 /* Returns: */
1544 /* 0 on success, positive value on failure. */
1545 /****************************************************************************/
1546 int
1547 bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val,
1548 uint32_t cmd_flags)
1549 {
1550 uint32_t cmd, val32;
1551 int j;
1552
1553 /* Build the command word. */
1554 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1555
1556 /* Calculate the offset for buffered flash if translation is used. */
1557 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1558 offset = ((offset / sc->bnx_flash_info->page_size) <<
1559 sc->bnx_flash_info->page_bits) +
1560 (offset % sc->bnx_flash_info->page_size);
1561 }
1562
1563 /*
1564 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1565 * set the NVRAM address to write, and issue the write command
1566 */
1567 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1568 memcpy(&val32, val, 4);
1569 val32 = htobe32(val32);
1570 REG_WR(sc, BNX_NVM_WRITE, val32);
1571 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1572 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1573
1574 /* Wait for completion. */
1575 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1576 DELAY(5);
1577
1578 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1579 break;
1580 }
1581 if (j >= NVRAM_TIMEOUT_COUNT) {
1582 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1583 "offset 0x%08X\n", __FILE__, __LINE__, offset);
1584 return EBUSY;
1585 }
1586
1587 return 0;
1588 }
1589 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1590
1591 /****************************************************************************/
1592 /* Initialize NVRAM access. */
1593 /* */
1594 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1595 /* access that device. */
1596 /* */
1597 /* Returns: */
1598 /* 0 on success, positive value on failure. */
1599 /****************************************************************************/
1600 int
1601 bnx_init_nvram(struct bnx_softc *sc)
1602 {
1603 uint32_t val;
1604 int j, entry_count, rc = 0;
1605 struct flash_spec *flash;
1606
1607 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1608
1609 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1610 sc->bnx_flash_info = &flash_5709;
1611 goto bnx_init_nvram_get_flash_size;
1612 }
1613
1614 /* Determine the selected interface. */
1615 val = REG_RD(sc, BNX_NVM_CFG1);
1616
1617 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1618
1619 /*
1620 * Flash reconfiguration is required to support additional
1621 * NVRAM devices not directly supported in hardware.
1622 * Check if the flash interface was reconfigured
1623 * by the bootcode.
1624 */
1625
1626 if (val & 0x40000000) {
1627 /* Flash interface reconfigured by bootcode. */
1628
1629 DBPRINT(sc,BNX_INFO_LOAD,
1630 "bnx_init_nvram(): Flash WAS reconfigured.\n");
1631
1632 for (j = 0, flash = &flash_table[0]; j < entry_count;
1633 j++, flash++) {
1634 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1635 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1636 sc->bnx_flash_info = flash;
1637 break;
1638 }
1639 }
1640 } else {
1641 /* Flash interface not yet reconfigured. */
1642 uint32_t mask;
1643
1644 DBPRINT(sc,BNX_INFO_LOAD,
1645 "bnx_init_nvram(): Flash was NOT reconfigured.\n");
1646
1647 if (val & (1 << 23))
1648 mask = FLASH_BACKUP_STRAP_MASK;
1649 else
1650 mask = FLASH_STRAP_MASK;
1651
1652 /* Look for the matching NVRAM device configuration data. */
1653 for (j = 0, flash = &flash_table[0]; j < entry_count;
1654 j++, flash++) {
1655 /* Check if the dev matches any of the known devices. */
1656 if ((val & mask) == (flash->strapping & mask)) {
1657 /* Found a device match. */
1658 sc->bnx_flash_info = flash;
1659
1660 /* Request access to the flash interface. */
1661 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1662 return rc;
1663
1664 /* Reconfigure the flash interface. */
1665 bnx_enable_nvram_access(sc);
1666 REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1667 REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1668 REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1669 REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1670 bnx_disable_nvram_access(sc);
1671 bnx_release_nvram_lock(sc);
1672
1673 break;
1674 }
1675 }
1676 }
1677
1678 /* Check if a matching device was found. */
1679 if (j == entry_count) {
1680 sc->bnx_flash_info = NULL;
1681 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1682 __FILE__, __LINE__);
1683 rc = ENODEV;
1684 }
1685
1686 bnx_init_nvram_get_flash_size:
1687 /* Write the flash config data to the shared memory interface. */
1688 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1689 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1690 if (val)
1691 sc->bnx_flash_size = val;
1692 else
1693 sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1694
1695 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1696 "0x%08X\n", sc->bnx_flash_info->total_size);
1697
1698 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
1699
1700 return rc;
1701 }
1702
1703 /****************************************************************************/
1704 /* Read an arbitrary range of data from NVRAM. */
1705 /* */
1706 /* Prepares the NVRAM interface for access and reads the requested data */
1707 /* into the supplied buffer. */
1708 /* */
1709 /* Returns: */
1710 /* 0 on success and the data read, positive value on failure. */
1711 /****************************************************************************/
1712 int
1713 bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf,
1714 int buf_size)
1715 {
1716 int rc = 0;
1717 uint32_t cmd_flags, offset32, len32, extra;
1718
1719 if (buf_size == 0)
1720 return 0;
1721
1722 /* Request access to the flash interface. */
1723 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1724 return rc;
1725
1726 /* Enable access to flash interface */
1727 bnx_enable_nvram_access(sc);
1728
1729 len32 = buf_size;
1730 offset32 = offset;
1731 extra = 0;
1732
1733 cmd_flags = 0;
1734
1735 if (offset32 & 3) {
1736 uint8_t buf[4];
1737 uint32_t pre_len;
1738
1739 offset32 &= ~3;
1740 pre_len = 4 - (offset & 3);
1741
1742 if (pre_len >= len32) {
1743 pre_len = len32;
1744 cmd_flags =
1745 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1746 } else
1747 cmd_flags = BNX_NVM_COMMAND_FIRST;
1748
1749 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1750
1751 if (rc)
1752 return rc;
1753
1754 memcpy(ret_buf, buf + (offset & 3), pre_len);
1755
1756 offset32 += 4;
1757 ret_buf += pre_len;
1758 len32 -= pre_len;
1759 }
1760
1761 if (len32 & 3) {
1762 extra = 4 - (len32 & 3);
1763 len32 = (len32 + 4) & ~3;
1764 }
1765
1766 if (len32 == 4) {
1767 uint8_t buf[4];
1768
1769 if (cmd_flags)
1770 cmd_flags = BNX_NVM_COMMAND_LAST;
1771 else
1772 cmd_flags =
1773 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1774
1775 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1776
1777 memcpy(ret_buf, buf, 4 - extra);
1778 } else if (len32 > 0) {
1779 uint8_t buf[4];
1780
1781 /* Read the first word. */
1782 if (cmd_flags)
1783 cmd_flags = 0;
1784 else
1785 cmd_flags = BNX_NVM_COMMAND_FIRST;
1786
1787 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1788
1789 /* Advance to the next dword. */
1790 offset32 += 4;
1791 ret_buf += 4;
1792 len32 -= 4;
1793
1794 while (len32 > 4 && rc == 0) {
1795 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1796
1797 /* Advance to the next dword. */
1798 offset32 += 4;
1799 ret_buf += 4;
1800 len32 -= 4;
1801 }
1802
1803 if (rc)
1804 return rc;
1805
1806 cmd_flags = BNX_NVM_COMMAND_LAST;
1807 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1808
1809 memcpy(ret_buf, buf, 4 - extra);
1810 }
1811
1812 /* Disable access to flash interface and release the lock. */
1813 bnx_disable_nvram_access(sc);
1814 bnx_release_nvram_lock(sc);
1815
1816 return rc;
1817 }
1818
1819 #ifdef BNX_NVRAM_WRITE_SUPPORT
1820 /****************************************************************************/
1821 /* Write an arbitrary range of data from NVRAM. */
1822 /* */
1823 /* Prepares the NVRAM interface for write access and writes the requested */
1824 /* data from the supplied buffer. The caller is responsible for */
1825 /* calculating any appropriate CRCs. */
1826 /* */
1827 /* Returns: */
1828 /* 0 on success, positive value on failure. */
1829 /****************************************************************************/
/*
 * Write an arbitrary byte range to NVRAM, aligning the range to dword
 * boundaries (using read-modify-write for the partial edge dwords) and
 * writing one flash page per loop iteration.  The caller is responsible
 * for any CRC updates.  Returns 0 on success, positive errno on failure.
 */
int
bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf,
    int buf_size)
{
	uint32_t written, offset32, len32;
	uint8_t *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword containing the first bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the dword containing the last bytes. */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4)))
				return rc;
		}
	}

	/*
	 * Build an aligned shadow buffer: edge dword(s) read above plus
	 * the caller's data in the middle.
	 * NOTE(review): "buf == 0" is a NULL check written against 0.
	 */
	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == 0)
			return ENOMEM;

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Write one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		uint32_t page_start, page_end, data_start, data_end;
		uint32_t addr, cmd_flags;
		int i;
		/*
		 * Page shadow for non-buffered flash; 264 bytes covers
		 * the largest page_size used by the flash_table entries
		 * (assumed — verify against the table).
		 */
		uint8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx_enable_nvram_access(sc);

		/*
		 * NOTE(review): every error goto below this point jumps to
		 * nvram_write_end while the NVRAM lock is still held (and
		 * possibly with write access enabled) — verify whether the
		 * lock should be released on these paths.
		 */
		cmd_flags = BNX_NVM_COMMAND_FIRST;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
				if (j == (sc->bnx_flash_info->page_size - 4))
					cmd_flags |= BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_read_dword(sc,
				    page_start + j,
				    &flash_buffer[j],
				    cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx_enable_nvram_write(sc)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = page_start; addr < data_start;
			    addr += 4, i += 4) {

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i++) {
			/* Tag the last dword of the page (or, for buffered
			 * flash, the last dword of the data) as LAST. */
			if ((addr == page_end - 4) ||
			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
			    && (addr == data_end - 4))) {

				cmd_flags |= BNX_NVM_COMMAND_LAST;
			}

			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {

				if (addr == page_end-4)
					cmd_flags = BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* Free the shadow buffer if one was allocated above. */
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	return rc;
}
2004 #endif /* BNX_NVRAM_WRITE_SUPPORT */
2005
2006 /****************************************************************************/
2007 /* Verifies that NVRAM is accessible and contains valid data. */
2008 /* */
2009 /* Reads the configuration data from NVRAM and verifies that the CRC is */
2010 /* correct. */
2011 /* */
2012 /* Returns: */
2013 /* 0 on success, positive value on failure. */
2014 /****************************************************************************/
2015 int
2016 bnx_nvram_test(struct bnx_softc *sc)
2017 {
2018 uint32_t buf[BNX_NVRAM_SIZE / 4];
2019 uint8_t *data = (uint8_t *) buf;
2020 int rc = 0;
2021 uint32_t magic, csum;
2022
2023 /*
2024 * Check that the device NVRAM is valid by reading
2025 * the magic value at offset 0.
2026 */
2027 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2028 goto bnx_nvram_test_done;
2029
2030 magic = bnx_be32toh(buf[0]);
2031 if (magic != BNX_NVRAM_MAGIC) {
2032 rc = ENODEV;
2033 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2034 "Expected: 0x%08X, Found: 0x%08X\n",
2035 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2036 goto bnx_nvram_test_done;
2037 }
2038
2039 /*
2040 * Verify that the device NVRAM includes valid
2041 * configuration data.
2042 */
2043 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2044 goto bnx_nvram_test_done;
2045
2046 csum = ether_crc32_le(data, 0x100);
2047 if (csum != BNX_CRC32_RESIDUAL) {
2048 rc = ENODEV;
2049 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2050 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2051 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2052 goto bnx_nvram_test_done;
2053 }
2054
2055 csum = ether_crc32_le(data + 0x100, 0x100);
2056 if (csum != BNX_CRC32_RESIDUAL) {
2057 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2058 "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2059 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2060 rc = ENODEV;
2061 }
2062
2063 bnx_nvram_test_done:
2064 return rc;
2065 }
2066
2067 /****************************************************************************/
2068 /* Identifies the current media type of the controller and sets the PHY */
2069 /* address. */
2070 /* */
2071 /* Returns: */
2072 /* Nothing. */
2073 /****************************************************************************/
void
bnx_get_media(struct bnx_softc *sc)
{
	/* Default: PHY at MII address 1 (overridden below for SerDes). */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
		uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		uint32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for copper.\n");
			goto bnx_get_media_exit;
		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for dual media.\n");
			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
			goto bnx_get_media_exit;
		}

		/*
		 * The bond ID did not decide the media type; consult the
		 * PHY strap instead: the software override value when the
		 * override bit is set, otherwise the hardware strapping.
		 */
		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else {
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
			    >> 8;
		}

		/*
		 * The strap values that select SerDes differ between PCI
		 * function 0 and the other function of the device.
		 */
		if (sc->bnx_pa.pa_function == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
			}
		}

	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
		uint32_t val;

		/* Mark SerDes adapters as not Wake-on-LAN capable. */
		sc->bnx_flags |= BNX_NO_WOL_FLAG;

		/* 5709 SerDes PHYs are accessed via IEEE Clause 45. */
		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;

		/*
		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
		 * separate PHY for SerDes.
		 */
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BNX_INFO_LOAD,
				    "Found 2.5Gb capable adapter\n");
			}
		}
	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
	    (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;

bnx_get_media_exit:
	DBPRINT(sc, (BNX_INFO_LOAD),
	    "Using PHY address %d.\n", sc->bnx_phy_addr);
}
2167
2168 /****************************************************************************/
2169 /* Performs PHY initialization required before MII drivers access the */
2170 /* device. */
2171 /* */
2172 /* Returns: */
2173 /* Nothing. */
2174 /****************************************************************************/
2175 void
2176 bnx_init_media(struct bnx_softc *sc)
2177 {
2178 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
2179 /*
2180 * Configure the BCM5709S / BCM5716S PHYs to use traditional
2181 * IEEE Clause 22 method. Otherwise we have no way to attach
2182 * the PHY to the mii(4) layer. PHY specific configuration
2183 * is done by the mii(4) layer.
2184 */
2185
2186 /* Select auto-negotiation MMD of the PHY. */
2187 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2188 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2189
2190 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2191 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2192
2193 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2194 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2195 }
2196 }
2197
2198 /****************************************************************************/
2199 /* Free any DMA memory owned by the driver. */
2200 /* */
/* Scans through each data structure that requires DMA memory and frees     */
2202 /* the memory if allocated. */
2203 /* */
2204 /* Returns: */
2205 /* Nothing. */
2206 /****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/*
	 * Each resource is torn down in the reverse order of its setup in
	 * bnx_dma_alloc(): unload the DMA map, unmap the kernel virtual
	 * mapping, free the DMA segments, then destroy the map.  The
	 * block/map pointers are NULLed afterwards so partially-allocated
	 * state (e.g. after a failed bnx_dma_alloc()) is handled safely.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all context memory pages. */
	/* Context pages exist only on the BCM5709 (host-backed context). */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				bus_dmamem_unmap(sc->bnx_dmatag,
				    (void *)sc->ctx_block[i],
				    BCM_PAGE_SIZE);
				bus_dmamem_free(sc->bnx_dmatag,
				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
				bus_dmamap_destroy(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX dmamaps. */
	/* This isn't necessary since we dont allocate them up front */

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	/*
	 * NOTE(review): unlike the chains above, these map pointers are not
	 * NULLed after destruction, so this function is not safe to call
	 * twice once mbuf maps exist -- confirm callers only invoke it once.
	 */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
2304
2305 /****************************************************************************/
2306 /* Allocate any DMA memory needed by the driver. */
2307 /* */
2308 /* Allocates DMA memory needed for the various global structures needed by */
2309 /* hardware. */
2310 /* */
2311 /* Returns: */
2312 /* 0 for success, positive value for failure. */
2313 /****************************************************************************/
2314 int
2315 bnx_dma_alloc(struct bnx_softc *sc)
2316 {
2317 int i, rc = 0;
2318
2319 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2320
2321 /*
2322 * Allocate DMA memory for the status block, map the memory into DMA
2323 * space, and fetch the physical address of the block.
2324 */
2325 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2326 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2327 aprint_error_dev(sc->bnx_dev,
2328 "Could not create status block DMA map!\n");
2329 rc = ENOMEM;
2330 goto bnx_dma_alloc_exit;
2331 }
2332
2333 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2334 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2335 &sc->status_rseg, BUS_DMA_NOWAIT)) {
2336 aprint_error_dev(sc->bnx_dev,
2337 "Could not allocate status block DMA memory!\n");
2338 rc = ENOMEM;
2339 goto bnx_dma_alloc_exit;
2340 }
2341
2342 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2343 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
2344 aprint_error_dev(sc->bnx_dev,
2345 "Could not map status block DMA memory!\n");
2346 rc = ENOMEM;
2347 goto bnx_dma_alloc_exit;
2348 }
2349
2350 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2351 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2352 aprint_error_dev(sc->bnx_dev,
2353 "Could not load status block DMA memory!\n");
2354 rc = ENOMEM;
2355 goto bnx_dma_alloc_exit;
2356 }
2357
2358 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2359 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);
2360
2361 /* DRC - Fix for 64 bit addresses. */
2362 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2363 (uint32_t) sc->status_block_paddr);
2364
2365 /* BCM5709 uses host memory as cache for context memory. */
2366 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2367 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2368 if (sc->ctx_pages == 0)
2369 sc->ctx_pages = 1;
2370 if (sc->ctx_pages > 4) /* XXX */
2371 sc->ctx_pages = 4;
2372
2373 DBRUNIF((sc->ctx_pages > 512),
2374 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
2375 __FILE__, __LINE__, sc->ctx_pages));
2376
2377
2378 for (i = 0; i < sc->ctx_pages; i++) {
2379 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2380 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2381 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2382 &sc->ctx_map[i]) != 0) {
2383 rc = ENOMEM;
2384 goto bnx_dma_alloc_exit;
2385 }
2386
2387 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2388 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2389 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2390 rc = ENOMEM;
2391 goto bnx_dma_alloc_exit;
2392 }
2393
2394 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2395 sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2396 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
2397 rc = ENOMEM;
2398 goto bnx_dma_alloc_exit;
2399 }
2400
2401 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2402 sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2403 BUS_DMA_NOWAIT) != 0) {
2404 rc = ENOMEM;
2405 goto bnx_dma_alloc_exit;
2406 }
2407
2408 bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2409 }
2410 }
2411
2412 /*
2413 * Allocate DMA memory for the statistics block, map the memory into
2414 * DMA space, and fetch the physical address of the block.
2415 */
2416 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2417 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2418 aprint_error_dev(sc->bnx_dev,
2419 "Could not create stats block DMA map!\n");
2420 rc = ENOMEM;
2421 goto bnx_dma_alloc_exit;
2422 }
2423
2424 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2425 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2426 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2427 aprint_error_dev(sc->bnx_dev,
2428 "Could not allocate stats block DMA memory!\n");
2429 rc = ENOMEM;
2430 goto bnx_dma_alloc_exit;
2431 }
2432
2433 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2434 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2435 aprint_error_dev(sc->bnx_dev,
2436 "Could not map stats block DMA memory!\n");
2437 rc = ENOMEM;
2438 goto bnx_dma_alloc_exit;
2439 }
2440
2441 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2442 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2443 aprint_error_dev(sc->bnx_dev,
2444 "Could not load status block DMA memory!\n");
2445 rc = ENOMEM;
2446 goto bnx_dma_alloc_exit;
2447 }
2448
2449 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2450 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2451
2452 /* DRC - Fix for 64 bit address. */
2453 DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2454 (uint32_t) sc->stats_block_paddr);
2455
2456 /*
2457 * Allocate DMA memory for the TX buffer descriptor chain,
2458 * and fetch the physical address of the block.
2459 */
2460 for (i = 0; i < TX_PAGES; i++) {
2461 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2462 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2463 &sc->tx_bd_chain_map[i])) {
2464 aprint_error_dev(sc->bnx_dev,
2465 "Could not create Tx desc %d DMA map!\n", i);
2466 rc = ENOMEM;
2467 goto bnx_dma_alloc_exit;
2468 }
2469
2470 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2471 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2472 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2473 aprint_error_dev(sc->bnx_dev,
2474 "Could not allocate TX desc %d DMA memory!\n",
2475 i);
2476 rc = ENOMEM;
2477 goto bnx_dma_alloc_exit;
2478 }
2479
2480 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2481 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2482 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2483 aprint_error_dev(sc->bnx_dev,
2484 "Could not map TX desc %d DMA memory!\n", i);
2485 rc = ENOMEM;
2486 goto bnx_dma_alloc_exit;
2487 }
2488
2489 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2490 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2491 BUS_DMA_NOWAIT)) {
2492 aprint_error_dev(sc->bnx_dev,
2493 "Could not load TX desc %d DMA memory!\n", i);
2494 rc = ENOMEM;
2495 goto bnx_dma_alloc_exit;
2496 }
2497
2498 sc->tx_bd_chain_paddr[i] =
2499 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2500
2501 /* DRC - Fix for 64 bit systems. */
2502 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2503 i, (uint32_t) sc->tx_bd_chain_paddr[i]);
2504 }
2505
2506 /*
2507 * Create lists to hold TX mbufs.
2508 */
2509 TAILQ_INIT(&sc->tx_free_pkts);
2510 TAILQ_INIT(&sc->tx_used_pkts);
2511 sc->tx_pkt_count = 0;
2512 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET);
2513
2514 /*
2515 * Allocate DMA memory for the Rx buffer descriptor chain,
2516 * and fetch the physical address of the block.
2517 */
2518 for (i = 0; i < RX_PAGES; i++) {
2519 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2520 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2521 &sc->rx_bd_chain_map[i])) {
2522 aprint_error_dev(sc->bnx_dev,
2523 "Could not create Rx desc %d DMA map!\n", i);
2524 rc = ENOMEM;
2525 goto bnx_dma_alloc_exit;
2526 }
2527
2528 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2529 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2530 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2531 aprint_error_dev(sc->bnx_dev,
2532 "Could not allocate Rx desc %d DMA memory!\n", i);
2533 rc = ENOMEM;
2534 goto bnx_dma_alloc_exit;
2535 }
2536
2537 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2538 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2539 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2540 aprint_error_dev(sc->bnx_dev,
2541 "Could not map Rx desc %d DMA memory!\n", i);
2542 rc = ENOMEM;
2543 goto bnx_dma_alloc_exit;
2544 }
2545
2546 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2547 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2548 BUS_DMA_NOWAIT)) {
2549 aprint_error_dev(sc->bnx_dev,
2550 "Could not load Rx desc %d DMA memory!\n", i);
2551 rc = ENOMEM;
2552 goto bnx_dma_alloc_exit;
2553 }
2554
2555 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
2556 sc->rx_bd_chain_paddr[i] =
2557 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2558
2559 /* DRC - Fix for 64 bit systems. */
2560 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2561 i, (uint32_t) sc->rx_bd_chain_paddr[i]);
2562 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2563 0, BNX_RX_CHAIN_PAGE_SZ,
2564 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2565 }
2566
2567 /*
2568 * Create DMA maps for the Rx buffer mbufs.
2569 */
2570 for (i = 0; i < TOTAL_RX_BD; i++) {
2571 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
2572 BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
2573 &sc->rx_mbuf_map[i])) {
2574 aprint_error_dev(sc->bnx_dev,
2575 "Could not create Rx mbuf %d DMA map!\n", i);
2576 rc = ENOMEM;
2577 goto bnx_dma_alloc_exit;
2578 }
2579 }
2580
2581 bnx_dma_alloc_exit:
2582 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2583
2584 return rc;
2585 }
2586
2587 /****************************************************************************/
2588 /* Release all resources used by the driver. */
2589 /* */
2590 /* Releases all resources acquired by the driver including interrupts, */
2591 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2592 /* */
2593 /* Returns: */
2594 /* Nothing. */
2595 /****************************************************************************/
2596 void
2597 bnx_release_resources(struct bnx_softc *sc)
2598 {
2599 struct pci_attach_args *pa = &(sc->bnx_pa);
2600
2601 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2602
2603 bnx_dma_free(sc);
2604
2605 if (sc->bnx_intrhand != NULL)
2606 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2607
2608 if (sc->bnx_size)
2609 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2610
2611 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2612 }
2613
2614 /****************************************************************************/
2615 /* Firmware synchronization. */
2616 /* */
2617 /* Before performing certain events such as a chip reset, synchronize with */
2618 /* the firmware first. */
2619 /* */
2620 /* Returns: */
2621 /* 0 for success, positive value for failure. */
2622 /****************************************************************************/
2623 int
2624 bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data)
2625 {
2626 int i, rc = 0;
2627 uint32_t val;
2628
2629 /* Don't waste any time if we've timed out before. */
2630 if (sc->bnx_fw_timed_out) {
2631 rc = EBUSY;
2632 goto bnx_fw_sync_exit;
2633 }
2634
2635 /* Increment the message sequence number. */
2636 sc->bnx_fw_wr_seq++;
2637 msg_data |= sc->bnx_fw_wr_seq;
2638
2639 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2640 msg_data);
2641
2642 /* Send the message to the bootcode driver mailbox. */
2643 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2644
2645 /* Wait for the bootcode to acknowledge the message. */
2646 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2647 /* Check for a response in the bootcode firmware mailbox. */
2648 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2649 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2650 break;
2651 DELAY(1000);
2652 }
2653
2654 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2655 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2656 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2657 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2658 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2659
2660 msg_data &= ~BNX_DRV_MSG_CODE;
2661 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2662
2663 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2664
2665 sc->bnx_fw_timed_out = 1;
2666 rc = EBUSY;
2667 }
2668
2669 bnx_fw_sync_exit:
2670 return rc;
2671 }
2672
2673 /****************************************************************************/
2674 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2675 /* */
2676 /* Returns: */
2677 /* Nothing. */
2678 /****************************************************************************/
2679 void
2680 bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code,
2681 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2682 {
2683 int i;
2684 uint32_t val;
2685
2686 /* Set the page size used by RV2P. */
2687 if (rv2p_proc == RV2P_PROC2) {
2688 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2689 USABLE_RX_BD_PER_PAGE);
2690 }
2691
2692 for (i = 0; i < rv2p_code_len; i += 8) {
2693 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2694 rv2p_code++;
2695 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2696 rv2p_code++;
2697
2698 if (rv2p_proc == RV2P_PROC1) {
2699 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2700 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2701 } else {
2702 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2703 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2704 }
2705 }
2706
2707 /* Reset the processor, un-stall is done later. */
2708 if (rv2p_proc == RV2P_PROC1)
2709 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2710 else
2711 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2712 }
2713
2714 /****************************************************************************/
2715 /* Load RISC processor firmware. */
2716 /* */
2717 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */
2718 /* associated with a particular processor. */
2719 /* */
2720 /* Returns: */
2721 /* Nothing. */
2722 /****************************************************************************/
2723 void
2724 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2725 struct fw_info *fw)
2726 {
2727 uint32_t offset;
2728 uint32_t val;
2729
2730 /* Halt the CPU. */
2731 val = REG_RD_IND(sc, cpu_reg->mode);
2732 val |= cpu_reg->mode_value_halt;
2733 REG_WR_IND(sc, cpu_reg->mode, val);
2734 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2735
2736 /* Load the Text area. */
2737 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2738 if (fw->text) {
2739 int j;
2740
2741 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2742 REG_WR_IND(sc, offset, fw->text[j]);
2743 }
2744
2745 /* Load the Data area. */
2746 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2747 if (fw->data) {
2748 int j;
2749
2750 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2751 REG_WR_IND(sc, offset, fw->data[j]);
2752 }
2753
2754 /* Load the SBSS area. */
2755 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2756 if (fw->sbss) {
2757 int j;
2758
2759 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2760 REG_WR_IND(sc, offset, fw->sbss[j]);
2761 }
2762
2763 /* Load the BSS area. */
2764 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2765 if (fw->bss) {
2766 int j;
2767
2768 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2769 REG_WR_IND(sc, offset, fw->bss[j]);
2770 }
2771
2772 /* Load the Read-Only area. */
2773 offset = cpu_reg->spad_base +
2774 (fw->rodata_addr - cpu_reg->mips_view_base);
2775 if (fw->rodata) {
2776 int j;
2777
2778 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2779 REG_WR_IND(sc, offset, fw->rodata[j]);
2780 }
2781
2782 /* Clear the pre-fetch instruction. */
2783 REG_WR_IND(sc, cpu_reg->inst, 0);
2784 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2785
2786 /* Start the CPU. */
2787 val = REG_RD_IND(sc, cpu_reg->mode);
2788 val &= ~cpu_reg->mode_value_halt;
2789 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2790 REG_WR_IND(sc, cpu_reg->mode, val);
2791 }
2792
2793 /****************************************************************************/
2794 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2795 /* */
2796 /* Loads the firmware for each CPU and starts the CPU. */
2797 /* */
2798 /* Returns: */
2799 /* Nothing. */
2800 /****************************************************************************/
2801 void
2802 bnx_init_cpus(struct bnx_softc *sc)
2803 {
2804 struct cpu_reg cpu_reg;
2805 struct fw_info fw;
2806
2807 switch(BNX_CHIP_NUM(sc)) {
2808 case BNX_CHIP_NUM_5709:
2809 /* Initialize the RV2P processor. */
2810 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) {
2811 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1,
2812 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1);
2813 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2,
2814 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2);
2815 } else {
2816 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1,
2817 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1);
2818 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2,
2819 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2);
2820 }
2821
2822 /* Initialize the RX Processor. */
2823 cpu_reg.mode = BNX_RXP_CPU_MODE;
2824 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2825 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2826 cpu_reg.state = BNX_RXP_CPU_STATE;
2827 cpu_reg.state_value_clear = 0xffffff;
2828 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2829 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2830 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2831 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2832 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2833 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2834 cpu_reg.mips_view_base = 0x8000000;
2835
2836 fw.ver_major = bnx_RXP_b09FwReleaseMajor;
2837 fw.ver_minor = bnx_RXP_b09FwReleaseMinor;
2838 fw.ver_fix = bnx_RXP_b09FwReleaseFix;
2839 fw.start_addr = bnx_RXP_b09FwStartAddr;
2840
2841 fw.text_addr = bnx_RXP_b09FwTextAddr;
2842 fw.text_len = bnx_RXP_b09FwTextLen;
2843 fw.text_index = 0;
2844 fw.text = bnx_RXP_b09FwText;
2845
2846 fw.data_addr = bnx_RXP_b09FwDataAddr;
2847 fw.data_len = bnx_RXP_b09FwDataLen;
2848 fw.data_index = 0;
2849 fw.data = bnx_RXP_b09FwData;
2850
2851 fw.sbss_addr = bnx_RXP_b09FwSbssAddr;
2852 fw.sbss_len = bnx_RXP_b09FwSbssLen;
2853 fw.sbss_index = 0;
2854 fw.sbss = bnx_RXP_b09FwSbss;
2855
2856 fw.bss_addr = bnx_RXP_b09FwBssAddr;
2857 fw.bss_len = bnx_RXP_b09FwBssLen;
2858 fw.bss_index = 0;
2859 fw.bss = bnx_RXP_b09FwBss;
2860
2861 fw.rodata_addr = bnx_RXP_b09FwRodataAddr;
2862 fw.rodata_len = bnx_RXP_b09FwRodataLen;
2863 fw.rodata_index = 0;
2864 fw.rodata = bnx_RXP_b09FwRodata;
2865
2866 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2867 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2868
2869 /* Initialize the TX Processor. */
2870 cpu_reg.mode = BNX_TXP_CPU_MODE;
2871 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2872 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2873 cpu_reg.state = BNX_TXP_CPU_STATE;
2874 cpu_reg.state_value_clear = 0xffffff;
2875 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2876 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2877 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2878 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2879 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2880 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2881 cpu_reg.mips_view_base = 0x8000000;
2882
2883 fw.ver_major = bnx_TXP_b09FwReleaseMajor;
2884 fw.ver_minor = bnx_TXP_b09FwReleaseMinor;
2885 fw.ver_fix = bnx_TXP_b09FwReleaseFix;
2886 fw.start_addr = bnx_TXP_b09FwStartAddr;
2887
2888 fw.text_addr = bnx_TXP_b09FwTextAddr;
2889 fw.text_len = bnx_TXP_b09FwTextLen;
2890 fw.text_index = 0;
2891 fw.text = bnx_TXP_b09FwText;
2892
2893 fw.data_addr = bnx_TXP_b09FwDataAddr;
2894 fw.data_len = bnx_TXP_b09FwDataLen;
2895 fw.data_index = 0;
2896 fw.data = bnx_TXP_b09FwData;
2897
2898 fw.sbss_addr = bnx_TXP_b09FwSbssAddr;
2899 fw.sbss_len = bnx_TXP_b09FwSbssLen;
2900 fw.sbss_index = 0;
2901 fw.sbss = bnx_TXP_b09FwSbss;
2902
2903 fw.bss_addr = bnx_TXP_b09FwBssAddr;
2904 fw.bss_len = bnx_TXP_b09FwBssLen;
2905 fw.bss_index = 0;
2906 fw.bss = bnx_TXP_b09FwBss;
2907
2908 fw.rodata_addr = bnx_TXP_b09FwRodataAddr;
2909 fw.rodata_len = bnx_TXP_b09FwRodataLen;
2910 fw.rodata_index = 0;
2911 fw.rodata = bnx_TXP_b09FwRodata;
2912
2913 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2914 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2915
2916 /* Initialize the TX Patch-up Processor. */
2917 cpu_reg.mode = BNX_TPAT_CPU_MODE;
2918 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2919 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2920 cpu_reg.state = BNX_TPAT_CPU_STATE;
2921 cpu_reg.state_value_clear = 0xffffff;
2922 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2923 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2924 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2925 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2926 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2927 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2928 cpu_reg.mips_view_base = 0x8000000;
2929
2930 fw.ver_major = bnx_TPAT_b09FwReleaseMajor;
2931 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor;
2932 fw.ver_fix = bnx_TPAT_b09FwReleaseFix;
2933 fw.start_addr = bnx_TPAT_b09FwStartAddr;
2934
2935 fw.text_addr = bnx_TPAT_b09FwTextAddr;
2936 fw.text_len = bnx_TPAT_b09FwTextLen;
2937 fw.text_index = 0;
2938 fw.text = bnx_TPAT_b09FwText;
2939
2940 fw.data_addr = bnx_TPAT_b09FwDataAddr;
2941 fw.data_len = bnx_TPAT_b09FwDataLen;
2942 fw.data_index = 0;
2943 fw.data = bnx_TPAT_b09FwData;
2944
2945 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr;
2946 fw.sbss_len = bnx_TPAT_b09FwSbssLen;
2947 fw.sbss_index = 0;
2948 fw.sbss = bnx_TPAT_b09FwSbss;
2949
2950 fw.bss_addr = bnx_TPAT_b09FwBssAddr;
2951 fw.bss_len = bnx_TPAT_b09FwBssLen;
2952 fw.bss_index = 0;
2953 fw.bss = bnx_TPAT_b09FwBss;
2954
2955 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr;
2956 fw.rodata_len = bnx_TPAT_b09FwRodataLen;
2957 fw.rodata_index = 0;
2958 fw.rodata = bnx_TPAT_b09FwRodata;
2959
2960 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2961 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2962
2963 /* Initialize the Completion Processor. */
2964 cpu_reg.mode = BNX_COM_CPU_MODE;
2965 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2966 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2967 cpu_reg.state = BNX_COM_CPU_STATE;
2968 cpu_reg.state_value_clear = 0xffffff;
2969 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2970 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2971 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2972 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2973 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2974 cpu_reg.spad_base = BNX_COM_SCRATCH;
2975 cpu_reg.mips_view_base = 0x8000000;
2976
2977 fw.ver_major = bnx_COM_b09FwReleaseMajor;
2978 fw.ver_minor = bnx_COM_b09FwReleaseMinor;
2979 fw.ver_fix = bnx_COM_b09FwReleaseFix;
2980 fw.start_addr = bnx_COM_b09FwStartAddr;
2981
2982 fw.text_addr = bnx_COM_b09FwTextAddr;
2983 fw.text_len = bnx_COM_b09FwTextLen;
2984 fw.text_index = 0;
2985 fw.text = bnx_COM_b09FwText;
2986
2987 fw.data_addr = bnx_COM_b09FwDataAddr;
2988 fw.data_len = bnx_COM_b09FwDataLen;
2989 fw.data_index = 0;
2990 fw.data = bnx_COM_b09FwData;
2991
2992 fw.sbss_addr = bnx_COM_b09FwSbssAddr;
2993 fw.sbss_len = bnx_COM_b09FwSbssLen;
2994 fw.sbss_index = 0;
2995 fw.sbss = bnx_COM_b09FwSbss;
2996
2997 fw.bss_addr = bnx_COM_b09FwBssAddr;
2998 fw.bss_len = bnx_COM_b09FwBssLen;
2999 fw.bss_index = 0;
3000 fw.bss = bnx_COM_b09FwBss;
3001
3002 fw.rodata_addr = bnx_COM_b09FwRodataAddr;
3003 fw.rodata_len = bnx_COM_b09FwRodataLen;
3004 fw.rodata_index = 0;
3005 fw.rodata = bnx_COM_b09FwRodata;
3006 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
3007 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3008 break;
3009 default:
3010 /* Initialize the RV2P processor. */
3011 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
3012 RV2P_PROC1);
3013 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
3014 RV2P_PROC2);
3015
3016 /* Initialize the RX Processor. */
3017 cpu_reg.mode = BNX_RXP_CPU_MODE;
3018 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
3019 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
3020 cpu_reg.state = BNX_RXP_CPU_STATE;
3021 cpu_reg.state_value_clear = 0xffffff;
3022 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
3023 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
3024 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
3025 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
3026 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
3027 cpu_reg.spad_base = BNX_RXP_SCRATCH;
3028 cpu_reg.mips_view_base = 0x8000000;
3029
3030 fw.ver_major = bnx_RXP_b06FwReleaseMajor;
3031 fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
3032 fw.ver_fix = bnx_RXP_b06FwReleaseFix;
3033 fw.start_addr = bnx_RXP_b06FwStartAddr;
3034
3035 fw.text_addr = bnx_RXP_b06FwTextAddr;
3036 fw.text_len = bnx_RXP_b06FwTextLen;
3037 fw.text_index = 0;
3038 fw.text = bnx_RXP_b06FwText;
3039
3040 fw.data_addr = bnx_RXP_b06FwDataAddr;
3041 fw.data_len = bnx_RXP_b06FwDataLen;
3042 fw.data_index = 0;
3043 fw.data = bnx_RXP_b06FwData;
3044
3045 fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
3046 fw.sbss_len = bnx_RXP_b06FwSbssLen;
3047 fw.sbss_index = 0;
3048 fw.sbss = bnx_RXP_b06FwSbss;
3049
3050 fw.bss_addr = bnx_RXP_b06FwBssAddr;
3051 fw.bss_len = bnx_RXP_b06FwBssLen;
3052 fw.bss_index = 0;
3053 fw.bss = bnx_RXP_b06FwBss;
3054
3055 fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
3056 fw.rodata_len = bnx_RXP_b06FwRodataLen;
3057 fw.rodata_index = 0;
3058 fw.rodata = bnx_RXP_b06FwRodata;
3059
3060 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
3061 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3062
3063 /* Initialize the TX Processor. */
3064 cpu_reg.mode = BNX_TXP_CPU_MODE;
3065 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
3066 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
3067 cpu_reg.state = BNX_TXP_CPU_STATE;
3068 cpu_reg.state_value_clear = 0xffffff;
3069 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
3070 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
3071 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
3072 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
3073 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
3074 cpu_reg.spad_base = BNX_TXP_SCRATCH;
3075 cpu_reg.mips_view_base = 0x8000000;
3076
3077 fw.ver_major = bnx_TXP_b06FwReleaseMajor;
3078 fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
3079 fw.ver_fix = bnx_TXP_b06FwReleaseFix;
3080 fw.start_addr = bnx_TXP_b06FwStartAddr;
3081
3082 fw.text_addr = bnx_TXP_b06FwTextAddr;
3083 fw.text_len = bnx_TXP_b06FwTextLen;
3084 fw.text_index = 0;
3085 fw.text = bnx_TXP_b06FwText;
3086
3087 fw.data_addr = bnx_TXP_b06FwDataAddr;
3088 fw.data_len = bnx_TXP_b06FwDataLen;
3089 fw.data_index = 0;
3090 fw.data = bnx_TXP_b06FwData;
3091
3092 fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
3093 fw.sbss_len = bnx_TXP_b06FwSbssLen;
3094 fw.sbss_index = 0;
3095 fw.sbss = bnx_TXP_b06FwSbss;
3096
3097 fw.bss_addr = bnx_TXP_b06FwBssAddr;
3098 fw.bss_len = bnx_TXP_b06FwBssLen;
3099 fw.bss_index = 0;
3100 fw.bss = bnx_TXP_b06FwBss;
3101
3102 fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
3103 fw.rodata_len = bnx_TXP_b06FwRodataLen;
3104 fw.rodata_index = 0;
3105 fw.rodata = bnx_TXP_b06FwRodata;
3106
3107 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
3108 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3109
3110 /* Initialize the TX Patch-up Processor. */
3111 cpu_reg.mode = BNX_TPAT_CPU_MODE;
3112 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
3113 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
3114 cpu_reg.state = BNX_TPAT_CPU_STATE;
3115 cpu_reg.state_value_clear = 0xffffff;
3116 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
3117 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
3118 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
3119 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
3120 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
3121 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
3122 cpu_reg.mips_view_base = 0x8000000;
3123
3124 fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
3125 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
3126 fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
3127 fw.start_addr = bnx_TPAT_b06FwStartAddr;
3128
3129 fw.text_addr = bnx_TPAT_b06FwTextAddr;
3130 fw.text_len = bnx_TPAT_b06FwTextLen;
3131 fw.text_index = 0;
3132 fw.text = bnx_TPAT_b06FwText;
3133
3134 fw.data_addr = bnx_TPAT_b06FwDataAddr;
3135 fw.data_len = bnx_TPAT_b06FwDataLen;
3136 fw.data_index = 0;
3137 fw.data = bnx_TPAT_b06FwData;
3138
3139 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
3140 fw.sbss_len = bnx_TPAT_b06FwSbssLen;
3141 fw.sbss_index = 0;
3142 fw.sbss = bnx_TPAT_b06FwSbss;
3143
3144 fw.bss_addr = bnx_TPAT_b06FwBssAddr;
3145 fw.bss_len = bnx_TPAT_b06FwBssLen;
3146 fw.bss_index = 0;
3147 fw.bss = bnx_TPAT_b06FwBss;
3148
3149 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
3150 fw.rodata_len = bnx_TPAT_b06FwRodataLen;
3151 fw.rodata_index = 0;
3152 fw.rodata = bnx_TPAT_b06FwRodata;
3153
3154 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
3155 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3156
3157 /* Initialize the Completion Processor. */
3158 cpu_reg.mode = BNX_COM_CPU_MODE;
3159 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
3160 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
3161 cpu_reg.state = BNX_COM_CPU_STATE;
3162 cpu_reg.state_value_clear = 0xffffff;
3163 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
3164 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
3165 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
3166 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
3167 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
3168 cpu_reg.spad_base = BNX_COM_SCRATCH;
3169 cpu_reg.mips_view_base = 0x8000000;
3170
3171 fw.ver_major = bnx_COM_b06FwReleaseMajor;
3172 fw.ver_minor = bnx_COM_b06FwReleaseMinor;
3173 fw.ver_fix = bnx_COM_b06FwReleaseFix;
3174 fw.start_addr = bnx_COM_b06FwStartAddr;
3175
3176 fw.text_addr = bnx_COM_b06FwTextAddr;
3177 fw.text_len = bnx_COM_b06FwTextLen;
3178 fw.text_index = 0;
3179 fw.text = bnx_COM_b06FwText;
3180
3181 fw.data_addr = bnx_COM_b06FwDataAddr;
3182 fw.data_len = bnx_COM_b06FwDataLen;
3183 fw.data_index = 0;
3184 fw.data = bnx_COM_b06FwData;
3185
3186 fw.sbss_addr = bnx_COM_b06FwSbssAddr;
3187 fw.sbss_len = bnx_COM_b06FwSbssLen;
3188 fw.sbss_index = 0;
3189 fw.sbss = bnx_COM_b06FwSbss;
3190
3191 fw.bss_addr = bnx_COM_b06FwBssAddr;
3192 fw.bss_len = bnx_COM_b06FwBssLen;
3193 fw.bss_index = 0;
3194 fw.bss = bnx_COM_b06FwBss;
3195
3196 fw.rodata_addr = bnx_COM_b06FwRodataAddr;
3197 fw.rodata_len = bnx_COM_b06FwRodataLen;
3198 fw.rodata_index = 0;
3199 fw.rodata = bnx_COM_b06FwRodata;
3200 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
3201 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3202 break;
3203 }
3204 }
3205
3206 /****************************************************************************/
3207 /* Initialize context memory. */
3208 /* */
3209 /* Clears the memory associated with each Context ID (CID). */
3210 /* */
3211 /* Returns: */
3212 /* Nothing. */
3213 /****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		/*
		 * NOTE(review): the meaning of bit 12 and of the field at
		 * bits 16.. is not visible here; the (BCM_PAGE_BITS - 8)
		 * encoding presumably tells the chip the host page size --
		 * confirm against the BCM5709 register reference.
		 */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			/* MEM_INIT self-clears when initialization is done. */
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		/* Program the chip's page table with each host context page. */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache. */
			val = (uint32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
			    BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			val = (uint32_t)
			    ((uint64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			/* Kick off the write of page-table entry i. */
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
			    BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				/* WRITE_REQ self-clears on completion. */
				if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to the
		 * controller, so initialize the controller context memory.
		 */

		/* Walk the context IDs from the top (CID 96) downward. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {

			vcid_addr -= BNX_PHY_CTX_SIZE;

			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			/* Zero the whole context, one 32-bit word at a time. */
			for (offset = 0; offset < BNX_PHY_CTX_SIZE;
			    offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
	}
}
3290
3291 /****************************************************************************/
3292 /* Fetch the permanent MAC address of the controller. */
3293 /* */
3294 /* Returns: */
3295 /* Nothing. */
3296 /****************************************************************************/
3297 void
3298 bnx_get_mac_addr(struct bnx_softc *sc)
3299 {
3300 uint32_t mac_lo = 0, mac_hi = 0;
3301
3302 /*
3303 * The NetXtreme II bootcode populates various NIC
3304 * power-on and runtime configuration items in a
3305 * shared memory area. The factory configured MAC
3306 * address is available from both NVRAM and the
3307 * shared memory area so we'll read the value from
3308 * shared memory for speed.
3309 */
3310
3311 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3312 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3313
3314 if ((mac_lo == 0) && (mac_hi == 0)) {
3315 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3316 __FILE__, __LINE__);
3317 } else {
3318 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3319 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3320 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3321 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3322 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3323 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3324 }
3325
3326 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3327 "%s\n", ether_sprintf(sc->eaddr));
3328 }
3329
3330 /****************************************************************************/
3331 /* Program the MAC address. */
3332 /* */
3333 /* Returns: */
3334 /* Nothing. */
3335 /****************************************************************************/
3336 void
3337 bnx_set_mac_addr(struct bnx_softc *sc)
3338 {
3339 uint32_t val;
3340 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);
3341
3342 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3343 "%s\n", ether_sprintf(sc->eaddr));
3344
3345 val = (mac_addr[0] << 8) | mac_addr[1];
3346
3347 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3348
3349 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3350 (mac_addr[4] << 8) | mac_addr[5];
3351
3352 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3353 }
3354
3355 /****************************************************************************/
3356 /* Stop the controller. */
3357 /* */
3358 /* Returns: */
3359 /* Nothing. */
3360 /****************************************************************************/
void
bnx_stop(struct ifnet *ifp, int disable)
{
	struct bnx_softc *sc = ifp->if_softc;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if (disable) {
		/*
		 * Detach path: mark the softc so the tick callout won't
		 * reschedule itself, and wait for any running instance
		 * to finish.
		 */
		sc->bnx_detaching = 1;
		callout_halt(&sc->bnx_timeout, NULL);
	} else
		callout_stop(&sc->bnx_timeout);

	mii_down(&sc->bnx_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	/* Read back, presumably to flush the posted write before delaying. */
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	if (disable)
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	else
		bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/* Cancel the watchdog. */
	ifp->if_timer = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

}
3402
/****************************************************************************/
/* Reset the controller and wait for the bootcode to restart.               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_reset(struct bnx_softc *sc, uint32_t reset_code)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	uint32_t val;
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
		val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* 5709 has a dedicated soft-reset command register. */
		REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BNX_MISC_COMMAND);
		DELAY(5);

		/* Restore register-window/byte-swap config via PCI config
		 * space, since the reset clobbers the MMIO path. */
		val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
		    val);
	} else {
		/* 5706/5708: reset is requested through MISC_CONFIG. */
		val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
			if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bnx_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}
3503
/****************************************************************************/
/* Perform one-time chip initialization after reset: DMA configuration,     */
/* errata workarounds, context memory, on-board CPU firmware and NVRAM.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	uint32_t val;
	int rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA */
	/* channels and PCI clock compensation delay. */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/* NOTE(review): meaning of the field at bits 20.. is not visible
	 * here -- confirm against the chip register reference. */
	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
		/* A1-specific workaround; purpose not documented here. */
		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
			val |= BNX_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Place the kernel bypass window just past the kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processor the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

#if 0
	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
#endif

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}
3606
3607 /****************************************************************************/
3608 /* Initialize the controller in preparation to send/receive traffic. */
3609 /* */
3610 /* Returns: */
3611 /* 0 for success, positive value for failure. */
3612 /****************************************************************************/
3613 int
3614 bnx_blockinit(struct bnx_softc *sc)
3615 {
3616 uint32_t reg, val;
3617 int rc = 0;
3618
3619 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3620
3621 /* Load the hardware default MAC address. */
3622 bnx_set_mac_addr(sc);
3623
3624 /* Set the Ethernet backoff seed value */
3625 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3626 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3627 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3628
3629 sc->last_status_idx = 0;
3630 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3631
3632 /* Set up link change interrupt generation. */
3633 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3634 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3635
3636 /* Program the physical address of the status block. */
3637 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr));
3638 REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3639 (uint32_t)((uint64_t)sc->status_block_paddr >> 32));
3640
3641 /* Program the physical address of the statistics block. */
3642 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3643 (uint32_t)(sc->stats_block_paddr));
3644 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3645 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32));
3646
3647 /* Program various host coalescing parameters. */
3648 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3649 << 16) | sc->bnx_tx_quick_cons_trip);
3650 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3651 << 16) | sc->bnx_rx_quick_cons_trip);
3652 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3653 sc->bnx_comp_prod_trip);
3654 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3655 sc->bnx_tx_ticks);
3656 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3657 sc->bnx_rx_ticks);
3658 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3659 sc->bnx_com_ticks);
3660 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3661 sc->bnx_cmd_ticks);
3662 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3663 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3664 REG_WR(sc, BNX_HC_CONFIG,
3665 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3666 BNX_HC_CONFIG_COLLECT_STATS));
3667
3668 /* Clear the internal statistics counters. */
3669 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3670
3671 /* Verify that bootcode is running. */
3672 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3673
3674 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3675 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3676 __FILE__, __LINE__); reg = 0);
3677
3678 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3679 BNX_DEV_INFO_SIGNATURE_MAGIC) {
3680 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3681 "Expected: 08%08X\n", __FILE__, __LINE__,
3682 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3683 BNX_DEV_INFO_SIGNATURE_MAGIC);
3684 rc = ENODEV;
3685 goto bnx_blockinit_exit;
3686 }
3687
3688 /* Check if any management firmware is running. */
3689 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3690 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3691 BNX_PORT_FEATURE_IMD_ENABLED)) {
3692 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3693 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3694 }
3695
3696 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3697 BNX_DEV_INFO_BC_REV);
3698
3699 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3700
3701 /* Enable DMA */
3702 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3703 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3704 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3705 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3706 }
3707
3708 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3709 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3710
3711 /* Enable link state change interrupt generation. */
3712 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3713 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3714 BNX_MISC_ENABLE_DEFAULT_XI);
3715 } else
3716 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3717
3718 /* Enable all remaining blocks in the MAC. */
3719 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3720 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3721 DELAY(20);
3722
3723 bnx_blockinit_exit:
3724 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3725
3726 return rc;
3727 }
3728
/****************************************************************************/
/* Map one RX mbuf into the rx_bd chain, possibly using several rx_bds.     */
/*                                                                          */
/* On success the mbuf and its (possibly swapped) DMA map are recorded in   */
/* the driver's RX bookkeeping arrays and *prod/*chain_prod/*prod_bseq are  */
/* advanced past the descriptors consumed.  On failure the mbuf is freed.   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod,
    uint16_t *chain_prod, uint32_t *prod_bseq)
{
	bus_dmamap_t map;
	struct rx_bd *rxbd;
	uint32_t addr;
	int i;
#ifdef BNX_DEBUG
	uint16_t debug_chain_prod = *chain_prod;
#endif
	uint16_t first_chain_prod;

	m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	first_chain_prod = *chain_prod;
	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
		BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
		    __FILE__, __LINE__);

		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		return ENOBUFS;
	}
	/* Make sure there is room in the receive chain. */
	if (map->dm_nsegs > sc->free_rx_bd) {
		bus_dmamap_unload(sc->bnx_dmatag, map);
		m_freem(m_new);
		return EFBIG;
	}
#ifdef BNX_DEBUG
	/* Track the distribution of buffer segments. */
	sc->rx_mbuf_segs[map->dm_nsegs]++;
#endif

	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);

	/*
	 * Setup the rx_bd for the first segment
	 */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	/* Split the 64-bit bus address into the lo/hi descriptor words. */
	addr = (uint32_t)map->dm_segs[0].ds_addr;
	rxbd->rx_bd_haddr_lo = addr;
	addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32);
	rxbd->rx_bd_haddr_hi = addr;
	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
	/* prod_bseq is the running byte count handed to the chip. */
	*prod_bseq += map->dm_segs[0].ds_len;
	bus_dmamap_sync(sc->bnx_dmatag,
	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
	    sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* One rx_bd per remaining DMA segment. */
	for (i = 1; i < map->dm_nsegs; i++) {
		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd =
		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		addr = (uint32_t)map->dm_segs[i].ds_addr;
		rxbd->rx_bd_haddr_lo = addr;
		addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
		rxbd->rx_bd_haddr_hi = addr;
		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
		rxbd->rx_bd_flags = 0;
		*prod_bseq += map->dm_segs[i].ds_len;
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
		    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
		    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Mark the last descriptor of the buffer. */
	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
	bus_dmamap_sync(sc->bnx_dmatag,
	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
	    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
	    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Save the mbuf, adjust the map pointer (swap map for first and
	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
	 * and update our counter.
	 */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
	sc->rx_mbuf_map[*chain_prod] = map;
	sc->free_rx_bd -= map->dm_nsegs;

	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
	    map->dm_nsegs));
	/* Advance past the last descriptor we consumed. */
	*prod = NEXT_RX_BD(*prod);
	*chain_prod = RX_CHAIN_IDX(*prod);

	return 0;
}
3836
3837 /****************************************************************************/
3838 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3839 /* */
3840 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3841 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3842 /* necessary. */
3843 /* */
3844 /* Returns: */
3845 /* 0 for success, positive value for failure. */
3846 /****************************************************************************/
int
bnx_get_buf(struct bnx_softc *sc, uint16_t *prod,
    uint16_t *chain_prod, uint32_t *prod_bseq)
{
	struct mbuf *m_new = NULL;
	int rc = 0;
	uint16_t min_free_bd;

	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
	    __func__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
	    aprint_error_dev(sc->bnx_dev,
	        "RX producer out of range: 0x%04X > 0x%04X\n",
		*chain_prod, (uint16_t)MAX_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
	    "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
	    *prod_bseq);

	/* try to get in as many mbufs as possible */
	/*
	 * min_free_bd is the worst-case number of descriptors one buffer
	 * can consume: one per page of the cluster/jumbo allocation.
	 */
	if (sc->mbuf_alloc_size == MCLBYTES)
		min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
	else
		min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
	while (sc->free_rx_bd >= min_free_bd) {
		/* Simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
		    aprint_error_dev(sc->bnx_dev,
		    "Simulating mbuf allocation failure.\n");
			sc->mbuf_sim_alloc_failed++;
			rc = ENOBUFS;
			goto bnx_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			DBPRINT(sc, BNX_WARN,
			    "%s(%d): RX mbuf header allocation failed!\n",
			    __FILE__, __LINE__);

			sc->mbuf_alloc_failed++;

			rc = ENOBUFS;
			goto bnx_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);

		/* Simulate an mbuf cluster allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
		    m_freem(m_new);
			sc->rx_mbuf_alloc--;
			sc->mbuf_alloc_failed++;
			sc->mbuf_sim_alloc_failed++;
			rc = ENOBUFS;
			goto bnx_get_buf_exit);

		/* Attach cluster (standard MTU) or jumbo external storage. */
		if (sc->mbuf_alloc_size == MCLBYTES)
			MCLGET(m_new, M_DONTWAIT);
		else
			MEXTMALLOC(m_new, sc->mbuf_alloc_size,
			    M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			DBPRINT(sc, BNX_WARN,
			    "%s(%d): RX mbuf chain allocation failed!\n",
			    __FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			sc->mbuf_alloc_failed++;

			rc = ENOBUFS;
			goto bnx_get_buf_exit;
		}

		/* bnx_add_buf() owns (and frees on error) m_new from here. */
		rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq);
		if (rc != 0)
			goto bnx_get_buf_exit;
	}

bnx_get_buf_exit:
	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
	    "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
	    *chain_prod, *prod_bseq);

	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
	    __func__);

	return rc;
}
3940
/*
 * Allocate a batch of TX packet descriptors (and their DMA maps) and
 * put them on the free list.  Runs in workqueue context so that
 * pool_get(PR_WAITOK) and bus_dmamap_create(BUS_DMA_WAITOK) may sleep.
 */
void
bnx_alloc_pkts(struct work * unused, void * arg)
{
	struct bnx_softc *sc = arg;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct bnx_pkt *pkt;
	int i, s;

	for (i = 0; i < 4; i++) { /* magic! */
		pkt = pool_get(bnx_tx_pool, PR_WAITOK);
		if (pkt == NULL)
			break;

		/* One map per packet, sized for a worst-case TX mbuf chain. */
		if (bus_dmamap_create(sc->bnx_dmatag,
		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->pkt_dmamap) != 0)
			goto put;

		/* The interface went down while we slept; discard this one. */
		if (!ISSET(ifp->if_flags, IFF_UP))
			goto stopping;

		mutex_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count++;
		mutex_exit(&sc->tx_pkt_mtx);
	}

	/* Allow another allocation request to be queued. */
	mutex_enter(&sc->tx_pkt_mtx);
	CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
	mutex_exit(&sc->tx_pkt_mtx);

	/* fire-up TX now that allocations have been done */
	s = splnet();
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);
	splx(s);

	return;

stopping:
	bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
put:
	/*
	 * NOTE(review): these error paths return without clearing
	 * BNX_ALLOC_PKTS_FLAG or kicking the send queue — confirm a
	 * stale flag cannot block a later allocation attempt.
	 */
	pool_put(bnx_tx_pool, pkt);
	return;
}
3987
3988 /****************************************************************************/
3989 /* Initialize the TX context memory. */
3990 /* */
3991 /* Returns: */
3992 /* Nothing */
3993 /****************************************************************************/
3994 void
3995 bnx_init_tx_context(struct bnx_softc *sc)
3996 {
3997 uint32_t val;
3998
3999 /* Initialize the context ID for an L2 TX chain. */
4000 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
4001 /* Set the CID type to support an L2 connection. */
4002 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
4003 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
4004 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4005 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
4006
4007 /* Point the hardware to the first page in the chain. */
4008 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32);
4009 CTX_WR(sc, GET_CID_ADDR(TX_CID),
4010 BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
4011 val = (uint32_t)(sc->tx_bd_chain_paddr[0]);
4012 CTX_WR(sc, GET_CID_ADDR(TX_CID),
4013 BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
4014 } else {
4015 /* Set the CID type to support an L2 connection. */
4016 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
4017 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
4018 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4019 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
4020
4021 /* Point the hardware to the first page in the chain. */
4022 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32);
4023 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
4024 val = (uint32_t)(sc->tx_bd_chain_paddr[0]);
4025 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
4026 }
4027 }
4028
4029
4030 /****************************************************************************/
4031 /* Allocate memory and initialize the TX data structures. */
4032 /* */
4033 /* Returns: */
4034 /* 0 for success, positive value for failure. */
4035 /****************************************************************************/
4036 int
4037 bnx_init_tx_chain(struct bnx_softc *sc)
4038 {
4039 struct tx_bd *txbd;
4040 uint32_t addr;
4041 int i, rc = 0;
4042
4043 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4044
4045 /* Force an allocation of some dmamaps for tx up front */
4046 bnx_alloc_pkts(NULL, sc);
4047
4048 /* Set the initial TX producer/consumer indices. */
4049 sc->tx_prod = 0;
4050 sc->tx_cons = 0;
4051 sc->tx_prod_bseq = 0;
4052 sc->used_tx_bd = 0;
4053 sc->max_tx_bd = USABLE_TX_BD;
4054 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
4055 DBRUNIF(1, sc->tx_full_count = 0);
4056
4057 /*
4058 * The NetXtreme II supports a linked-list structure called
4059 * a Buffer Descriptor Chain (or BD chain). A BD chain
4060 * consists of a series of 1 or more chain pages, each of which
4061 * consists of a fixed number of BD entries.
4062 * The last BD entry on each page is a pointer to the next page
4063 * in the chain, and the last pointer in the BD chain
4064 * points back to the beginning of the chain.
4065 */
4066
4067 /* Set the TX next pointer chain entries. */
4068 for (i = 0; i < TX_PAGES; i++) {
4069 int j;
4070
4071 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4072
4073 /* Check if we've reached the last page. */
4074 if (i == (TX_PAGES - 1))
4075 j = 0;
4076 else
4077 j = i + 1;
4078
4079 addr = (uint32_t)sc->tx_bd_chain_paddr[j];
4080 txbd->tx_bd_haddr_lo = addr;
4081 addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32);
4082 txbd->tx_bd_haddr_hi = addr;
4083 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
4084 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
4085 }
4086
4087 /*
4088 * Initialize the context ID for an L2 TX chain.
4089 */
4090 bnx_init_tx_context(sc);
4091
4092 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4093
4094 return rc;
4095 }
4096
4097 /****************************************************************************/
4098 /* Free memory and clear the TX data structures. */
4099 /* */
4100 /* Returns: */
4101 /* Nothing. */
4102 /****************************************************************************/
void
bnx_free_tx_chain(struct bnx_softc *sc)
{
	struct bnx_pkt *pkt;
	int i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	mutex_enter(&sc->tx_pkt_mtx);
	while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
		TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
		/*
		 * Drop the mutex around the DMA/mbuf teardown; once
		 * the packet is off the used list it needs no list
		 * protection, and these operations may be slow.
		 */
		mutex_exit(&sc->tx_pkt_mtx);

		bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
		    pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);

		m_freem(pkt->pkt_mbuf);
		DBRUNIF(1, sc->tx_mbuf_alloc--);

		mutex_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	}

	/* Destroy all the dmamaps we allocated for TX */
	while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
		TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count--;
		/* Same pattern: unlock while destroying and freeing. */
		mutex_exit(&sc->tx_pkt_mtx);

		bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
		pool_put(bnx_tx_pool, pkt);

		mutex_enter(&sc->tx_pkt_mtx);
	}
	mutex_exit(&sc->tx_pkt_mtx);

	/* Clear each TX chain page. */
	for (i = 0; i < TX_PAGES; i++) {
		memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ);
		bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
		    BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
	}

	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
	    aprint_error_dev(sc->bnx_dev,
	    "Memory leak! Lost %d mbufs from tx chain!\n",
	    sc->tx_mbuf_alloc));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
4160
4161 /****************************************************************************/
4162 /* Initialize the RX context memory. */
4163 /* */
4164 /* Returns: */
4165 /* Nothing */
4166 /****************************************************************************/
void
bnx_init_rx_context(struct bnx_softc *sc)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		uint32_t lo_water, hi_water;

		/*
		 * 5709-family parts carry RX flow-control watermarks
		 * in the context type word.  Scale the buffer counts
		 * down to register units and clamp the high watermark
		 * to its 4-bit field.
		 */
		lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD / 4;

		lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;	/* no high watermark: disable low too */
		val |= lo_water |
		    (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MQ_MAP_L2_5);
		REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
	val = (uint32_t)(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
}
4207
4208 /****************************************************************************/
4209 /* Allocate memory and initialize the RX data structures. */
4210 /* */
4211 /* Returns: */
4212 /* 0 for success, positive value for failure. */
4213 /****************************************************************************/
int
bnx_init_rx_chain(struct bnx_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq, addr;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/*
	 * Initialize the RX next pointer chain entries: the last rx_bd
	 * of each page points to the next page, and the final page
	 * points back to the first, forming a ring.
	 */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32);
		rxbd->rx_bd_haddr_hi = addr;
		addr = (uint32_t)sc->rx_bd_chain_paddr[j];
		rxbd->rx_bd_haddr_lo = addr;
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
		    0, BNX_RX_CHAIN_PAGE_SZ,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Allocate mbuf clusters for the rx_bd chain.  A failure is
	 * only logged: the chain simply starts out partially filled.
	 */
	prod = prod_bseq = 0;
	chain_prod = RX_CHAIN_IDX(prod);
	if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
		BNX_PRINTF(sc,
		    "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Hand the chain pages back to the device before arming it. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	bnx_init_rx_context(sc);

	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}
4284
4285 /****************************************************************************/
4286 /* Free memory and clear the RX data structures. */
4287 /* */
4288 /* Returns: */
4289 /* Nothing. */
4290 /****************************************************************************/
4291 void
4292 bnx_free_rx_chain(struct bnx_softc *sc)
4293 {
4294 int i;
4295
4296 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4297
4298 /* Free any mbufs still in the RX mbuf chain. */
4299 for (i = 0; i < TOTAL_RX_BD; i++) {
4300 if (sc->rx_mbuf_ptr[i] != NULL) {
4301 if (sc->rx_mbuf_map[i] != NULL) {
4302 bus_dmamap_sync(sc->bnx_dmatag,
4303 sc->rx_mbuf_map[i], 0,
4304 sc->rx_mbuf_map[i]->dm_mapsize,
4305 BUS_DMASYNC_POSTREAD);
4306 bus_dmamap_unload(sc->bnx_dmatag,
4307 sc->rx_mbuf_map[i]);
4308 }
4309 m_freem(sc->rx_mbuf_ptr[i]);
4310 sc->rx_mbuf_ptr[i] = NULL;
4311 DBRUNIF(1, sc->rx_mbuf_alloc--);
4312 }
4313 }
4314
4315 /* Clear each RX chain page. */
4316 for (i = 0; i < RX_PAGES; i++)
4317 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
4318
4319 sc->free_rx_bd = sc->max_rx_bd;
4320
4321 /* Check if we lost any mbufs in the process. */
4322 DBRUNIF((sc->rx_mbuf_alloc),
4323 aprint_error_dev(sc->bnx_dev,
4324 "Memory leak! Lost %d mbufs from rx chain!\n",
4325 sc->rx_mbuf_alloc));
4326
4327 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4328 }
4329
4330 /****************************************************************************/
4331 /* Handles PHY generated interrupt events. */
4332 /* */
4333 /* Returns: */
4334 /* Nothing. */
4335 /****************************************************************************/
void
bnx_phy_intr(struct bnx_softc *sc)
{
	uint32_t new_link_state, old_link_state;

	/* Pull in the current status block contents from DMA memory. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);
	/*
	 * attn_bits holds the link state the hardware currently
	 * reports; attn_bits_ack holds the state the driver last
	 * acknowledged.
	 */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));

		/* Run the periodic tick immediately instead of waiting. */
		callout_stop(&sc->bnx_timeout);
		bnx_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
		} else {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
}
4370
4371 /****************************************************************************/
4372 /* Handles received frame interrupt events. */
4373 /* */
4374 /* Returns: */
4375 /* Nothing. */
4376 /****************************************************************************/
void
bnx_rx_intr(struct bnx_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	uint16_t hw_cons, sw_cons, sw_chain_cons;
	uint16_t sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;
	struct l2_fhdr *l2fhdr;
	int i;

	DBRUNIF(1, sc->rx_interrupts++);
	/* Pull the latest status block contents out of DMA memory. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/*
	 * The last entry of each page is the next-page pointer, never
	 * a data descriptor, so step over it.
	 */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
	    __func__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd __diagused;
		unsigned int len;
		uint32_t status;

		/* Convert the producer/consumer indices to an actual
		 * rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
		    bnx_dump_rxbd(sc, sw_chain_cons, rxbd));

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
#ifdef DIAGNOSTIC
			/* Validate that this is the last rx_bd. */
			if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
				printf("%s: Unexpected mbuf found in "
				    "rx_bd[0x%04X]!\n",
				    device_xname(sc->bnx_dev),
				    sw_chain_cons);
			}
#endif

			/* DRC - ToDo: If the received packet is small, say
			 *             less than 128 bytes, allocate a new mbuf
			 *             here, copy the data to that mbuf, and
			 *             recycle the mapped jumbo frame.
			 */

			/* Unmap the mbuf from DMA space. */
#ifdef DIAGNOSTIC
			if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
				printf("invalid map sw_cons 0x%x "
				    "sw_prod 0x%x "
				    "sw_chain_cons 0x%x "
				    "sw_chain_prod 0x%x "
				    "hw_cons 0x%x "
				    "TOTAL_RX_BD_PER_PAGE 0x%x "
				    "TOTAL_RX_BD 0x%x\n",
				    sw_cons, sw_prod, sw_chain_cons,
				    sw_chain_prod,
				    hw_cons,
				    (int)TOTAL_RX_BD_PER_PAGE,
				    (int)TOTAL_RX_BD);
			}
#endif
			bus_dmamap_sync(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons], 0,
			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
			    aprint_error("Simulating l2_fhdr status error.\n");
			    status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BNX_MIN_MTU) ||
			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
			    aprint_error_dev(sc->bnx_dev,
			    "Unusual frame size found. "
			    "Min(%d), Actual(%d), Max(%d)\n",
			    (int)BNX_MIN_MTU, len,
			    (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);

			    bnx_dump_mbuf(sc, m);
			    bnx_breakpoint(sc));

			/* The reported length includes the FCS; drop it. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if ((status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) ||
			    len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
			    len >
			    (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bnx_add_buf(sc, m, &sw_prod,
				    &sw_chain_prod, &sw_prod_bseq)) {
					DBRUNIF(1, bnx_breakpoint(sc));
					panic("%s: Can't reuse RX mbuf!\n",
					    device_xname(sc->bnx_dev));
				}
				continue;
			}

			/*
			 * Get a new mbuf for the rx_bd. If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
			    &sw_prod_bseq)) {
				DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
				    "Failed to allocate "
				    "new mbuf, incoming frame dropped!\n"));

				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				if (bnx_add_buf(sc, m, &sw_prod,
				    &sw_chain_prod, &sw_prod_bseq)) {
					DBRUNIF(1, bnx_breakpoint(sc));
					panic("%s: Double mbuf allocation "
					    "failure!",
					    device_xname(sc->bnx_dev));
				}
				continue;
			}

			/* Skip over the l2_fhdr when passing the data up
			 * the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m_set_rcvif(m, ifp);

			DBRUN(BNX_VERBOSE_RECV,
			    struct ether_header *eh;
			    eh = mtod(m, struct ether_header *);
			    aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
			    __func__, ether_sprintf(eh->ether_dhost),
			    ether_sprintf(eh->ether_shost),
			    htons(eh->ether_type)));

			/* Validate the checksum. */

			/* Check for an IP datagram. */
			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
				/* Check if the IP checksum is valid. */
				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
#ifdef BNX_DEBUG
				else
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid IP checksum "
					    "= 0x%04X!\n",
					    __func__,
					    l2fhdr->l2_fhdr_ip_xsum
					    );
#endif
			}

			/* Check for a valid TCP/UDP frame. */
			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
				/* Check for a good TCP/UDP checksum. */
				if ((status &
				    (L2_FHDR_ERRORS_TCP_XSUM |
				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCPv4 |
					    M_CSUM_UDPv4;
				} else {
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid TCP/UDP "
					    "checksum = 0x%04X!\n",
					    __func__,
					    l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}

			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
				vlan_set_tag(m, l2fhdr->l2_fhdr_vlan_tag);
			}

			/* Pass the mbuf off to the upper layers. */

			DBPRINT(sc, BNX_VERBOSE_RECV,
			    "%s(): Passing received frame up.\n", __func__);
			if_percpuq_enqueue(ifp->if_percpuq, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);

		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons =
			    sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
			    USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Hand the RX chain pages back to the device. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

	/* Commit the working copies back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Notify the chip of the new producer index / byte sequence. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4676
4677 /****************************************************************************/
4678 /* Handles transmit completion interrupt events. */
4679 /* */
4680 /* Returns: */
4681 /* Nothing. */
4682 /****************************************************************************/
void
bnx_tx_intr(struct bnx_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct bnx_pkt *pkt;
	bus_dmamap_t map;
	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBRUNIF(1, sc->tx_interrupts++);
	/* Pull the latest status block contents out of DMA memory. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BNX_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
		    __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    aprint_error_dev(sc->bnx_dev,
		    "TX chain consumer out of range! 0x%04X > 0x%04X\n",
		    sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain
		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    aprint_error_dev(sc->bnx_dev,
		    "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
		    bnx_breakpoint(sc));

		DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Only the descriptor ending a packet has a bnx_pkt
		 * attached; intermediate descriptors are just counted
		 * off via used_tx_bd below.
		 */
		mutex_enter(&sc->tx_pkt_mtx);
		pkt = TAILQ_FIRST(&sc->tx_used_pkts);
		if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
			TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
			mutex_exit(&sc->tx_pkt_mtx);
			/*
			 * Free the associated mbuf. Remember
			 * that only the last tx_bd of a packet
			 * has an mbuf pointer and DMA map.
			 */
			map = pkt->pkt_dmamap;
			bus_dmamap_sync(sc->bnx_dmatag, map, 0,
			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bnx_dmatag, map);

			m_freem(pkt->pkt_mbuf);
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;

			/* Return the descriptor to the free list. */
			mutex_enter(&sc->tx_pkt_mtx);
			TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		}
		mutex_exit(&sc->tx_pkt_mtx);

		sc->used_tx_bd--;
		DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
		    __FILE__, __LINE__, sc->used_tx_bd);

		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons =
		    sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
		    USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
		    aprint_debug_dev(sc->bnx_dev,
		    "Open TX chain! %d/%d (used/total)\n",
		    sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}
4796
4797 /****************************************************************************/
4798 /* Disables interrupt generation. */
4799 /* */
4800 /* Returns: */
4801 /* Nothing. */
4802 /****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Mask further interrupts from the controller. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted write before returning. */
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
4809
4810 /****************************************************************************/
4811 /* Enables interrupt generation. */
4812 /* */
4813 /* Returns: */
4814 /* Nothing. */
4815 /****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	uint32_t val;

	/* Ack the last seen status block index with interrupts masked... */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* ...then re-ack the same index without MASK_INT to unmask. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/*
	 * COAL_NOW forces an immediate coalescing pass so any work that
	 * arrived while interrupts were masked is not left unserviced.
	 */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}
4830
4831 /****************************************************************************/
4832 /* Handles controller initialization. */
4833 /* */
4834 /****************************************************************************/
int
bnx_init(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	uint32_t ether_mtu;
	int s, error = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	s = splnet();

	/* Quiesce the interface before reprogramming the controller. */
	bnx_stop(ifp, 0);

	if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if ((error = bnx_chipinit(sc)) != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if ((error = bnx_blockinit(sc)) != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Calculate and program the Ethernet MRU size. */
	if (ifp->if_mtu <= ETHERMTU) {
		/* Standard frames fit in a regular mbuf cluster. */
		ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
		sc->mbuf_alloc_size = MCLBYTES;
	} else {
		/* Jumbo frames require the larger jumbo RX allocation. */
		ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
		sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
	}


	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n", __func__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
	    BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/*
	 * NOTE(review): +2 is presumably the RX alignment pad and +8 the
	 * FCS/trailer allowance -- confirm against the l2_fhdr layout.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __func__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_iff(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	if ((error = ether_mediachange(ifp)) != 0)
		goto bnx_init_exit;

	/* Mark the interface up and ready to accept packets. */
	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);

	/* Start the periodic driver tick (firmware pulse, stats, MII). */
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	splx(s);

	return error;
}
4919
4920 /****************************************************************************/
4921 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4922 /* memory visible to the controller. */
4923 /* */
4924 /* Returns: */
4925 /* 0 for success, positive value for failure. */
4926 /****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
{
	struct bnx_pkt *pkt;
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0;
	uint16_t chain_prod, prod;
#ifdef BNX_DEBUG
	uint16_t debug_prod;
#endif
	uint32_t addr, prod_bseq;
	int i, error;
	static struct work bnx_wk; /* Dummy work. Statically allocated. */

	/* Grab a free packet descriptor under the pkt mutex. */
	mutex_enter(&sc->tx_pkt_mtx);
	pkt = TAILQ_FIRST(&sc->tx_free_pkts);
	if (pkt == NULL) {
		if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) {
			mutex_exit(&sc->tx_pkt_mtx);
			return ENETDOWN;
		}

		/*
		 * Ran dry: ask the workqueue to allocate more pkt
		 * descriptors, but only if a request is not already
		 * pending (BNX_ALLOC_PKTS_FLAG acts as the latch).
		 */
		if (sc->tx_pkt_count <= TOTAL_TX_BD &&
		    !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) {
			workqueue_enqueue(sc->bnx_wq, &bnx_wk, NULL);
			SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
		}

		mutex_exit(&sc->tx_pkt_mtx);
		return ENOMEM;
	}
	TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
	mutex_exit(&sc->tx_pkt_mtx);

	/* Transfer any checksum offload flags to the bd. */
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (vlan_has_tag(m)) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = vlan_get_tag(m);
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = pkt->pkt_dmamap;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Error mapping mbuf into TX chain!\n");
		sc->tx_dma_map_failures++;
		goto maperr;
	}
	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* Make sure there's room in the chain */
	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
		goto nospace;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif
	DBPRINT(sc, BNX_INFO_SEND,
	    "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
	    "prod_bseq = 0x%08X\n",
	    __func__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA address into the hi/lo bd fields. */
		addr = (uint32_t)map->dm_segs[i].ds_addr;
		txbd->tx_bd_haddr_lo = addr;
		addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = addr;
		txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
		txbd->tx_bd_vlan_tag = vlan_tag;
		txbd->tx_bd_flags = flags;
		/* Byte-sequence counter advances by each segment's length. */
		prod_bseq += map->dm_segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= TX_BD_FLAGS_START;
		prod = NEXT_TX_BD(prod);
	}
	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= TX_BD_FLAGS_END;

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
	    "%s(): End: prod = 0x%04X, chain_prod = %04X, "
	    "prod_bseq = 0x%08X\n",
	    __func__, prod, chain_prod, prod_bseq);

	/* Record the mbuf and its last bd index for bnx_tx_intr() reclaim. */
	pkt->pkt_mbuf = m;
	pkt->pkt_end_desc = chain_prod;

	mutex_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
	mutex_exit(&sc->tx_pkt_mtx);

	sc->used_tx_bd += map->dm_nsegs;
	DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
	    __FILE__, __LINE__, sc->used_tx_bd);

	/* Update some debug statistics counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return 0;


/* Error paths: undo the DMA load (if done) and return the pkt descriptor. */
nospace:
	bus_dmamap_unload(sc->bnx_dmatag, map);
maperr:
	mutex_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	mutex_exit(&sc->tx_pkt_mtx);

	return ENOMEM;
}
5074
5075 /****************************************************************************/
5076 /* Main transmit routine. */
5077 /* */
5078 /* Returns: */
5079 /* Nothing. */
5080 /****************************************************************************/
void
bnx_start(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
#ifdef BNX_DEBUG
	uint16_t tx_chain_prod;
#endif

	/* If there's no link or the transmit queue is empty then just exit. */
	if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) {
		DBPRINT(sc, BNX_INFO_SEND,
		    "%s(): output active or device not running.\n", __func__);
		goto bnx_start_exit;
	}

	/* prod points to the next free tx_bd. */
#ifdef BNX_DEBUG
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
#endif

	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, "
	    "used_tx %d max_tx %d\n",
	    __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq,
	    sc->used_tx_bd, sc->max_tx_bd);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {
		/* Check for any frames to send. */
		/*
		 * POLL (not DEQUEUE) first: if encapsulation fails the
		 * mbuf stays on the send queue for a later retry.
		 */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag to wait
		 * for the NIC to drain the chain.
		 */
		if (bnx_tx_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
			    "business! Total tx_bd used = %d\n",
			    sc->used_tx_bd);
			break;
		}

		/* Encapsulation succeeded; now commit the dequeue. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		count++;

		/* Send a copy of the frame to any BPF listeners. */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BNX_VERBOSE_SEND,
		    "%s(): No packets were dequeued\n", __func__);
		goto bnx_start_exit;
	}

	/* Update the driver's counters. */
#ifdef BNX_DEBUG
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
#endif

	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, "
	    "tx_chain_prod = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* Start the transmit. */
	/* Ring the doorbell: write the new producer index and byte count. */
	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BNX_TX_TIMEOUT;

bnx_start_exit:
	return;
}
5164
5165 /****************************************************************************/
5166 /* Handles any IOCTL calls from the operating system. */
5167 /* */
5168 /* Returns: */
5169 /* 0 for success, positive value for failure. */
5170 /****************************************************************************/
5171 int
5172 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5173 {
5174 struct bnx_softc *sc = ifp->if_softc;
5175 struct ifreq *ifr = (struct ifreq *) data;
5176 struct mii_data *mii = &sc->bnx_mii;
5177 int s, error = 0;
5178
5179 s = splnet();
5180
5181 switch (command) {
5182 case SIOCSIFFLAGS:
5183 if ((error = ifioctl_common(ifp, command, data)) != 0)
5184 break;
5185 /* XXX set an ifflags callback and let ether_ioctl
5186 * handle all of this.
5187 */
5188 if (ISSET(ifp->if_flags, IFF_UP)) {
5189 if (ifp->if_flags & IFF_RUNNING)
5190 error = ENETRESET;
5191 else
5192 bnx_init(ifp);
5193 } else if (ifp->if_flags & IFF_RUNNING)
5194 bnx_stop(ifp, 1);
5195 break;
5196
5197 case SIOCSIFMEDIA:
5198 case SIOCGIFMEDIA:
5199 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5200 sc->bnx_phy_flags);
5201
5202 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5203 break;
5204
5205 default:
5206 error = ether_ioctl(ifp, command, data);
5207 }
5208
5209 if (error == ENETRESET) {
5210 if (ifp->if_flags & IFF_RUNNING)
5211 bnx_iff(sc);
5212 error = 0;
5213 }
5214
5215 splx(s);
5216 return error;
5217 }
5218
5219 /****************************************************************************/
5220 /* Transmit timeout handler. */
5221 /* */
5222 /* Returns: */
5223 /* Nothing. */
5224 /****************************************************************************/
void
bnx_watchdog(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;

	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
	    bnx_dump_status_block(sc));
	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
		return;

	aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");

	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */

	/* Recover by fully re-initializing the controller. */
	bnx_init(ifp);

	ifp->if_oerrors++;
}
5247
5248 /*
5249 * Interrupt handler.
5250 */
5251 /****************************************************************************/
5252 /* Main interrupt entry point. Verifies that the controller generated the */
5253 /* interrupt and then calls a separate routine to handle the various */
5254 /* interrupt causes (PHY, TX, RX). */
5255 /* */
5256 /* Returns: */
5257 /* 0 for success, positive value for failure. */
5258 /****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc *sc;
	struct ifnet *ifp;
	uint32_t status_attn_bits;
	const struct status_block *sblk;

	sc = xsc;

	ifp = &sc->bnx_ec.ec_if;

	/* Ignore spurious interrupts when the device is down or detached. */
	if (!device_is_active(sc->bnx_dev) ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	DBRUNIF(1, sc->interrupts_generated++);

	/* Make the device-written status block visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
	    (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE))
		return 0;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {
		sblk = sc->status_block;
		status_attn_bits = sblk->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    aprint_debug("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		/* A bit differing from its ack copy means a new event. */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE))
			bnx_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			DBRUN(1, sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sblk->status_attn_bits);

			DBRUN(BNX_FATAL,
			    if (bnx_debug_unexpected_attention == 0)
			    bnx_breakpoint(sc));

			/* Full reinit is the only recovery path here. */
			bnx_init(ifp);
			return 1;
		}

		/* Check for any completed RX frames. */
		if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/*
		 * Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = sblk->status_idx;

		/* Prevent speculative reads from getting ahead of the
		 * status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the isr. */
		if ((sblk->status_rx_quick_consumer_index0 ==
		    sc->hw_rx_cons) &&
		    (sblk->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;
	}

	/* Give the status block back to the device before re-enabling. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	/* Same two-step masked/unmasked ack sequence as bnx_enable_intr(). */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if_schedule_deferred_start(ifp);

	return 1;
}
5371
5372 /****************************************************************************/
5373 /* Programs the various packet receive modes (broadcast and multicast). */
5374 /* */
5375 /* Returns: */
5376 /* Nothing. */
5377 /****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct ethercom *ec = &sc->bnx_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	uint32_t rx_mode, sort_mode;
	int h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/*
			 * Address ranges can't be expressed in the hash
			 * filter, so fall back to accepting everything.
			 */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				goto allmulti;
			}
			/* Fold the CRC into a bit index in the hash regs. */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
5460
5461 /****************************************************************************/
5462 /* Called periodically to update statistics from the controller's */
5463 /* statistics block. */
5464 /* */
5465 /* Returns: */
5466 /* Nothing. */
5467 /****************************************************************************/
5468 void
5469 bnx_stats_update(struct bnx_softc *sc)
5470 {
5471 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5472 struct statistics_block *stats;
5473
5474 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
5475 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5476 BUS_DMASYNC_POSTREAD);
5477
5478 stats = (struct statistics_block *)sc->stats_block;
5479
5480 /*
5481 * Update the interface statistics from the
5482 * hardware statistics.
5483 */
5484 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5485
5486 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5487 (u_long)stats->stat_EtherStatsOverrsizePkts +
5488 (u_long)stats->stat_IfInMBUFDiscards +
5489 (u_long)stats->stat_Dot3StatsAlignmentErrors +
5490 (u_long)stats->stat_Dot3StatsFCSErrors;
5491
5492 ifp->if_oerrors = (u_long)
5493 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5494 (u_long)stats->stat_Dot3StatsExcessiveCollisions +
5495 (u_long)stats->stat_Dot3StatsLateCollisions;
5496
5497 /*
5498 * Certain controllers don't report
5499 * carrier sense errors correctly.
5500 * See errata E11_5708CA0_1165.
5501 */
5502 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
5503 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
5504 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5505
5506 /*
5507 * Update the sysctl statistics from the
5508 * hardware statistics.
5509 */
5510 sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5511 (uint64_t) stats->stat_IfHCInOctets_lo;
5512
5513 sc->stat_IfHCInBadOctets =
5514 ((uint64_t) stats->stat_IfHCInBadOctets_hi << 32) +
5515 (uint64_t) stats->stat_IfHCInBadOctets_lo;
5516
5517 sc->stat_IfHCOutOctets =
5518 ((uint64_t) stats->stat_IfHCOutOctets_hi << 32) +
5519 (uint64_t) stats->stat_IfHCOutOctets_lo;
5520
5521 sc->stat_IfHCOutBadOctets =
5522 ((uint64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
5523 (uint64_t) stats->stat_IfHCOutBadOctets_lo;
5524
5525 sc->stat_IfHCInUcastPkts =
5526 ((uint64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
5527 (uint64_t) stats->stat_IfHCInUcastPkts_lo;
5528
5529 sc->stat_IfHCInMulticastPkts =
5530 ((uint64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
5531 (uint64_t) stats->stat_IfHCInMulticastPkts_lo;
5532
5533 sc->stat_IfHCInBroadcastPkts =
5534 ((uint64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5535 (uint64_t) stats->stat_IfHCInBroadcastPkts_lo;
5536
5537 sc->stat_IfHCOutUcastPkts =
5538 ((uint64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
5539 (uint64_t) stats->stat_IfHCOutUcastPkts_lo;
5540
5541 sc->stat_IfHCOutMulticastPkts =
5542 ((uint64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5543 (uint64_t) stats->stat_IfHCOutMulticastPkts_lo;
5544
5545 sc->stat_IfHCOutBroadcastPkts =
5546 ((uint64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5547 (uint64_t) stats->stat_IfHCOutBroadcastPkts_lo;
5548
5549 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5550 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5551
5552 sc->stat_Dot3StatsCarrierSenseErrors =
5553 stats->stat_Dot3StatsCarrierSenseErrors;
5554
5555 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
5556
5557 sc->stat_Dot3StatsAlignmentErrors =
5558 stats->stat_Dot3StatsAlignmentErrors;
5559
5560 sc->stat_Dot3StatsSingleCollisionFrames =
5561 stats->stat_Dot3StatsSingleCollisionFrames;
5562
5563 sc->stat_Dot3StatsMultipleCollisionFrames =
5564 stats->stat_Dot3StatsMultipleCollisionFrames;
5565
5566 sc->stat_Dot3StatsDeferredTransmissions =
5567 stats->stat_Dot3StatsDeferredTransmissions;
5568
5569 sc->stat_Dot3StatsExcessiveCollisions =
5570 stats->stat_Dot3StatsExcessiveCollisions;
5571
5572 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
5573
5574 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
5575
5576 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
5577
5578 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
5579
5580 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
5581
5582 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
5583
5584 sc->stat_EtherStatsPktsRx64Octets =
5585 stats->stat_EtherStatsPktsRx64Octets;
5586
5587 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5588 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5589
5590 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5591 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5592
5593 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5594 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5595
5596 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5597 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5598
5599 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5600 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5601
5602 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5603 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5604
5605 sc->stat_EtherStatsPktsTx64Octets =
5606 stats->stat_EtherStatsPktsTx64Octets;
5607
5608 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5609 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5610
5611 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5612 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5613
5614 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5615 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5616
5617 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5618 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5619
5620 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5621 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5622
5623 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5624 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5625
5626 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
5627
5628 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
5629
5630 sc->stat_OutXonSent = stats->stat_OutXonSent;
5631
5632 sc->stat_OutXoffSent = stats->stat_OutXoffSent;
5633
5634 sc->stat_FlowControlDone = stats->stat_FlowControlDone;
5635
5636 sc->stat_MacControlFramesReceived =
5637 stats->stat_MacControlFramesReceived;
5638
5639 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
5640
5641 sc->stat_IfInFramesL2FilterDiscards =
5642 stats->stat_IfInFramesL2FilterDiscards;
5643
5644 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
5645
5646 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
5647
5648 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
5649
5650 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
5651
5652 sc->stat_CatchupInRuleCheckerDiscards =
5653 stats->stat_CatchupInRuleCheckerDiscards;
5654
5655 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
5656
5657 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
5658
5659 sc->stat_CatchupInRuleCheckerP4Hit =
5660 stats->stat_CatchupInRuleCheckerP4Hit;
5661
5662 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
5663 }
5664
5665 void
5666 bnx_tick(void *xsc)
5667 {
5668 struct bnx_softc *sc = xsc;
5669 struct mii_data *mii;
5670 uint32_t msg;
5671 uint16_t prod, chain_prod;
5672 uint32_t prod_bseq;
5673 int s = splnet();
5674
5675 /* Tell the firmware that the driver is still running. */
5676 #ifdef BNX_DEBUG
5677 msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5678 #else
5679 msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5680 #endif
5681 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5682
5683 /* Update the statistics from the hardware statistics block. */
5684 bnx_stats_update(sc);
5685
5686 mii = &sc->bnx_mii;
5687 mii_tick(mii);
5688
5689 /* try to get more RX buffers, just in case */
5690 prod = sc->rx_prod;
5691 prod_bseq = sc->rx_prod_bseq;
5692 chain_prod = RX_CHAIN_IDX(prod);
5693 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
5694 sc->rx_prod = prod;
5695 sc->rx_prod_bseq = prod_bseq;
5696
5697 /* Schedule the next tick. */
5698 if (!sc->bnx_detaching)
5699 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
5700
5701 splx(s);
5702 return;
5703 }
5704
5705 /****************************************************************************/
5706 /* BNX Debug Routines */
5707 /****************************************************************************/
5708 #ifdef BNX_DEBUG
5709
5710 /****************************************************************************/
5711 /* Prints out information about an mbuf. */
5712 /* */
5713 /* Returns: */
5714 /* Nothing. */
5715 /****************************************************************************/
5716 void
5717 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5718 {
5719 struct mbuf *mp = m;
5720
5721 if (m == NULL) {
5722 /* Index out of range. */
5723 aprint_error("mbuf ptr is null!\n");
5724 return;
5725 }
5726
5727 while (mp) {
5728 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5729 mp, mp->m_len);
5730
5731 if (mp->m_flags & M_EXT)
5732 aprint_debug("M_EXT ");
5733 if (mp->m_flags & M_PKTHDR)
5734 aprint_debug("M_PKTHDR ");
5735 aprint_debug("\n");
5736
5737 if (mp->m_flags & M_EXT)
5738 aprint_debug("- m_ext: vaddr = %p, "
5739 "ext_size = 0x%04zX\n", mp, mp->m_ext.ext_size);
5740
5741 mp = mp->m_next;
5742 }
5743 }
5744
5745 /****************************************************************************/
5746 /* Prints out the mbufs in the TX mbuf chain. */
5747 /* */
5748 /* Returns: */
5749 /* Nothing. */
5750 /****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	/*
	 * The whole body is compiled out: the driver keeps TX mbufs in
	 * bnx_pkt descriptors, so the tx_mbuf_ptr[] array referenced
	 * below no longer exists.  The function is kept as a stub so
	 * DBRUN() call sites still compile.
	 */
#if 0
	struct mbuf *m;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
#endif
}
5775
5776 /*
5777 * This routine prints the RX mbuf chain.
5778 */
5779 void
5780 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5781 {
5782 struct mbuf *m;
5783 int i;
5784
5785 aprint_debug_dev(sc->bnx_dev,
5786 "----------------------------"
5787 " rx mbuf data "
5788 "----------------------------\n");
5789
5790 for (i = 0; i < count; i++) {
5791 m = sc->rx_mbuf_ptr[chain_prod];
5792 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5793 bnx_dump_mbuf(sc, m);
5794 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5795 }
5796
5797
5798 aprint_debug_dev(sc->bnx_dev,
5799 "--------------------------------------------"
5800 "----------------------------\n");
5801 }
5802
5803 void
5804 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5805 {
5806 if (idx > MAX_TX_BD)
5807 /* Index out of range. */
5808 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5809 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5810 /* TX Chain page pointer. */
5811 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5812 "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5813 txbd->tx_bd_haddr_lo);
5814 else
5815 /* Normal tx_bd entry. */
5816 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5817 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5818 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5819 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5820 txbd->tx_bd_flags);
5821 }
5822
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer (last entry of a page). */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
		    rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
		    rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
5841
/* Print one received-frame header (l2_fhdr) for debugging. */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5852
5853 /*
5854 * This routine prints the TX chain.
5855 */
5856 void
5857 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5858 {
5859 struct tx_bd *txbd;
5860 int i;
5861
5862 /* First some info about the tx_bd chain structure. */
5863 aprint_debug_dev(sc->bnx_dev,
5864 "----------------------------"
5865 " tx_bd chain "
5866 "----------------------------\n");
5867
5868 BNX_PRINTF(sc,
5869 "page size = 0x%08X, tx chain pages = 0x%08X\n",
5870 (uint32_t)BCM_PAGE_SIZE, (uint32_t) TX_PAGES);
5871
5872 BNX_PRINTF(sc,
5873 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5874 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);
5875
5876 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD);
5877
5878 aprint_error_dev(sc->bnx_dev, ""
5879 "-----------------------------"
5880 " tx_bd data "
5881 "-----------------------------\n");
5882
5883 /* Now print out the tx_bd's themselves. */
5884 for (i = 0; i < count; i++) {
5885 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5886 bnx_dump_txbd(sc, tx_prod, txbd);
5887 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5888 }
5889
5890 aprint_debug_dev(sc->bnx_dev,
5891 "-----------------------------"
5892 "--------------"
5893 "-----------------------------\n");
5894 }
5895
5896 /*
5897 * This routine prints the RX chain.
5898 */
5899 void
5900 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5901 {
5902 struct rx_bd *rxbd;
5903 int i;
5904
5905 /* First some info about the tx_bd chain structure. */
5906 aprint_debug_dev(sc->bnx_dev,
5907 "----------------------------"
5908 " rx_bd chain "
5909 "----------------------------\n");
5910
5911 aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n");
5912
5913 BNX_PRINTF(sc,
5914 "page size = 0x%08X, rx chain pages = 0x%08X\n",
5915 (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
5916
5917 BNX_PRINTF(sc,
5918 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5919 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE);
5920
5921 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD);
5922
5923 aprint_error_dev(sc->bnx_dev,
5924 "----------------------------"
5925 " rx_bd data "
5926 "----------------------------\n");
5927
5928 /* Now print out the rx_bd's themselves. */
5929 for (i = 0; i < count; i++) {
5930 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5931 bnx_dump_rxbd(sc, rx_prod, rxbd);
5932 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5933 }
5934
5935 aprint_debug_dev(sc->bnx_dev,
5936 "----------------------------"
5937 "--------------"
5938 "----------------------------\n");
5939 }
5940
5941 /*
5942 * This routine prints the status block.
5943 */
5944 void
5945 bnx_dump_status_block(struct bnx_softc *sc)
5946 {
5947 struct status_block *sblk;
5948 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5949 BUS_DMASYNC_POSTREAD);
5950
5951 sblk = sc->status_block;
5952
5953 aprint_debug_dev(sc->bnx_dev, "----------------------------- "
5954 "Status Block -----------------------------\n");
5955
5956 BNX_PRINTF(sc,
5957 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5958 sblk->status_attn_bits, sblk->status_attn_bits_ack,
5959 sblk->status_idx);
5960
5961 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
5962 sblk->status_rx_quick_consumer_index0,
5963 sblk->status_tx_quick_consumer_index0);
5964
5965 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5966
5967 /* Theses indices are not used for normal L2 drivers. */
5968 if (sblk->status_rx_quick_consumer_index1 ||
5969 sblk->status_tx_quick_consumer_index1)
5970 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
5971 sblk->status_rx_quick_consumer_index1,
5972 sblk->status_tx_quick_consumer_index1);
5973
5974 if (sblk->status_rx_quick_consumer_index2 ||
5975 sblk->status_tx_quick_consumer_index2)
5976 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
5977 sblk->status_rx_quick_consumer_index2,
5978 sblk->status_tx_quick_consumer_index2);
5979
5980 if (sblk->status_rx_quick_consumer_index3 ||
5981 sblk->status_tx_quick_consumer_index3)
5982 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
5983 sblk->status_rx_quick_consumer_index3,
5984 sblk->status_tx_quick_consumer_index3);
5985
5986 if (sblk->status_rx_quick_consumer_index4 ||
5987 sblk->status_rx_quick_consumer_index5)
5988 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
5989 sblk->status_rx_quick_consumer_index4,
5990 sblk->status_rx_quick_consumer_index5);
5991
5992 if (sblk->status_rx_quick_consumer_index6 ||
5993 sblk->status_rx_quick_consumer_index7)
5994 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
5995 sblk->status_rx_quick_consumer_index6,
5996 sblk->status_rx_quick_consumer_index7);
5997
5998 if (sblk->status_rx_quick_consumer_index8 ||
5999 sblk->status_rx_quick_consumer_index9)
6000 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
6001 sblk->status_rx_quick_consumer_index8,
6002 sblk->status_rx_quick_consumer_index9);
6003
6004 if (sblk->status_rx_quick_consumer_index10 ||
6005 sblk->status_rx_quick_consumer_index11)
6006 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
6007 sblk->status_rx_quick_consumer_index10,
6008 sblk->status_rx_quick_consumer_index11);
6009
6010 if (sblk->status_rx_quick_consumer_index12 ||
6011 sblk->status_rx_quick_consumer_index13)
6012 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
6013 sblk->status_rx_quick_consumer_index12,
6014 sblk->status_rx_quick_consumer_index13);
6015
6016 if (sblk->status_rx_quick_consumer_index14 ||
6017 sblk->status_rx_quick_consumer_index15)
6018 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
6019 sblk->status_rx_quick_consumer_index14,
6020 sblk->status_rx_quick_consumer_index15);
6021
6022 if (sblk->status_completion_producer_index ||
6023 sblk->status_cmd_consumer_index)
6024 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
6025 sblk->status_completion_producer_index,
6026 sblk->status_cmd_consumer_index);
6027
6028 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
6029 "-----------------------------\n");
6030 }
6031
6032 /*
6033 * This routine prints the statistics block.
6034 */
6035 void
6036 bnx_dump_stats_block(struct bnx_softc *sc)
6037 {
6038 struct statistics_block *sblk;
6039 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
6040 BUS_DMASYNC_POSTREAD);
6041
6042 sblk = sc->stats_block;
6043
6044 aprint_debug_dev(sc->bnx_dev, ""
6045 "-----------------------------"
6046 " Stats Block "
6047 "-----------------------------\n");
6048
6049 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
6050 "IfHcInBadOctets = 0x%08X:%08X\n",
6051 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6052 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6053
6054 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
6055 "IfHcOutBadOctets = 0x%08X:%08X\n",
6056 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6057 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6058
6059 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
6060 "IfHcInMulticastPkts = 0x%08X:%08X\n",
6061 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6062 sblk->stat_IfHCInMulticastPkts_hi,
6063 sblk->stat_IfHCInMulticastPkts_lo);
6064
6065 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
6066 "IfHcOutUcastPkts = 0x%08X:%08X\n",
6067 sblk->stat_IfHCInBroadcastPkts_hi,
6068 sblk->stat_IfHCInBroadcastPkts_lo,
6069 sblk->stat_IfHCOutUcastPkts_hi,
6070 sblk->stat_IfHCOutUcastPkts_lo);
6071
6072 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
6073 "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6074 sblk->stat_IfHCOutMulticastPkts_hi,
6075 sblk->stat_IfHCOutMulticastPkts_lo,
6076 sblk->stat_IfHCOutBroadcastPkts_hi,
6077 sblk->stat_IfHCOutBroadcastPkts_lo);
6078
6079 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6080 BNX_PRINTF(sc, "0x%08X : "
6081 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6082 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6083
6084 if (sblk->stat_Dot3StatsCarrierSenseErrors)
6085 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6086 sblk->stat_Dot3StatsCarrierSenseErrors);
6087
6088 if (sblk->stat_Dot3StatsFCSErrors)
6089 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6090 sblk->stat_Dot3StatsFCSErrors);
6091
6092 if (sblk->stat_Dot3StatsAlignmentErrors)
6093 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6094 sblk->stat_Dot3StatsAlignmentErrors);
6095
6096 if (sblk->stat_Dot3StatsSingleCollisionFrames)
6097 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6098 sblk->stat_Dot3StatsSingleCollisionFrames);
6099
6100 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6101 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6102 sblk->stat_Dot3StatsMultipleCollisionFrames);
6103
6104 if (sblk->stat_Dot3StatsDeferredTransmissions)
6105 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6106 sblk->stat_Dot3StatsDeferredTransmissions);
6107
6108 if (sblk->stat_Dot3StatsExcessiveCollisions)
6109 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6110 sblk->stat_Dot3StatsExcessiveCollisions);
6111
6112 if (sblk->stat_Dot3StatsLateCollisions)
6113 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6114 sblk->stat_Dot3StatsLateCollisions);
6115
6116 if (sblk->stat_EtherStatsCollisions)
6117 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6118 sblk->stat_EtherStatsCollisions);
6119
6120 if (sblk->stat_EtherStatsFragments)
6121 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6122 sblk->stat_EtherStatsFragments);
6123
6124 if (sblk->stat_EtherStatsJabbers)
6125 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6126 sblk->stat_EtherStatsJabbers);
6127
6128 if (sblk->stat_EtherStatsUndersizePkts)
6129 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6130 sblk->stat_EtherStatsUndersizePkts);
6131
6132 if (sblk->stat_EtherStatsOverrsizePkts)
6133 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6134 sblk->stat_EtherStatsOverrsizePkts);
6135
6136 if (sblk->stat_EtherStatsPktsRx64Octets)
6137 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6138 sblk->stat_EtherStatsPktsRx64Octets);
6139
6140 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6141 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6142 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6143
6144 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6145 BNX_PRINTF(sc, "0x%08X : "
6146 "EtherStatsPktsRx128Octetsto255Octets\n",
6147 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6148
6149 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6150 BNX_PRINTF(sc, "0x%08X : "
6151 "EtherStatsPktsRx256Octetsto511Octets\n",
6152 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6153
6154 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6155 BNX_PRINTF(sc, "0x%08X : "
6156 "EtherStatsPktsRx512Octetsto1023Octets\n",
6157 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6158
6159 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6160 BNX_PRINTF(sc, "0x%08X : "
6161 "EtherStatsPktsRx1024Octetsto1522Octets\n",
6162 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6163
6164 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6165 BNX_PRINTF(sc, "0x%08X : "
6166 "EtherStatsPktsRx1523Octetsto9022Octets\n",
6167 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6168
6169 if (sblk->stat_EtherStatsPktsTx64Octets)
6170 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6171 sblk->stat_EtherStatsPktsTx64Octets);
6172
6173 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6174 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6175 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6176
6177 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6178 BNX_PRINTF(sc, "0x%08X : "
6179 "EtherStatsPktsTx128Octetsto255Octets\n",
6180 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6181
6182 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6183 BNX_PRINTF(sc, "0x%08X : "
6184 "EtherStatsPktsTx256Octetsto511Octets\n",
6185 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6186
6187 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6188 BNX_PRINTF(sc, "0x%08X : "
6189 "EtherStatsPktsTx512Octetsto1023Octets\n",
6190 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6191
6192 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6193 BNX_PRINTF(sc, "0x%08X : "
6194 "EtherStatsPktsTx1024Octetsto1522Octets\n",
6195 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6196
6197 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6198 BNX_PRINTF(sc, "0x%08X : "
6199 "EtherStatsPktsTx1523Octetsto9022Octets\n",
6200 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6201
6202 if (sblk->stat_XonPauseFramesReceived)
6203 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6204 sblk->stat_XonPauseFramesReceived);
6205
6206 if (sblk->stat_XoffPauseFramesReceived)
6207 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6208 sblk->stat_XoffPauseFramesReceived);
6209
6210 if (sblk->stat_OutXonSent)
6211 BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
6212 sblk->stat_OutXonSent);
6213
6214 if (sblk->stat_OutXoffSent)
6215 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
6216 sblk->stat_OutXoffSent);
6217
6218 if (sblk->stat_FlowControlDone)
6219 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
6220 sblk->stat_FlowControlDone);
6221
6222 if (sblk->stat_MacControlFramesReceived)
6223 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6224 sblk->stat_MacControlFramesReceived);
6225
6226 if (sblk->stat_XoffStateEntered)
6227 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6228 sblk->stat_XoffStateEntered);
6229
6230 if (sblk->stat_IfInFramesL2FilterDiscards)
6231 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6232 sblk->stat_IfInFramesL2FilterDiscards);
6233
6234 if (sblk->stat_IfInRuleCheckerDiscards)
6235 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6236 sblk->stat_IfInRuleCheckerDiscards);
6237
6238 if (sblk->stat_IfInFTQDiscards)
6239 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6240 sblk->stat_IfInFTQDiscards);
6241
6242 if (sblk->stat_IfInMBUFDiscards)
6243 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6244 sblk->stat_IfInMBUFDiscards);
6245
6246 if (sblk->stat_IfInRuleCheckerP4Hit)
6247 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6248 sblk->stat_IfInRuleCheckerP4Hit);
6249
6250 if (sblk->stat_CatchupInRuleCheckerDiscards)
6251 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6252 sblk->stat_CatchupInRuleCheckerDiscards);
6253
6254 if (sblk->stat_CatchupInFTQDiscards)
6255 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6256 sblk->stat_CatchupInFTQDiscards);
6257
6258 if (sblk->stat_CatchupInMBUFDiscards)
6259 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6260 sblk->stat_CatchupInMBUFDiscards);
6261
6262 if (sblk->stat_CatchupInRuleCheckerP4Hit)
6263 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6264 sblk->stat_CatchupInRuleCheckerP4Hit);
6265
6266 aprint_debug_dev(sc->bnx_dev,
6267 "-----------------------------"
6268 "--------------"
6269 "-----------------------------\n");
6270 }
6271
6272 void
6273 bnx_dump_driver_state(struct bnx_softc *sc)
6274 {
6275 aprint_debug_dev(sc->bnx_dev,
6276 "-----------------------------"
6277 " Driver State "
6278 "-----------------------------\n");
6279
6280 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6281 "address\n", sc);
6282
6283 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6284 sc->status_block);
6285
6286 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6287 "address\n", sc->stats_block);
6288
6289 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6290 "adddress\n", sc->tx_bd_chain);
6291
6292 #if 0
6293 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6294 sc->rx_bd_chain);
6295
6296 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6297 sc->tx_mbuf_ptr);
6298 #endif
6299
6300 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6301 sc->rx_mbuf_ptr);
6302
6303 BNX_PRINTF(sc,
6304 " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
6305 sc->interrupts_generated);
6306
6307 BNX_PRINTF(sc,
6308 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6309 sc->rx_interrupts);
6310
6311 BNX_PRINTF(sc,
6312 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6313 sc->tx_interrupts);
6314
6315 BNX_PRINTF(sc,
6316 " 0x%08X - (sc->last_status_idx) status block index\n",
6317 sc->last_status_idx);
6318
6319 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
6320 sc->tx_prod);
6321
6322 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
6323 sc->tx_cons);
6324
6325 BNX_PRINTF(sc,
6326 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6327 sc->tx_prod_bseq);
6328 BNX_PRINTF(sc,
6329 " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6330 sc->tx_mbuf_alloc);
6331
6332 BNX_PRINTF(sc,
6333 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6334 sc->used_tx_bd);
6335
6336 BNX_PRINTF(sc,
6337 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6338 sc->tx_hi_watermark, sc->max_tx_bd);
6339
6340
6341 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
6342 sc->rx_prod);
6343
6344 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
6345 sc->rx_cons);
6346
6347 BNX_PRINTF(sc,
6348 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6349 sc->rx_prod_bseq);
6350
6351 BNX_PRINTF(sc,
6352 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6353 sc->rx_mbuf_alloc);
6354
6355 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6356 sc->free_rx_bd);
6357
6358 BNX_PRINTF(sc,
6359 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6360 sc->rx_low_watermark, sc->max_rx_bd);
6361
6362 BNX_PRINTF(sc,
6363 " 0x%08X - (sc->mbuf_alloc_failed) "
6364 "mbuf alloc failures\n",
6365 sc->mbuf_alloc_failed);
6366
6367 BNX_PRINTF(sc,
6368 " 0x%0X - (sc->mbuf_sim_allocated_failed) "
6369 "simulated mbuf alloc failures\n",
6370 sc->mbuf_sim_alloc_failed);
6371
6372 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
6373 "-----------------------------\n");
6374 }
6375
/*
 * Dump selected hardware state: bootcode version, the main block
 * status registers, then a raw hex dump of the register file.
 */
void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	uint32_t val1;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	/* Each print shows the register value and (its offset). */
	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	/* Hex dump of registers 0x400-0x7FFF, four 32-bit words per line. */
	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
6437
6438 void
6439 bnx_breakpoint(struct bnx_softc *sc)
6440 {
6441 /* Unreachable code to shut the compiler up about unused functions. */
6442 if (0) {
6443 bnx_dump_txbd(sc, 0, NULL);
6444 bnx_dump_rxbd(sc, 0, NULL);
6445 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6446 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6447 bnx_dump_l2fhdr(sc, 0, NULL);
6448 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6449 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6450 bnx_dump_status_block(sc);
6451 bnx_dump_stats_block(sc);
6452 bnx_dump_driver_state(sc);
6453 bnx_dump_hw_state(sc);
6454 }
6455
6456 bnx_dump_driver_state(sc);
6457 /* Print the important status block fields. */
6458 bnx_dump_status_block(sc);
6459
6460 #if 0
6461 /* Call the debugger. */
6462 breakpoint();
6463 #endif
6464
6465 return;
6466 }
6467 #endif
6468