1 /* $NetBSD: if_bnx.c,v 1.20.10.1 2010/04/21 00:27:41 matt Exp $ */
2 /* $OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $ */
3
4 /*-
5 * Copyright (c) 2006 Broadcom Corporation
6 * David Christensen <davidch (at) broadcom.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written consent.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 #if 0
36 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
37 #endif
38 __KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.20.10.1 2010/04/21 00:27:41 matt Exp $");
39
40 /*
41 * The following controllers are supported by this driver:
42 * BCM5706C A2, A3
43 * BCM5706S A2, A3
44 * BCM5708C B1, B2
45 * BCM5708S B1, B2
46 * BCM5709C A1, C0
47 * BCM5716 C0
48 *
49 * The following controllers are not supported by this driver:
50 *
51 * BCM5706C A0, A1
52 * BCM5706S A0, A1
53 * BCM5708C A0, B0
54 * BCM5708S A0, B0
55 * BCM5709C A0, B0, B1, B2 (pre-production)
56 * BCM5709S A0, A1, B0, B1, B2, C0 (pre-production)
57 */
58
59 #include <sys/callout.h>
60 #include <sys/mutex.h>
61
62 #include <dev/pci/if_bnxreg.h>
63 #include <dev/microcode/bnx/bnxfw.h>
64
65 /****************************************************************************/
66 /* BNX Driver Version */
67 /****************************************************************************/
68 #define BNX_DRIVER_VERSION "v0.9.6"
69
70 /****************************************************************************/
71 /* BNX Debug Options */
72 /****************************************************************************/
73 #ifdef BNX_DEBUG
74 u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;
75
76 /* 0 = Never */
77 /* 1 = 1 in 2,147,483,648 */
78 /* 256 = 1 in 8,388,608 */
79 /* 2048 = 1 in 1,048,576 */
80 /* 65536 = 1 in 32,768 */
81 /* 1048576 = 1 in 2,048 */
82 /* 268435456 = 1 in 8 */
83 /* 536870912 = 1 in 4 */
84 /* 1073741824 = 1 in 2 */
85
86 /* Controls how often the l2_fhdr frame error check will fail. */
87 int bnx_debug_l2fhdr_status_check = 0;
88
89 /* Controls how often the unexpected attention check will fail. */
90 int bnx_debug_unexpected_attention = 0;
91
92 /* Controls how often to simulate an mbuf allocation failure. */
93 int bnx_debug_mbuf_allocation_failure = 0;
94
95 /* Controls how often to simulate a DMA mapping failure. */
96 int bnx_debug_dma_map_addr_failure = 0;
97
98 /* Controls how often to simulate a bootcode failure. */
99 int bnx_debug_bootcode_running_failure = 0;
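
/*
 * Illustrative sketch only (the helper name and the use of arc4random()
 * are assumptions, not part of this driver): each knob above is compared
 * against a 31-bit random sample, so e.g. a value of 65536 fails roughly
 * 1 time in 32,768.
 */
#if 0
static int
bnx_debug_should_fail(u_int32_t knob)
{
	/* A knob of 0 never fails. */
	if (knob == 0)
		return (0);

	/* Fail whenever the random sample falls below the knob value. */
	return ((arc4random() & 0x7fffffff) < knob);
}
#endif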
100 #endif
101
102 /****************************************************************************/
103 /* PCI Device ID Table */
104 /* */
105 /* Used by bnx_probe() to identify the devices supported by this driver. */
106 /****************************************************************************/
107 static const struct bnx_product {
108 pci_vendor_id_t bp_vendor;
109 pci_product_id_t bp_product;
110 pci_vendor_id_t bp_subvendor;
111 pci_product_id_t bp_subproduct;
112 const char *bp_name;
113 } bnx_devices[] = {
114 #ifdef PCI_SUBPRODUCT_HP_NC370T
115 {
116 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
117 PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
118 "HP NC370T Multifunction Gigabit Server Adapter"
119 },
120 #endif
121 #ifdef PCI_SUBPRODUCT_HP_NC370i
122 {
123 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
124 PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
125 "HP NC370i Multifunction Gigabit Server Adapter"
126 },
127 #endif
128 {
129 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
130 0, 0,
131 "Broadcom NetXtreme II BCM5706 1000Base-T"
132 },
133 #ifdef PCI_SUBPRODUCT_HP_NC370F
134 {
135 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
136 PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
137 "HP NC370F Multifunction Gigabit Server Adapter"
138 },
139 #endif
140 {
141 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
142 0, 0,
143 "Broadcom NetXtreme II BCM5706 1000Base-SX"
144 },
145 {
146 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
147 0, 0,
148 "Broadcom NetXtreme II BCM5708 1000Base-T"
149 },
150 {
151 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
152 0, 0,
153 "Broadcom NetXtreme II BCM5708 1000Base-SX"
154 },
155 {
156 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
157 0, 0,
158 "Broadcom NetXtreme II BCM5709 1000Base-T"
159 },
160 {
161 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
162 0, 0,
163 "Broadcom NetXtreme II BCM5709 1000Base-SX"
164 },
165 {
166 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
167 0, 0,
168 "Broadcom NetXtreme II BCM5716 1000Base-T"
169 },
170 {
171 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
172 0, 0,
173 "Broadcom NetXtreme II BCM5716 1000Base-SX"
174 },
175 };
176
177 /****************************************************************************/
178 /* Supported Flash NVRAM device data. */
179 /****************************************************************************/
180 static struct flash_spec flash_table[] =
181 {
182 #define BUFFERED_FLAGS (BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
183 #define NONBUFFERED_FLAGS (BNX_NV_WREN)
184 /* Slow EEPROM */
185 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
186 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
187 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
188 "EEPROM - slow"},
189 /* Expansion entry 0001 */
190 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
191 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
193 "Entry 0001"},
194 /* Saifun SA25F010 (non-buffered flash) */
195 /* strap, cfg1, & write1 need updates */
196 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
199 "Non-buffered flash (128kB)"},
200 /* Saifun SA25F020 (non-buffered flash) */
201 /* strap, cfg1, & write1 need updates */
202 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
205 "Non-buffered flash (256kB)"},
206 /* Expansion entry 0100 */
207 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 0100"},
211 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
212 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
213 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
214 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
215 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
216 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
217 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
218 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
219 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
220 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
221 /* Saifun SA25F005 (non-buffered flash) */
222 /* strap, cfg1, & write1 need updates */
223 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
224 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
226 "Non-buffered flash (64kB)"},
227 /* Fast EEPROM */
228 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
229 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
230 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
231 "EEPROM - fast"},
232 /* Expansion entry 1001 */
233 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
234 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
235 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
236 "Entry 1001"},
237 /* Expansion entry 1010 */
238 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
239 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
240 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
241 "Entry 1010"},
242 /* ATMEL AT45DB011B (buffered flash) */
243 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
244 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
245 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
246 "Buffered flash (128kB)"},
247 /* Expansion entry 1100 */
248 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
249 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
250 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
251 "Entry 1100"},
252 /* Expansion entry 1101 */
253 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
254 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
255 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
256 "Entry 1101"},
257 /* Atmel Expansion entry 1110 */
258 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
259 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
260 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
261 "Entry 1110 (Atmel)"},
262 /* ATMEL AT45DB021B (buffered flash) */
263 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
264 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
265 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
266 "Buffered flash (256kB)"},
267 };
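
/*
 * Each flash_table entry above fills in struct flash_spec: the five hex
 * words are the strapping value and the config1/config2/config3/write1
 * register images, followed by the flags, page geometry, address mask,
 * total size, and name that bnx_init_nvram() uses to match and program
 * the part.
 */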
268
269 /*
270 * The BCM5709 controllers transparently handle the
271 * differences between Atmel 264 byte pages and all
272 * flash devices which use 256 byte pages, so no
273 * logical-to-physical mapping is required in the
274 * driver.
275 */
276 static struct flash_spec flash_5709 = {
277 .flags = BNX_NV_BUFFERED,
278 .page_bits = BCM5709_FLASH_PAGE_BITS,
279 .page_size = BCM5709_FLASH_PAGE_SIZE,
280 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
281 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
282 .name = "5709 buffered flash (256kB)",
283 };
284
285 /****************************************************************************/
286 /* NetBSD device entry points. */
287 /****************************************************************************/
288 static int bnx_probe(device_t, cfdata_t, void *);
289 void bnx_attach(device_t, device_t, void *);
290 int bnx_detach(device_t, int);
291
292 /****************************************************************************/
293 /* BNX Debug Data Structure Dump Routines */
294 /****************************************************************************/
295 #ifdef BNX_DEBUG
296 void bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
297 void bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
298 void bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
299 void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
300 void bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
301 void bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
302 void bnx_dump_tx_chain(struct bnx_softc *, int, int);
303 void bnx_dump_rx_chain(struct bnx_softc *, int, int);
304 void bnx_dump_status_block(struct bnx_softc *);
305 void bnx_dump_stats_block(struct bnx_softc *);
306 void bnx_dump_driver_state(struct bnx_softc *);
307 void bnx_dump_hw_state(struct bnx_softc *);
308 void bnx_breakpoint(struct bnx_softc *);
309 #endif
310
311 /****************************************************************************/
312 /* BNX Register/Memory Access Routines */
313 /****************************************************************************/
314 u_int32_t bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
315 void bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
316 void bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
317 int bnx_miibus_read_reg(device_t, int, int);
318 void bnx_miibus_write_reg(device_t, int, int, int);
319 void bnx_miibus_statchg(device_t);
320
321 /****************************************************************************/
322 /* BNX NVRAM Access Routines */
323 /****************************************************************************/
324 int bnx_acquire_nvram_lock(struct bnx_softc *);
325 int bnx_release_nvram_lock(struct bnx_softc *);
326 void bnx_enable_nvram_access(struct bnx_softc *);
327 void bnx_disable_nvram_access(struct bnx_softc *);
328 int bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
329 u_int32_t);
330 int bnx_init_nvram(struct bnx_softc *);
331 int bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
332 int bnx_nvram_test(struct bnx_softc *);
333 #ifdef BNX_NVRAM_WRITE_SUPPORT
334 int bnx_enable_nvram_write(struct bnx_softc *);
335 void bnx_disable_nvram_write(struct bnx_softc *);
336 int bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
337 int bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
338 u_int32_t);
339 int bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
340 #endif
341
342 /****************************************************************************/
343 /* BNX Media and DMA Resource Routines */
344 /****************************************************************************/
345 void bnx_get_media(struct bnx_softc *);
346 int bnx_dma_alloc(struct bnx_softc *);
347 void bnx_dma_free(struct bnx_softc *);
348 void bnx_release_resources(struct bnx_softc *);
349
350 /****************************************************************************/
351 /* BNX Firmware Synchronization and Load */
352 /****************************************************************************/
353 int bnx_fw_sync(struct bnx_softc *, u_int32_t);
354 void bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
355 u_int32_t);
356 void bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
357 struct fw_info *);
358 void bnx_init_cpus(struct bnx_softc *);
359
360 void bnx_stop(struct ifnet *, int);
361 int bnx_reset(struct bnx_softc *, u_int32_t);
362 int bnx_chipinit(struct bnx_softc *);
363 int bnx_blockinit(struct bnx_softc *);
364 static int bnx_add_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
365 u_int16_t *, u_int32_t *);
366 int bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
367
368 int bnx_init_tx_chain(struct bnx_softc *);
369 void bnx_init_tx_context(struct bnx_softc *);
370 int bnx_init_rx_chain(struct bnx_softc *);
371 void bnx_init_rx_context(struct bnx_softc *);
372 void bnx_free_rx_chain(struct bnx_softc *);
373 void bnx_free_tx_chain(struct bnx_softc *);
374
375 int bnx_tx_encap(struct bnx_softc *, struct mbuf *);
376 void bnx_start(struct ifnet *);
377 int bnx_ioctl(struct ifnet *, u_long, void *);
378 void bnx_watchdog(struct ifnet *);
379 int bnx_init(struct ifnet *);
380
381 void bnx_init_context(struct bnx_softc *);
382 void bnx_get_mac_addr(struct bnx_softc *);
383 void bnx_set_mac_addr(struct bnx_softc *);
384 void bnx_phy_intr(struct bnx_softc *);
385 void bnx_rx_intr(struct bnx_softc *);
386 void bnx_tx_intr(struct bnx_softc *);
387 void bnx_disable_intr(struct bnx_softc *);
388 void bnx_enable_intr(struct bnx_softc *);
389
390 int bnx_intr(void *);
391 void bnx_iff(struct bnx_softc *);
392 void bnx_stats_update(struct bnx_softc *);
393 void bnx_tick(void *);
394
395 struct pool *bnx_tx_pool = NULL;
396 int bnx_alloc_pkts(struct bnx_softc *);
397
398 /****************************************************************************/
399 /* NetBSD device dispatch table. */
400 /****************************************************************************/
401 CFATTACH_DECL_NEW(bnx, sizeof(struct bnx_softc),
402 bnx_probe, bnx_attach, bnx_detach, NULL);
403
404 /****************************************************************************/
405 /* Device probe function. */
406 /* */
407 /* Compares the device to the driver's list of supported devices and */
408 /* reports back to the OS whether this is the right driver for the device. */
409 /* */
410 /* Returns: */
411 /* 1 if the device is supported, 0 otherwise. */
412 /****************************************************************************/
413 static const struct bnx_product *
414 bnx_lookup(const struct pci_attach_args *pa)
415 {
416 int i;
417 pcireg_t subid;
418
419 for (i = 0; i < __arraycount(bnx_devices); i++) {
420 if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
421 PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
422 continue;
423 if (!bnx_devices[i].bp_subvendor)
424 return &bnx_devices[i];
425 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
426 if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
427 PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
428 return &bnx_devices[i];
429 }
430
431 return NULL;
432 }
433 static int
434 bnx_probe(device_t parent, cfdata_t match, void *aux)
435 {
436 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
437
438 if (bnx_lookup(pa) != NULL)
439 return (1);
440
441 return (0);
442 }
443
444 /****************************************************************************/
445 /* Device attach function. */
446 /* */
447 /* Allocates device resources, performs secondary chip identification, */
448 /* resets and initializes the hardware, and initializes driver instance */
449 /* variables. */
450 /* */
451 /* Returns: */
452 /* Nothing. */
453 /****************************************************************************/
454 void
455 bnx_attach(device_t parent, device_t self, void *aux)
456 {
457 const struct bnx_product *bp;
458 struct bnx_softc *sc = device_private(self);
459 struct pci_attach_args *pa = aux;
460 pci_chipset_tag_t pc = pa->pa_pc;
461 pci_intr_handle_t ih;
462 const char *intrstr = NULL;
463 u_int32_t command;
464 struct ifnet *ifp;
465 u_int32_t val;
466 int mii_flags = MIIF_FORCEANEG;
467 pcireg_t memtype;
468
469 if (bnx_tx_pool == NULL) {
470 bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT);
471 if (bnx_tx_pool != NULL) {
472 pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
473 0, 0, 0, "bnxpkts", NULL, IPL_NET);
474 } else {
475 aprint_error(": can't alloc bnx_tx_pool\n");
476 return;
477 }
478 }
479
480 bp = bnx_lookup(pa);
481 if (bp == NULL)
482 panic("unknown device");
483
484 sc->bnx_dev = self;
485
486 aprint_naive("\n");
487 aprint_normal(": %s\n", bp->bp_name);
488
489 sc->bnx_pa = *pa;
490
491 /*
492 * Map control/status registers.
493 */
494 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
495 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
496 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
497 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
498
499 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
500 aprint_error_dev(sc->bnx_dev,
501 "failed to enable memory mapping!\n");
502 return;
503 }
504
505 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
506 if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
507 &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
508 aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
509 return;
510 }
511
512 if (pci_intr_map(pa, &ih)) {
513 aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
514 goto bnx_attach_fail;
515 }
516
517 intrstr = pci_intr_string(pc, ih);
518
519 /*
520 * Configure byte swap and enable indirect register access.
521 * Rely on CPU to do target byte swapping on big endian systems.
522 * Accesses to registers outside of PCI configuration space are not
523 * valid until this is done.
524 */
525 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
526 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
527 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
528
529 /* Save ASIC revision info. */
530 sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);
531
532 /*
533 * Find the base address for shared memory access.
534 * Newer versions of bootcode use a signature and offset
535 * while older versions use a fixed address.
536 */
537 val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
538 if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
539 sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
540 (sc->bnx_pa.pa_function << 2));
541 else
542 sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;
543
544 DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);
545
546 /* Set initial device and PHY flags */
547 sc->bnx_flags = 0;
548 sc->bnx_phy_flags = 0;
549
550 /* Get PCI bus information (speed and type). */
551 val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
552 if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
553 u_int32_t clkreg;
554
555 sc->bnx_flags |= BNX_PCIX_FLAG;
556
557 clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);
558
559 clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
560 switch (clkreg) {
561 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
562 sc->bus_speed_mhz = 133;
563 break;
564
565 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
566 sc->bus_speed_mhz = 100;
567 break;
568
569 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
570 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
571 sc->bus_speed_mhz = 66;
572 break;
573
574 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
575 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
576 sc->bus_speed_mhz = 50;
577 break;
578
579 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
580 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
581 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
582 sc->bus_speed_mhz = 33;
583 break;
584 }
585 } else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
586 sc->bus_speed_mhz = 66;
587 else
588 sc->bus_speed_mhz = 33;
589
590 if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
591 sc->bnx_flags |= BNX_PCI_32BIT_FLAG;
592
593 /* Reset the controller. */
594 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
595 goto bnx_attach_fail;
596
597 /* Initialize the controller. */
598 if (bnx_chipinit(sc)) {
599 aprint_error_dev(sc->bnx_dev,
600 "Controller initialization failed!\n");
601 goto bnx_attach_fail;
602 }
603
604 /* Perform NVRAM test. */
605 if (bnx_nvram_test(sc)) {
606 aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
607 goto bnx_attach_fail;
608 }
609
610 /* Fetch the permanent Ethernet MAC address. */
611 bnx_get_mac_addr(sc);
612 aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
613 ether_sprintf(sc->eaddr));
614
615 /*
616 * Trip points control how many BDs
617 * should be ready before generating an
618 * interrupt while ticks control how long
619 * a BD can sit in the chain before
620 * generating an interrupt. Set the default
621 * values for the RX and TX rings.
622 */
623
624 #ifdef BNX_DEBUG
625 /* Force more frequent interrupts. */
626 sc->bnx_tx_quick_cons_trip_int = 1;
627 sc->bnx_tx_quick_cons_trip = 1;
628 sc->bnx_tx_ticks_int = 0;
629 sc->bnx_tx_ticks = 0;
630
631 sc->bnx_rx_quick_cons_trip_int = 1;
632 sc->bnx_rx_quick_cons_trip = 1;
633 sc->bnx_rx_ticks_int = 0;
634 sc->bnx_rx_ticks = 0;
635 #else
636 sc->bnx_tx_quick_cons_trip_int = 20;
637 sc->bnx_tx_quick_cons_trip = 20;
638 sc->bnx_tx_ticks_int = 80;
639 sc->bnx_tx_ticks = 80;
640
641 sc->bnx_rx_quick_cons_trip_int = 6;
642 sc->bnx_rx_quick_cons_trip = 6;
643 sc->bnx_rx_ticks_int = 18;
644 sc->bnx_rx_ticks = 18;
645 #endif
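
	/*
	 * Sketch of how these pairs are consumed (register names assumed
	 * from if_bnxreg.h; the actual programming happens later during
	 * block initialization): each host coalescing register packs the
	 * "during interrupt" value into the upper 16 bits and the normal
	 * value into the lower 16 bits.
	 */
#if 0
	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP,
	    (sc->bnx_tx_quick_cons_trip_int << 16) |
	    sc->bnx_tx_quick_cons_trip);
	REG_WR(sc, BNX_HC_TX_TICKS,
	    (sc->bnx_tx_ticks_int << 16) | sc->bnx_tx_ticks);
#endif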
646
647 /* Update statistics once every second; the ticks field occupies bits 8-23 of the register, so mask off the low byte. */
648 sc->bnx_stats_ticks = 1000000 & 0xffff00;
649
650 /* Find the media type for the adapter. */
651 bnx_get_media(sc);
652
653 /*
654 * Store config data needed by the PHY driver for
655 * backplane applications
656 */
657 sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
658 BNX_SHARED_HW_CFG_CONFIG);
659 sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
660 BNX_PORT_HW_CFG_CONFIG);
661
662 /* Allocate DMA memory resources. */
663 sc->bnx_dmatag = pa->pa_dmat;
664 if (bnx_dma_alloc(sc)) {
665 aprint_error_dev(sc->bnx_dev,
666 "DMA resource allocation failed!\n");
667 goto bnx_attach_fail;
668 }
669
670 /* Initialize the ifnet interface. */
671 ifp = &sc->bnx_ec.ec_if;
672 ifp->if_softc = sc;
673 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
674 ifp->if_ioctl = bnx_ioctl;
675 ifp->if_stop = bnx_stop;
676 ifp->if_start = bnx_start;
677 ifp->if_init = bnx_init;
678 ifp->if_timer = 0;
679 ifp->if_watchdog = bnx_watchdog;
680 IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
681 IFQ_SET_READY(&ifp->if_snd);
682 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
683
684 sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
685 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
686
687 ifp->if_capabilities |=
688 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
689 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
690 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
691
692 /* Hookup IRQ last. */
693 sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
694 if (sc->bnx_intrhand == NULL) {
695 aprint_error_dev(self, "couldn't establish interrupt");
696 if (intrstr != NULL)
697 aprint_error(" at %s", intrstr);
698 aprint_error("\n");
699 goto bnx_attach_fail;
700 }
701 aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);
702
703 sc->bnx_mii.mii_ifp = ifp;
704 sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
705 sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
706 sc->bnx_mii.mii_statchg = bnx_miibus_statchg;
707
708 sc->bnx_ec.ec_mii = &sc->bnx_mii;
709 ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
710 ether_mediastatus);
711 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
712 mii_flags |= MIIF_HAVEFIBER;
713 mii_attach(self, &sc->bnx_mii, 0xffffffff,
714 MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);
715
716 if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
717 aprint_error_dev(self, "no PHY found!\n");
718 ifmedia_add(&sc->bnx_mii.mii_media,
719 IFM_ETHER|IFM_MANUAL, 0, NULL);
720 ifmedia_set(&sc->bnx_mii.mii_media,
721 IFM_ETHER|IFM_MANUAL);
722 } else {
723 ifmedia_set(&sc->bnx_mii.mii_media,
724 IFM_ETHER|IFM_AUTO);
725 }
726
727 /* Attach to the Ethernet interface list. */
728 if_attach(ifp);
729 ether_ifattach(ifp, sc->eaddr);
730
731 callout_init(&sc->bnx_timeout, 0);
732
733 if (!pmf_device_register(self, NULL, NULL))
734 aprint_error_dev(self, "couldn't establish power handler\n");
735 else
736 pmf_class_network_register(self, ifp);
737
738 /* Print some important debugging info. */
739 DBRUN(BNX_INFO, bnx_dump_driver_state(sc));
740
741 goto bnx_attach_exit;
742
743 bnx_attach_fail:
744 bnx_release_resources(sc);
745
746 bnx_attach_exit:
747 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
748 }
749
750 /****************************************************************************/
751 /* Device detach function. */
752 /* */
753 /* Stops the controller, resets the controller, and releases resources. */
754 /* */
755 /* Returns: */
756 /* 0 on success, positive value on failure. */
757 /****************************************************************************/
758 int
759 bnx_detach(device_t dev, int flags)
760 {
761 int s;
762 struct bnx_softc *sc;
763 struct ifnet *ifp;
764
765 sc = device_private(dev);
766 ifp = &sc->bnx_ec.ec_if;
767
768 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
769
770 /* Stop and reset the controller. */
771 s = splnet();
772 if (ifp->if_flags & IFF_RUNNING)
773 bnx_stop(ifp, 1);
774 else {
775 /* Disable the transmit/receive blocks. */
776 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
777 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
778 DELAY(20);
779 bnx_disable_intr(sc);
780 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
781 }
782
783 splx(s);
784
785 pmf_device_deregister(dev);
786 ether_ifdetach(ifp);
787 if_detach(ifp);
788 mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);
789
790 /* Release all remaining resources. */
791 bnx_release_resources(sc);
792
793 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
794
795 return(0);
796 }
797
798 /****************************************************************************/
799 /* Indirect register read. */
800 /* */
801 /* Reads NetXtreme II registers using an index/data register pair in PCI */
802 /* configuration space. Using this mechanism avoids issues with posted */
803 /* reads but is much slower than memory-mapped I/O. */
804 /* */
805 /* Returns: */
806 /* The value of the register. */
807 /****************************************************************************/
808 u_int32_t
809 bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
810 {
811 struct pci_attach_args *pa = &(sc->bnx_pa);
812
813 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
814 offset);
815 #ifdef BNX_DEBUG
816 {
817 u_int32_t val;
818 val = pci_conf_read(pa->pa_pc, pa->pa_tag,
819 BNX_PCICFG_REG_WINDOW);
820 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
821 "val = 0x%08X\n", __func__, offset, val);
822 return (val);
823 }
824 #else
825 return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
826 #endif
827 }
828
829 /****************************************************************************/
830 /* Indirect register write. */
831 /* */
832 /* Writes NetXtreme II registers using an index/data register pair in PCI */
833 /* configuration space. Using this mechanism avoids issues with posted */
834 * writes but is much slower than memory-mapped I/O. */
835 /* */
836 /* Returns: */
837 /* Nothing. */
838 /****************************************************************************/
839 void
840 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
841 {
842 struct pci_attach_args *pa = &(sc->bnx_pa);
843
844 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
845 __func__, offset, val);
846
847 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
848 offset);
849 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
850 }
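
/*
 * Usage sketch: the indirect window is how the driver reaches shared
 * memory, e.g. the hardware configuration read performed in bnx_attach()
 * above (REG_RD_IND(), as used throughout this file, is by convention a
 * wrapper around bnx_reg_rd_ind()).
 */
#if 0
	val = bnx_reg_rd_ind(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
#endif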
851
852 /****************************************************************************/
853 /* Context memory write. */
854 /* */
855 /* The NetXtreme II controller uses context memory to track connection */
856 /* information for L2 and higher network protocols. */
857 /* */
858 /* Returns: */
859 /* Nothing. */
860 /****************************************************************************/
861 void
862 bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
863 u_int32_t ctx_val)
864 {
865 u_int32_t idx, offset = ctx_offset + cid_addr;
866 u_int32_t val, retry_cnt = 5;
867
868 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
869 REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
870 REG_WR(sc, BNX_CTX_CTX_CTRL,
871 (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));
872
873 for (idx = 0; idx < retry_cnt; idx++) {
874 val = REG_RD(sc, BNX_CTX_CTX_CTRL);
875 if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
876 break;
877 DELAY(5);
878 }
879
880 #if 0
881 if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
882 BNX_PRINTF("%s(%d); Unable to write CTX memory: "
883 "cid_addr = 0x%08X, offset = 0x%08X!\n",
884 __FILE__, __LINE__, cid_addr, ctx_offset);
885 #endif
886
887 } else {
888 REG_WR(sc, BNX_CTX_DATA_ADR, offset);
889 REG_WR(sc, BNX_CTX_DATA, ctx_val);
890 }
891 }
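
/*
 * Usage sketch (identifiers assumed from the NetXtreme II ring setup
 * code, not verified here): writing the L2 transmit ring's context type
 * word would look like:
 */
#if 0
	bnx_ctx_wr(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE,
	    BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2);
#endif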
892
893 /****************************************************************************/
894 /* PHY register read. */
895 /* */
896 /* Implements register reads on the MII bus. */
897 /* */
898 /* Returns: */
899 /* The value of the register. */
900 /****************************************************************************/
901 int
902 bnx_miibus_read_reg(device_t dev, int phy, int reg)
903 {
904 struct bnx_softc *sc = device_private(dev);
905 u_int32_t val;
906 int i;
907
908 /* Make sure we are accessing the correct PHY address. */
909 if (phy != sc->bnx_phy_addr) {
910 DBPRINT(sc, BNX_VERBOSE,
911 "Invalid PHY address %d for PHY read!\n", phy);
912 return(0);
913 }
914
915 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
916 val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
917 val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
918
919 REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
920 REG_RD(sc, BNX_EMAC_MDIO_MODE);
921
922 DELAY(40);
923 }
924
925 val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
926 BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
927 BNX_EMAC_MDIO_COMM_START_BUSY;
928 REG_WR(sc, BNX_EMAC_MDIO_COMM, val);
929
930 for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
931 DELAY(10);
932
933 val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
934 if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
935 DELAY(5);
936
937 val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
938 val &= BNX_EMAC_MDIO_COMM_DATA;
939
940 break;
941 }
942 }
943
944 if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
945 BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
946 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
947 val = 0x0;
948 } else
949 val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
950
951 DBPRINT(sc, BNX_EXCESSIVE,
952 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
953 (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
954
955 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
956 val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
957 val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
958
959 REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
960 REG_RD(sc, BNX_EMAC_MDIO_MODE);
961
962 DELAY(40);
963 }
964
965 return (val & 0xffff);
966 }
967
968 /****************************************************************************/
969 /* PHY register write. */
970 /* */
971 /* Implements register writes on the MII bus. */
972 /* */
973 /* Returns: */
974 /* Nothing. */
975 /****************************************************************************/
976 void
977 bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
978 {
979 struct bnx_softc *sc = device_private(dev);
980 u_int32_t val1;
981 int i;
982
983 /* Make sure we are accessing the correct PHY address. */
984 if (phy != sc->bnx_phy_addr) {
985 DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
986 phy);
987 return;
988 }
989
990 DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
991 "val = 0x%04X\n", __func__,
992 phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
993
994 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
995 val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
996 val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
997
998 REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
999 REG_RD(sc, BNX_EMAC_MDIO_MODE);
1000
1001 DELAY(40);
1002 }
1003
1004 val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
1005 BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
1006 BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
1007 REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);
1008
1009 for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1010 DELAY(10);
1011
1012 val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1013 if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1014 DELAY(5);
1015 break;
1016 }
1017 }
1018
1019 if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
1020 BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
1021 __LINE__);
1022 }
1023
1024 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1025 val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1026 val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1027
1028 REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1029 REG_RD(sc, BNX_EMAC_MDIO_MODE);
1030
1031 DELAY(40);
1032 }
1033 }
1034
1035 /****************************************************************************/
1036 /* MII bus status change. */
1037 /* */
1038 /* Called by the MII bus driver when the PHY establishes link to set the */
1039 /* MAC interface registers. */
1040 /* */
1041 /* Returns: */
1042 /* Nothing. */
1043 /****************************************************************************/
1044 void
1045 bnx_miibus_statchg(device_t dev)
1046 {
1047 struct bnx_softc *sc = device_private(dev);
1048 struct mii_data *mii = &sc->bnx_mii;
1049 int val;
1050
1051 val = REG_RD(sc, BNX_EMAC_MODE);
1052 val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1053 BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1054 BNX_EMAC_MODE_25G);
1055
1056 /* Set MII or GMII interface based on the speed
1057 * negotiated by the PHY.
1058 */
1059 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1060 case IFM_10_T:
1061 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1062 DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1063 val |= BNX_EMAC_MODE_PORT_MII_10;
1064 break;
1065 }
1066 /* FALLTHROUGH */
1067 case IFM_100_TX:
1068 DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1069 val |= BNX_EMAC_MODE_PORT_MII;
1070 break;
1071 case IFM_2500_SX:
1072 DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1073 val |= BNX_EMAC_MODE_25G;
1074 /* FALLTHROUGH */
1075 case IFM_1000_T:
1076 case IFM_1000_SX:
1077 DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
1078 val |= BNX_EMAC_MODE_PORT_GMII;
1079 break;
1080 default:
1081 val |= BNX_EMAC_MODE_PORT_GMII;
1082 break;
1083 }
1084
1085 /* Set half or full duplex based on the duplex mode
1086 * negotiated by the PHY.
1087 */
1088 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1089 DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1090 val |= BNX_EMAC_MODE_HALF_DUPLEX;
1091 } else {
1092 DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1093 }
1094
1095 REG_WR(sc, BNX_EMAC_MODE, val);
1096 }
1097
1098 /****************************************************************************/
1099 /* Acquire NVRAM lock. */
1100 /* */
1101 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1102 /* Lock 1 is used by the firmware, lock 2 is used by the driver, and the */
1103 /* remaining locks are reserved. */
1104 /* */
1105 /* Returns: */
1106 /* 0 on success, positive value on failure. */
1107 /****************************************************************************/
1108 int
1109 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1110 {
1111 u_int32_t val;
1112 int j;
1113
1114 DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1115
1116 /* Request access to the flash interface. */
1117 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1118 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1119 val = REG_RD(sc, BNX_NVM_SW_ARB);
1120 if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1121 break;
1122
1123 DELAY(5);
1124 }
1125
1126 if (j >= NVRAM_TIMEOUT_COUNT) {
1127 DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1128 return (EBUSY);
1129 }
1130
1131 return (0);
1132 }
1133
1134 /****************************************************************************/
1135 /* Release NVRAM lock. */
1136 /* */
1137 /* When the caller is finished accessing NVRAM the lock must be released. */
1138 /* Lock 1 is used by the firmware, lock 2 is used by the driver, and the */
1139 /* remaining locks are reserved. */
1140 /* */
1141 /* Returns: */
1142 /* 0 on success, positive value on failure. */
1143 /****************************************************************************/
1144 int
1145 bnx_release_nvram_lock(struct bnx_softc *sc)
1146 {
1147 int j;
1148 u_int32_t val;
1149
1150 DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1151
1152 /* Relinquish nvram interface. */
1153 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1154
1155 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1156 val = REG_RD(sc, BNX_NVM_SW_ARB);
1157 if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1158 break;
1159
1160 DELAY(5);
1161 }
1162
1163 if (j >= NVRAM_TIMEOUT_COUNT) {
1164 DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
1165 return (EBUSY);
1166 }
1167
1168 return (0);
1169 }
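
/*
 * Sketch of the canonical access sequence (this mirrors what
 * bnx_nvram_read() below actually does): take the lock, enable access,
 * perform the dword operations, then undo both in reverse order.
 */
#if 0
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);
	bnx_enable_nvram_access(sc);

	rc = bnx_nvram_read_dword(sc, offset, buf,
	    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST);

	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);
#endif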
1170
1171 #ifdef BNX_NVRAM_WRITE_SUPPORT
1172 /****************************************************************************/
1173 /* Enable NVRAM write access. */
1174 /* */
1175 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1176 /* */
1177 /* Returns: */
1178 /* 0 on success, positive value on failure. */
1179 /****************************************************************************/
1180 int
1181 bnx_enable_nvram_write(struct bnx_softc *sc)
1182 {
1183 u_int32_t val;
1184
1185 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1186
1187 val = REG_RD(sc, BNX_MISC_CFG);
1188 REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1189
1190 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1191 int j;
1192
1193 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1194 REG_WR(sc, BNX_NVM_COMMAND,
1195 BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1196
1197 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1198 DELAY(5);
1199
1200 val = REG_RD(sc, BNX_NVM_COMMAND);
1201 if (val & BNX_NVM_COMMAND_DONE)
1202 break;
1203 }
1204
1205 if (j >= NVRAM_TIMEOUT_COUNT) {
1206 DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1207 return (EBUSY);
1208 }
1209 }
1210
1211 return (0);
1212 }
1213
1214 /****************************************************************************/
1215 /* Disable NVRAM write access. */
1216 /* */
1217 /* When the caller is finished writing to NVRAM write access must be */
1218 /* disabled. */
1219 /* */
1220 /* Returns: */
1221 /* Nothing. */
1222 /****************************************************************************/
1223 void
1224 bnx_disable_nvram_write(struct bnx_softc *sc)
1225 {
1226 u_int32_t val;
1227
1228 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");
1229
1230 val = REG_RD(sc, BNX_MISC_CFG);
1231 REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1232 }
1233 #endif
1234
1235 /****************************************************************************/
1236 /* Enable NVRAM access. */
1237 /* */
1238 /* Before accessing NVRAM for read or write operations the caller must */
1239 /* enable NVRAM access. */
1240 /* */
1241 /* Returns: */
1242 /* Nothing. */
1243 /****************************************************************************/
1244 void
1245 bnx_enable_nvram_access(struct bnx_softc *sc)
1246 {
1247 u_int32_t val;
1248
1249 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1250
1251 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1252 /* Enable both bits, even on read. */
1253 REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1254 val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1255 }
1256
1257 /****************************************************************************/
1258 /* Disable NVRAM access. */
1259 /* */
1260 /* When the caller is finished accessing NVRAM access must be disabled. */
1261 /* */
1262 /* Returns: */
1263 /* Nothing. */
1264 /****************************************************************************/
1265 void
1266 bnx_disable_nvram_access(struct bnx_softc *sc)
1267 {
1268 u_int32_t val;
1269
1270 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1271
1272 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1273
1274 /* Disable both bits, even after read. */
1275 REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1276 val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1277 }
1278
1279 #ifdef BNX_NVRAM_WRITE_SUPPORT
1280 /****************************************************************************/
1281 /* Erase NVRAM page before writing. */
1282 /* */
1283 /* Non-buffered flash parts require that a page be erased before it is */
1284 /* written. */
1285 /* */
1286 /* Returns: */
1287 /* 0 on success, positive value on failure. */
1288 /****************************************************************************/
1289 int
1290 bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
1291 {
1292 u_int32_t cmd;
1293 int j;
1294
1295 /* Buffered flash doesn't require an erase. */
1296 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
1297 return (0);
1298
1299 DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");
1300
1301 /* Build an erase command. */
1302 cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
1303 BNX_NVM_COMMAND_DOIT;
1304
1305 /*
1306 * Clear the DONE bit separately, set the NVRAM address to erase,
1307 * and issue the erase command.
1308 */
1309 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1310 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1311 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1312
1313 /* Wait for completion. */
1314 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1315 u_int32_t val;
1316
1317 DELAY(5);
1318
1319 val = REG_RD(sc, BNX_NVM_COMMAND);
1320 if (val & BNX_NVM_COMMAND_DONE)
1321 break;
1322 }
1323
1324 if (j >= NVRAM_TIMEOUT_COUNT) {
1325 DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
1326 return (EBUSY);
1327 }
1328
1329 return (0);
1330 }
1331 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1332
1333 /****************************************************************************/
1334 /* Read a dword (32 bits) from NVRAM. */
1335 /* */
1336 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1337 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1338 /* */
1339 /* Returns: */
1340 /* 0 on success and the 32 bit value read, positive value on failure. */
1341 /****************************************************************************/
1342 int
1343 bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
1344 u_int8_t *ret_val, u_int32_t cmd_flags)
1345 {
1346 u_int32_t cmd;
1347 int i, rc = 0;
1348
1349 /* Build the command word. */
1350 cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;
1351
1352 /* Calculate the offset for buffered flash if translation is used. */
1353 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1354 offset = ((offset / sc->bnx_flash_info->page_size) <<
1355 sc->bnx_flash_info->page_bits) +
1356 (offset % sc->bnx_flash_info->page_size);
1357 }
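	/*
	 * Worked example of the translation above, assuming the usual
	 * 264-byte/9-bit geometry of the buffered Atmel parts: byte
	 * offset 1000 is page 3, remainder 208, so the device address
	 * becomes (3 << 9) + 208 = 1744.
	 */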
1358
1359 /*
1360 * Clear the DONE bit separately, set the address to read,
1361 * and issue the read.
1362 */
1363 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1364 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1365 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1366
1367 /* Wait for completion. */
1368 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1369 u_int32_t val;
1370
1371 DELAY(5);
1372
1373 val = REG_RD(sc, BNX_NVM_COMMAND);
1374 if (val & BNX_NVM_COMMAND_DONE) {
1375 val = REG_RD(sc, BNX_NVM_READ);
1376
1377 val = bnx_be32toh(val);
1378 memcpy(ret_val, &val, 4);
1379 break;
1380 }
1381 }
1382
1383 /* Check for errors. */
1384 if (i >= NVRAM_TIMEOUT_COUNT) {
1385 BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
1386 "offset 0x%08X!\n", __FILE__, __LINE__, offset);
1387 rc = EBUSY;
1388 }
1389
1390 return(rc);
1391 }
1392
1393 #ifdef BNX_NVRAM_WRITE_SUPPORT
1394 /****************************************************************************/
1395 /* Write a dword (32 bits) to NVRAM. */
1396 /* */
1397 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1398 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1399 /* enabled NVRAM write access. */
1400 /* */
1401 /* Returns: */
1402 /* 0 on success, positive value on failure. */
1403 /****************************************************************************/
1404 int
1405 bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
1406 u_int32_t cmd_flags)
1407 {
1408 u_int32_t cmd, val32;
1409 int j;
1410
1411 /* Build the command word. */
1412 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1413
1414 /* Calculate the offset for buffered flash if translation is used. */
1415 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1416 offset = ((offset / sc->bnx_flash_info->page_size) <<
1417 sc->bnx_flash_info->page_bits) +
1418 (offset % sc->bnx_flash_info->page_size);
1419 }
1420
1421 /*
1422 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1423 * set the NVRAM address to write, and issue the write command
1424 */
1425 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1426 memcpy(&val32, val, 4);
1427 val32 = htobe32(val32);
1428 REG_WR(sc, BNX_NVM_WRITE, val32);
1429 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1430 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1431
1432 /* Wait for completion. */
1433 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1434 DELAY(5);
1435
1436 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1437 break;
1438 }
1439 if (j >= NVRAM_TIMEOUT_COUNT) {
1440 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1441 "offset 0x%08X\n", __FILE__, __LINE__, offset);
1442 return (EBUSY);
1443 }
1444
1445 return (0);
1446 }
1447 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1448
1449 /****************************************************************************/
1450 /* Initialize NVRAM access. */
1451 /* */
1452 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1453 /* access that device. */
1454 /* */
1455 /* Returns: */
1456 /* 0 on success, positive value on failure. */
1457 /****************************************************************************/
1458 int
1459 bnx_init_nvram(struct bnx_softc *sc)
1460 {
1461 u_int32_t val;
1462 int j, entry_count, rc = 0;
1463 struct flash_spec *flash;
1464
1465 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1466
1467 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1468 sc->bnx_flash_info = &flash_5709;
1469 goto bnx_init_nvram_get_flash_size;
1470 }
1471
1472 /* Determine the selected interface. */
1473 val = REG_RD(sc, BNX_NVM_CFG1);
1474
1475 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1476
1477 /*
1478 * Flash reconfiguration is required to support additional
1479 * NVRAM devices not directly supported in hardware.
1480 * Check if the flash interface was reconfigured
1481 * by the bootcode.
1482 */
1483
1484 if (val & 0x40000000) {
1485 /* Flash interface reconfigured by bootcode. */
1486
1487 DBPRINT(sc, BNX_INFO_LOAD,
1488 "bnx_init_nvram(): Flash WAS reconfigured.\n");
1489
1490 for (j = 0, flash = &flash_table[0]; j < entry_count;
1491 j++, flash++) {
1492 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1493 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1494 sc->bnx_flash_info = flash;
1495 break;
1496 }
1497 }
1498 } else {
1499 /* Flash interface not yet reconfigured. */
1500 u_int32_t mask;
1501
1502 DBPRINT(sc, BNX_INFO_LOAD,
1503 "bnx_init_nvram(): Flash was NOT reconfigured.\n");
1504
1505 if (val & (1 << 23))
1506 mask = FLASH_BACKUP_STRAP_MASK;
1507 else
1508 mask = FLASH_STRAP_MASK;
1509
1510 /* Look for the matching NVRAM device configuration data. */
1511 for (j = 0, flash = &flash_table[0]; j < entry_count;
1512 j++, flash++) {
1513 /* Check if the dev matches any of the known devices. */
1514 if ((val & mask) == (flash->strapping & mask)) {
1515 /* Found a device match. */
1516 sc->bnx_flash_info = flash;
1517
1518 /* Request access to the flash interface. */
1519 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1520 return (rc);
1521
1522 /* Reconfigure the flash interface. */
1523 bnx_enable_nvram_access(sc);
1524 REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1525 REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1526 REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1527 REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1528 bnx_disable_nvram_access(sc);
1529 bnx_release_nvram_lock(sc);
1530
1531 break;
1532 }
1533 }
1534 }
1535
1536 /* Check if a matching device was found. */
1537 if (j == entry_count) {
1538 sc->bnx_flash_info = NULL;
1539 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1540 __FILE__, __LINE__);
1541 rc = ENODEV;
1542 }
1543
1544 bnx_init_nvram_get_flash_size:
1545 /* Write the flash config data to the shared memory interface. */
1546 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1547 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1548 if (val)
1549 sc->bnx_flash_size = val;
1550 else
1551 sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1552
1553 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1554 "0x%08X\n", sc->bnx_flash_info->total_size);
1555
1556 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
1557
1558 return (rc);
1559 }
1560
1561 /****************************************************************************/
1562 /* Read an arbitrary range of data from NVRAM. */
1563 /* */
1564 /* Prepares the NVRAM interface for access and reads the requested data */
1565 /* into the supplied buffer. */
1566 /* */
1567 /* Returns: */
1568 /* 0 on success and the data read, positive value on failure. */
1569 /****************************************************************************/
1570 int
1571 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1572 int buf_size)
1573 {
1574 int rc = 0;
1575 u_int32_t cmd_flags, offset32, len32, extra;
1576
1577 if (buf_size == 0)
1578 return (0);
1579
1580 /* Request access to the flash interface. */
1581 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1582 return (rc);
1583
1584 /* Enable access to flash interface */
1585 bnx_enable_nvram_access(sc);
1586
1587 len32 = buf_size;
1588 offset32 = offset;
1589 extra = 0;
1590
1591 cmd_flags = 0;
1592
1593 if (offset32 & 3) {
1594 u_int8_t buf[4];
1595 u_int32_t pre_len;
1596
1597 offset32 &= ~3;
1598 pre_len = 4 - (offset & 3);
1599
1600 if (pre_len >= len32) {
1601 pre_len = len32;
1602 cmd_flags =
1603 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1604 } else
1605 cmd_flags = BNX_NVM_COMMAND_FIRST;
1606
1607 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1608
1609 if (rc)
1610 return (rc);
1611
1612 memcpy(ret_buf, buf + (offset & 3), pre_len);
1613
1614 offset32 += 4;
1615 ret_buf += pre_len;
1616 len32 -= pre_len;
1617 }
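	/*
	 * Worked example of the prefix handling above: a 10-byte read at
	 * offset 6 rounds offset32 down to 4, copies pre_len = 2 bytes
	 * out of the first dword, and continues below with offset32 = 8
	 * and len32 = 8.
	 */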
1618
1619 if (len32 & 3) {
1620 extra = 4 - (len32 & 3);
1621 len32 = (len32 + 4) & ~3;
1622 }
1623
1624 if (len32 == 4) {
1625 u_int8_t buf[4];
1626
1627 if (cmd_flags)
1628 cmd_flags = BNX_NVM_COMMAND_LAST;
1629 else
1630 cmd_flags =
1631 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1632
1633 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1634
1635 memcpy(ret_buf, buf, 4 - extra);
1636 } else if (len32 > 0) {
1637 u_int8_t buf[4];
1638
1639 /* Read the first word. */
1640 if (cmd_flags)
1641 cmd_flags = 0;
1642 else
1643 cmd_flags = BNX_NVM_COMMAND_FIRST;
1644
1645 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1646
1647 /* Advance to the next dword. */
1648 offset32 += 4;
1649 ret_buf += 4;
1650 len32 -= 4;
1651
1652 while (len32 > 4 && rc == 0) {
1653 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1654
1655 /* Advance to the next dword. */
1656 offset32 += 4;
1657 ret_buf += 4;
1658 len32 -= 4;
1659 }
1660
1661 if (rc)
1662 return (rc);
1663
1664 cmd_flags = BNX_NVM_COMMAND_LAST;
1665 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1666
1667 memcpy(ret_buf, buf, 4 - extra);
1668 }
1669
1670 /* Disable access to flash interface and release the lock. */
1671 bnx_disable_nvram_access(sc);
1672 bnx_release_nvram_lock(sc);
1673
1674 return (rc);
1675 }
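
/*
 * Usage sketch (mirrors the check done by bnx_nvram_test() later in
 * this file): read the 32-bit magic value at the start of NVRAM.
 */
#if 0
	u_int32_t magic;

	if (bnx_nvram_read(sc, 0, (u_int8_t *)&magic, 4) != 0)
		return (ENODEV);
#endif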
1676
1677 #ifdef BNX_NVRAM_WRITE_SUPPORT
1678 /****************************************************************************/
1679 /* Write an arbitrary range of data to NVRAM. */
1680 /* */
1681 /* Prepares the NVRAM interface for write access and writes the requested */
1682 /* data from the supplied buffer. The caller is responsible for */
1683 /* calculating any appropriate CRCs. */
1684 /* */
1685 /* Returns: */
1686 /* 0 on success, positive value on failure. */
1687 /****************************************************************************/
1688 int
1689 bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1690 int buf_size)
1691 {
1692 u_int32_t written, offset32, len32;
1693 u_int8_t *buf, start[4], end[4];
1694 int rc = 0;
1695 int align_start, align_end;
1696
1697 buf = data_buf;
1698 offset32 = offset;
1699 len32 = buf_size;
1700 align_start = align_end = 0;
1701
1702 if ((align_start = (offset32 & 3))) {
1703 offset32 &= ~3;
1704 len32 += align_start;
1705 if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1706 return (rc);
1707 }
1708
1709 if (len32 & 3) {
1710 if ((len32 > 4) || !align_start) {
1711 align_end = 4 - (len32 & 3);
1712 len32 += align_end;
1713 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1714 end, 4))) {
1715 return (rc);
1716 }
1717 }
1718 }
1719
1720 if (align_start || align_end) {
1721 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1722 		if (buf == NULL)
1723 return (ENOMEM);
1724
1725 if (align_start)
1726 memcpy(buf, start, 4);
1727
1728 if (align_end)
1729 memcpy(buf + len32 - 4, end, 4);
1730
1731 memcpy(buf + align_start, data_buf, buf_size);
1732 }
1733
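	/*
	 * Write one flash page per iteration: read the existing page
	 * (non-buffered flash only), erase it, then write back the page
	 * contents merged with the new data.
	 */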
1734 written = 0;
1735 while ((written < len32) && (rc == 0)) {
1736 u_int32_t page_start, page_end, data_start, data_end;
1737 u_int32_t addr, cmd_flags;
1738 int i;
1739 u_int8_t flash_buffer[264];
1740
1741 /* Find the page_start addr */
1742 page_start = offset32 + written;
1743 page_start -= (page_start % sc->bnx_flash_info->page_size);
1744 /* Find the page_end addr */
1745 page_end = page_start + sc->bnx_flash_info->page_size;
1746 /* Find the data_start addr */
1747 data_start = (written == 0) ? offset32 : page_start;
1748 /* Find the data_end addr */
1749 data_end = (page_end > offset32 + len32) ?
1750 (offset32 + len32) : page_end;
1751
1752 /* Request access to the flash interface. */
1753 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1754 goto nvram_write_end;
1755
1756 /* Enable access to flash interface */
1757 bnx_enable_nvram_access(sc);
1758
1759 cmd_flags = BNX_NVM_COMMAND_FIRST;
1760 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1761 int j;
1762
1763 /* Read the whole page into the buffer
1764 * (non-buffer flash only) */
1765 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1766 if (j == (sc->bnx_flash_info->page_size - 4))
1767 cmd_flags |= BNX_NVM_COMMAND_LAST;
1768
1769 rc = bnx_nvram_read_dword(sc,
1770 page_start + j,
1771 &flash_buffer[j],
1772 cmd_flags);
1773
1774 if (rc)
1775 goto nvram_write_end;
1776
1777 cmd_flags = 0;
1778 }
1779 }
1780
1781 /* Enable writes to flash interface (unlock write-protect) */
1782 if ((rc = bnx_enable_nvram_write(sc)) != 0)
1783 goto nvram_write_end;
1784
1785 /* Erase the page */
1786 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1787 goto nvram_write_end;
1788
1789 /* Re-enable the write again for the actual write */
1790 bnx_enable_nvram_write(sc);
1791
1792 /* Loop to write back the buffer data from page_start to
1793 * data_start */
1794 i = 0;
1795 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1796 for (addr = page_start; addr < data_start;
1797 addr += 4, i += 4) {
1798
1799 rc = bnx_nvram_write_dword(sc, addr,
1800 &flash_buffer[i], cmd_flags);
1801
1802 if (rc != 0)
1803 goto nvram_write_end;
1804
1805 cmd_flags = 0;
1806 }
1807 }
1808
1809 /* Loop to write the new data from data_start to data_end */
1810 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1811 if ((addr == page_end - 4) ||
1812 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
1813 && (addr == data_end - 4))) {
1814
1815 cmd_flags |= BNX_NVM_COMMAND_LAST;
1816 }
1817
1818 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
1819
1820 if (rc != 0)
1821 goto nvram_write_end;
1822
1823 cmd_flags = 0;
1824 buf += 4;
1825 }
1826
1827 /* Loop to write back the buffer data from data_end
1828 * to page_end */
1829 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1830 for (addr = data_end; addr < page_end;
1831 addr += 4, i += 4) {
1832
1833 if (addr == page_end-4)
1834 cmd_flags = BNX_NVM_COMMAND_LAST;
1835
1836 rc = bnx_nvram_write_dword(sc, addr,
1837 &flash_buffer[i], cmd_flags);
1838
1839 if (rc != 0)
1840 goto nvram_write_end;
1841
1842 cmd_flags = 0;
1843 }
1844 }
1845
1846 /* Disable writes to flash interface (lock write-protect) */
1847 bnx_disable_nvram_write(sc);
1848
1849 /* Disable access to flash interface */
1850 bnx_disable_nvram_access(sc);
1851 bnx_release_nvram_lock(sc);
1852
1853 /* Increment written */
1854 written += data_end - data_start;
1855 }
1856
1857 nvram_write_end:
1858 if (align_start || align_end)
1859 free(buf, M_DEVBUF);
1860
1861 return (rc);
1862 }
1863 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1864
1865 /****************************************************************************/
1866 /* Verifies that NVRAM is accessible and contains valid data. */
1867 /* */
1868 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1869 /* correct. */
1870 /* */
1871 /* Returns: */
1872 /* 0 on success, positive value on failure. */
1873 /****************************************************************************/
1874 int
1875 bnx_nvram_test(struct bnx_softc *sc)
1876 {
1877 u_int32_t buf[BNX_NVRAM_SIZE / 4];
1878 u_int8_t *data = (u_int8_t *) buf;
1879 int rc = 0;
1880 u_int32_t magic, csum;
1881
1882 /*
1883 * Check that the device NVRAM is valid by reading
1884 * the magic value at offset 0.
1885 */
1886 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
1887 goto bnx_nvram_test_done;
1888
1889 magic = bnx_be32toh(buf[0]);
1890 if (magic != BNX_NVRAM_MAGIC) {
1891 rc = ENODEV;
1892 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
1893 "Expected: 0x%08X, Found: 0x%08X\n",
1894 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
1895 goto bnx_nvram_test_done;
1896 }
1897
1898 /*
1899 * Verify that the device NVRAM includes valid
1900 * configuration data.
1901 */
1902 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
1903 goto bnx_nvram_test_done;
1904
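	/*
	 * Each 0x100-byte configuration region carries its own CRC32,
	 * so running the CRC over the data and the stored checksum
	 * together must yield the fixed CRC32 residual.
	 */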
1905 csum = ether_crc32_le(data, 0x100);
1906 if (csum != BNX_CRC32_RESIDUAL) {
1907 rc = ENODEV;
1908 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
1909 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1910 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1911 goto bnx_nvram_test_done;
1912 }
1913
1914 csum = ether_crc32_le(data + 0x100, 0x100);
1915 if (csum != BNX_CRC32_RESIDUAL) {
1916 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
1917 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1918 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1919 rc = ENODEV;
1920 }
1921
1922 bnx_nvram_test_done:
1923 return (rc);
1924 }
1925
1926 /****************************************************************************/
1927 /* Identifies the current media type of the controller and sets the PHY */
1928 /* address. */
1929 /* */
1930 /* Returns: */
1931 /* Nothing. */
1932 /****************************************************************************/
1933 void
1934 bnx_get_media(struct bnx_softc *sc)
1935 {
1936 sc->bnx_phy_addr = 1;
1937
1938 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1939 u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
1940 u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1941 u_int32_t strap;
1942
1943 /*
1944 * The BCM5709S is software configurable
1945 * for Copper or SerDes operation.
1946 */
1947 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1948 DBPRINT(sc, BNX_INFO_LOAD,
1949 "5709 bonded for copper.\n");
1950 goto bnx_get_media_exit;
1951 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1952 DBPRINT(sc, BNX_INFO_LOAD,
1953 "5709 bonded for dual media.\n");
1954 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
1955 goto bnx_get_media_exit;
1956 }
1957
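		/*
		 * Dual-media bond: consult the PHY strap value, which
		 * software may override, to pick copper vs. SerDes.
		 */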
1958 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
1959 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1960 else {
1961 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
1962 >> 8;
1963 }
1964
1965 if (sc->bnx_pa.pa_function == 0) {
1966 switch (strap) {
1967 case 0x4:
1968 case 0x5:
1969 case 0x6:
1970 DBPRINT(sc, BNX_INFO_LOAD,
1971 "BCM5709 s/w configured for SerDes.\n");
1972 				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
1973 			default:
1974 DBPRINT(sc, BNX_INFO_LOAD,
1975 "BCM5709 s/w configured for Copper.\n");
1976 }
1977 } else {
1978 switch (strap) {
1979 case 0x1:
1980 case 0x2:
1981 case 0x4:
1982 DBPRINT(sc, BNX_INFO_LOAD,
1983 "BCM5709 s/w configured for SerDes.\n");
1984 				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
1985 			default:
1986 DBPRINT(sc, BNX_INFO_LOAD,
1987 "BCM5709 s/w configured for Copper.\n");
1988 }
1989 }
1990
1991 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
1992 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
1993
1994 	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
1995 u_int32_t val;
1996
1997 sc->bnx_flags |= BNX_NO_WOL_FLAG;
1998 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1999 sc->bnx_phy_addr = 2;
2000 val = REG_RD_IND(sc, sc->bnx_shmem_base +
2001 BNX_SHARED_HW_CFG_CONFIG);
2002 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
2003 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
2004 DBPRINT(sc, BNX_INFO_LOAD,
2005 "Found 2.5Gb capable adapter\n");
2006 }
2007 }
2008 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
2009 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
2010 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;
2011
2012 bnx_get_media_exit:
2013 	DBPRINT(sc, BNX_INFO_LOAD,
2014 "Using PHY address %d.\n", sc->bnx_phy_addr);
2015 }
2016
2017 /****************************************************************************/
2018 /* Free any DMA memory owned by the driver. */
2019 /* */
2020 /* Scans through each data structure that requires DMA memory and frees    */
2021 /* the memory if allocated. */
2022 /* */
2023 /* Returns: */
2024 /* Nothing. */
2025 /****************************************************************************/
2026 void
2027 bnx_dma_free(struct bnx_softc *sc)
2028 {
2029 int i;
2030
2031 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2032
2033 /* Destroy the status block. */
2034 if (sc->status_block != NULL && sc->status_map != NULL) {
2035 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2036 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
2037 BNX_STATUS_BLK_SZ);
2038 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2039 sc->status_rseg);
2040 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2041 sc->status_block = NULL;
2042 sc->status_map = NULL;
2043 }
2044
2045 /* Destroy the statistics block. */
2046 if (sc->stats_block != NULL && sc->stats_map != NULL) {
2047 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2048 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
2049 BNX_STATS_BLK_SZ);
2050 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2051 sc->stats_rseg);
2052 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2053 sc->stats_block = NULL;
2054 sc->stats_map = NULL;
2055 }
2056
2057 /* Free, unmap and destroy all context memory pages. */
2058 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2059 for (i = 0; i < sc->ctx_pages; i++) {
2060 if (sc->ctx_block[i] != NULL) {
2061 bus_dmamap_unload(sc->bnx_dmatag,
2062 sc->ctx_map[i]);
2063 bus_dmamem_unmap(sc->bnx_dmatag,
2064 (void *)sc->ctx_block[i],
2065 BCM_PAGE_SIZE);
2066 bus_dmamem_free(sc->bnx_dmatag,
2067 &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2068 bus_dmamap_destroy(sc->bnx_dmatag,
2069 sc->ctx_map[i]);
2070 sc->ctx_block[i] = NULL;
2071 }
2072 }
2073 }
2074
2075 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2076 for (i = 0; i < TX_PAGES; i++ ) {
2077 if (sc->tx_bd_chain[i] != NULL &&
2078 sc->tx_bd_chain_map[i] != NULL) {
2079 bus_dmamap_unload(sc->bnx_dmatag,
2080 sc->tx_bd_chain_map[i]);
2081 bus_dmamem_unmap(sc->bnx_dmatag,
2082 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2083 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2084 sc->tx_bd_chain_rseg[i]);
2085 bus_dmamap_destroy(sc->bnx_dmatag,
2086 sc->tx_bd_chain_map[i]);
2087 sc->tx_bd_chain[i] = NULL;
2088 sc->tx_bd_chain_map[i] = NULL;
2089 }
2090 }
2091
2092 /* Destroy the TX dmamaps. */
2093 	/* This isn't necessary since we don't allocate them up front. */
2094
2095 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2096 for (i = 0; i < RX_PAGES; i++ ) {
2097 if (sc->rx_bd_chain[i] != NULL &&
2098 sc->rx_bd_chain_map[i] != NULL) {
2099 bus_dmamap_unload(sc->bnx_dmatag,
2100 sc->rx_bd_chain_map[i]);
2101 bus_dmamem_unmap(sc->bnx_dmatag,
2102 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2103 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2104 sc->rx_bd_chain_rseg[i]);
2105
2106 bus_dmamap_destroy(sc->bnx_dmatag,
2107 sc->rx_bd_chain_map[i]);
2108 sc->rx_bd_chain[i] = NULL;
2109 sc->rx_bd_chain_map[i] = NULL;
2110 }
2111 }
2112
2113 /* Unload and destroy the RX mbuf maps. */
2114 for (i = 0; i < TOTAL_RX_BD; i++) {
2115 if (sc->rx_mbuf_map[i] != NULL) {
2116 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2117 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2118 }
2119 }
2120
2121 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2122 }
2123
2124 /****************************************************************************/
2125 /* Allocate any DMA memory needed by the driver. */
2126 /* */
2127 /* Allocates DMA memory needed for the various global structures needed by */
2128 /* hardware. */
2129 /* */
2130 /* Returns: */
2131 /* 0 for success, positive value for failure. */
2132 /****************************************************************************/
2133 int
2134 bnx_dma_alloc(struct bnx_softc *sc)
2135 {
2136 int i, rc = 0;
2137
2138 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2139
2140 /*
2141 * Allocate DMA memory for the status block, map the memory into DMA
2142 * space, and fetch the physical address of the block.
2143 */
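	/*
	 * All allocations below follow the usual four-step bus_dma(9)
	 * idiom: create a DMA map, allocate DMA-safe memory, map it
	 * into kernel virtual address space, then load the map to
	 * obtain the physical (bus) address.
	 */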
2144 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2145 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2146 aprint_error_dev(sc->bnx_dev,
2147 "Could not create status block DMA map!\n");
2148 rc = ENOMEM;
2149 goto bnx_dma_alloc_exit;
2150 }
2151
2152 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2153 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2154 &sc->status_rseg, BUS_DMA_NOWAIT)) {
2155 aprint_error_dev(sc->bnx_dev,
2156 "Could not allocate status block DMA memory!\n");
2157 rc = ENOMEM;
2158 goto bnx_dma_alloc_exit;
2159 }
2160
2161 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2162 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
2163 aprint_error_dev(sc->bnx_dev,
2164 "Could not map status block DMA memory!\n");
2165 rc = ENOMEM;
2166 goto bnx_dma_alloc_exit;
2167 }
2168
2169 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2170 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2171 aprint_error_dev(sc->bnx_dev,
2172 "Could not load status block DMA memory!\n");
2173 rc = ENOMEM;
2174 goto bnx_dma_alloc_exit;
2175 }
2176
2177 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2178 bzero(sc->status_block, BNX_STATUS_BLK_SZ);
2179
2180 /* DRC - Fix for 64 bit addresses. */
2181 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2182 (u_int32_t) sc->status_block_paddr);
2183
2184 /* BCM5709 uses host memory as cache for context memory. */
2185 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2186 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2187 if (sc->ctx_pages == 0)
2188 sc->ctx_pages = 1;
2189 if (sc->ctx_pages > 4) /* XXX */
2190 sc->ctx_pages = 4;
2191
2192 DBRUNIF((sc->ctx_pages > 512),
2193 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
2194 __FILE__, __LINE__, sc->ctx_pages));
2195
2197 for (i = 0; i < sc->ctx_pages; i++) {
2198 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2199 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2200 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2201 &sc->ctx_map[i]) != 0) {
2202 rc = ENOMEM;
2203 goto bnx_dma_alloc_exit;
2204 }
2205
2206 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2207 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2208 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2209 rc = ENOMEM;
2210 goto bnx_dma_alloc_exit;
2211 }
2212
2213 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2214 sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2215 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
2216 rc = ENOMEM;
2217 goto bnx_dma_alloc_exit;
2218 }
2219
2220 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2221 sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2222 BUS_DMA_NOWAIT) != 0) {
2223 rc = ENOMEM;
2224 goto bnx_dma_alloc_exit;
2225 }
2226
2227 bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2228 }
2229 }
2230
2231 /*
2232 * Allocate DMA memory for the statistics block, map the memory into
2233 * DMA space, and fetch the physical address of the block.
2234 */
2235 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2236 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2237 aprint_error_dev(sc->bnx_dev,
2238 "Could not create stats block DMA map!\n");
2239 rc = ENOMEM;
2240 goto bnx_dma_alloc_exit;
2241 }
2242
2243 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2244 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2245 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2246 aprint_error_dev(sc->bnx_dev,
2247 "Could not allocate stats block DMA memory!\n");
2248 rc = ENOMEM;
2249 goto bnx_dma_alloc_exit;
2250 }
2251
2252 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2253 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2254 aprint_error_dev(sc->bnx_dev,
2255 "Could not map stats block DMA memory!\n");
2256 rc = ENOMEM;
2257 goto bnx_dma_alloc_exit;
2258 }
2259
2260 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2261 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2262 aprint_error_dev(sc->bnx_dev,
2263 		    "Could not load stats block DMA memory!\n");
2264 rc = ENOMEM;
2265 goto bnx_dma_alloc_exit;
2266 }
2267
2268 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2269 bzero(sc->stats_block, BNX_STATS_BLK_SZ);
2270
2271 /* DRC - Fix for 64 bit address. */
2272 	DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2273 (u_int32_t) sc->stats_block_paddr);
2274
2275 /*
2276 * Allocate DMA memory for the TX buffer descriptor chain,
2277 * and fetch the physical address of the block.
2278 */
2279 for (i = 0; i < TX_PAGES; i++) {
2280 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2281 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2282 &sc->tx_bd_chain_map[i])) {
2283 aprint_error_dev(sc->bnx_dev,
2284 "Could not create Tx desc %d DMA map!\n", i);
2285 rc = ENOMEM;
2286 goto bnx_dma_alloc_exit;
2287 }
2288
2289 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2290 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2291 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2292 aprint_error_dev(sc->bnx_dev,
2293 "Could not allocate TX desc %d DMA memory!\n",
2294 i);
2295 rc = ENOMEM;
2296 goto bnx_dma_alloc_exit;
2297 }
2298
2299 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2300 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2301 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2302 aprint_error_dev(sc->bnx_dev,
2303 "Could not map TX desc %d DMA memory!\n", i);
2304 rc = ENOMEM;
2305 goto bnx_dma_alloc_exit;
2306 }
2307
2308 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2309 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2310 BUS_DMA_NOWAIT)) {
2311 aprint_error_dev(sc->bnx_dev,
2312 "Could not load TX desc %d DMA memory!\n", i);
2313 rc = ENOMEM;
2314 goto bnx_dma_alloc_exit;
2315 }
2316
2317 sc->tx_bd_chain_paddr[i] =
2318 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2319
2320 /* DRC - Fix for 64 bit systems. */
2321 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2322 i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2323 }
2324
2325 /*
2326 * Create lists to hold TX mbufs.
2327 */
2328 TAILQ_INIT(&sc->tx_free_pkts);
2329 TAILQ_INIT(&sc->tx_used_pkts);
2330 sc->tx_pkt_count = 0;
2331 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET);
2332
2333 /*
2334 * Allocate DMA memory for the Rx buffer descriptor chain,
2335 * and fetch the physical address of the block.
2336 */
2337 for (i = 0; i < RX_PAGES; i++) {
2338 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2339 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2340 &sc->rx_bd_chain_map[i])) {
2341 aprint_error_dev(sc->bnx_dev,
2342 "Could not create Rx desc %d DMA map!\n", i);
2343 rc = ENOMEM;
2344 goto bnx_dma_alloc_exit;
2345 }
2346
2347 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2348 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2349 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2350 aprint_error_dev(sc->bnx_dev,
2351 "Could not allocate Rx desc %d DMA memory!\n", i);
2352 rc = ENOMEM;
2353 goto bnx_dma_alloc_exit;
2354 }
2355
2356 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2357 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2358 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2359 aprint_error_dev(sc->bnx_dev,
2360 "Could not map Rx desc %d DMA memory!\n", i);
2361 rc = ENOMEM;
2362 goto bnx_dma_alloc_exit;
2363 }
2364
2365 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2366 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2367 BUS_DMA_NOWAIT)) {
2368 aprint_error_dev(sc->bnx_dev,
2369 "Could not load Rx desc %d DMA memory!\n", i);
2370 rc = ENOMEM;
2371 goto bnx_dma_alloc_exit;
2372 }
2373
2374 bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2375 sc->rx_bd_chain_paddr[i] =
2376 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2377
2378 /* DRC - Fix for 64 bit systems. */
2379 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2380 i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2381 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2382 0, BNX_RX_CHAIN_PAGE_SZ,
2383 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2384 }
2385
2386 /*
2387 * Create DMA maps for the Rx buffer mbufs.
2388 */
2389 for (i = 0; i < TOTAL_RX_BD; i++) {
2390 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2391 BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2392 &sc->rx_mbuf_map[i])) {
2393 aprint_error_dev(sc->bnx_dev,
2394 "Could not create Rx mbuf %d DMA map!\n", i);
2395 rc = ENOMEM;
2396 goto bnx_dma_alloc_exit;
2397 }
2398 }
2399
2400 bnx_dma_alloc_exit:
2401 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2402
2403 	return (rc);
2404 }
2405
2406 /****************************************************************************/
2407 /* Release all resources used by the driver. */
2408 /* */
2409 /* Releases all resources acquired by the driver including interrupts, */
2410 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2411 /* */
2412 /* Returns: */
2413 /* Nothing. */
2414 /****************************************************************************/
2415 void
2416 bnx_release_resources(struct bnx_softc *sc)
2417 {
2418 int i;
2419 struct pci_attach_args *pa = &(sc->bnx_pa);
2420
2421 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2422
2423 bnx_dma_free(sc);
2424
2425 if (sc->bnx_intrhand != NULL)
2426 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2427
2428 if (sc->bnx_size)
2429 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2430
2431 for (i = 0; i < TOTAL_RX_BD; i++)
2432 if (sc->rx_mbuf_map[i])
2433 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2434
2435 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2436 }
2437
2438 /****************************************************************************/
2439 /* Firmware synchronization. */
2440 /* */
2441 /* Before performing certain events such as a chip reset, synchronize with */
2442 /* the firmware first. */
2443 /* */
2444 /* Returns: */
2445 /* 0 for success, positive value for failure. */
2446 /****************************************************************************/
2447 int
2448 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2449 {
2450 int i, rc = 0;
2451 u_int32_t val;
2452
2453 /* Don't waste any time if we've timed out before. */
2454 if (sc->bnx_fw_timed_out) {
2455 rc = EBUSY;
2456 goto bnx_fw_sync_exit;
2457 }
2458
2459 /* Increment the message sequence number. */
2460 sc->bnx_fw_wr_seq++;
2461 msg_data |= sc->bnx_fw_wr_seq;
2462
2463 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2464 msg_data);
2465
2466 /* Send the message to the bootcode driver mailbox. */
2467 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2468
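	/*
	 * The handshake is sequence based: the driver posts the message
	 * with the incremented sequence number in the driver mailbox and
	 * the bootcode acknowledges by echoing the same sequence number
	 * in the firmware mailbox.
	 */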
2469 /* Wait for the bootcode to acknowledge the message. */
2470 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2471 /* Check for a response in the bootcode firmware mailbox. */
2472 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2473 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2474 break;
2475 DELAY(1000);
2476 }
2477
2478 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2479 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2480 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2481 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2482 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2483
2484 msg_data &= ~BNX_DRV_MSG_CODE;
2485 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2486
2487 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2488
2489 sc->bnx_fw_timed_out = 1;
2490 rc = EBUSY;
2491 }
2492
2493 bnx_fw_sync_exit:
2494 return (rc);
2495 }
2496
2497 /****************************************************************************/
2498 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2499 /* */
2500 /* Returns: */
2501 /* Nothing. */
2502 /****************************************************************************/
2503 void
2504 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2505 u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2506 {
2507 int i;
2508 u_int32_t val;
2509
2510 /* Set the page size used by RV2P. */
2511 if (rv2p_proc == RV2P_PROC2) {
2512 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2513 USABLE_RX_BD_PER_PAGE);
2514 }
2515
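	/*
	 * Each RV2P instruction is 64 bits wide: write the high and low
	 * words, then latch them into instruction memory at index i / 8
	 * through the appropriate ADDR_CMD register.
	 */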
2516 for (i = 0; i < rv2p_code_len; i += 8) {
2517 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2518 rv2p_code++;
2519 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2520 rv2p_code++;
2521
2522 if (rv2p_proc == RV2P_PROC1) {
2523 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2524 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2525 } else {
2526 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2527 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2528 }
2529 }
2530
2531 /* Reset the processor, un-stall is done later. */
2532 if (rv2p_proc == RV2P_PROC1)
2533 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2534 else
2535 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2536 }
2537
2538 /****************************************************************************/
2539 /* Load RISC processor firmware. */
2540 /* */
2541 /* Loads firmware from the file bnxfw.h into the scratchpad memory         */
2542 /* associated with a particular processor. */
2543 /* */
2544 /* Returns: */
2545 /* Nothing. */
2546 /****************************************************************************/
2547 void
2548 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2549 struct fw_info *fw)
2550 {
2551 u_int32_t offset;
2552 u_int32_t val;
2553
2554 /* Halt the CPU. */
2555 val = REG_RD_IND(sc, cpu_reg->mode);
2556 val |= cpu_reg->mode_value_halt;
2557 REG_WR_IND(sc, cpu_reg->mode, val);
2558 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2559
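	/*
	 * Firmware section addresses are MIPS virtual addresses; rebase
	 * them against mips_view_base to obtain offsets into the
	 * processor's scratchpad window before writing each word.
	 */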
2560 /* Load the Text area. */
2561 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2562 if (fw->text) {
2563 int j;
2564
2565 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2566 REG_WR_IND(sc, offset, fw->text[j]);
2567 }
2568
2569 /* Load the Data area. */
2570 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2571 if (fw->data) {
2572 int j;
2573
2574 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2575 REG_WR_IND(sc, offset, fw->data[j]);
2576 }
2577
2578 /* Load the SBSS area. */
2579 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2580 if (fw->sbss) {
2581 int j;
2582
2583 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2584 REG_WR_IND(sc, offset, fw->sbss[j]);
2585 }
2586
2587 /* Load the BSS area. */
2588 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2589 if (fw->bss) {
2590 int j;
2591
2592 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2593 REG_WR_IND(sc, offset, fw->bss[j]);
2594 }
2595
2596 /* Load the Read-Only area. */
2597 offset = cpu_reg->spad_base +
2598 (fw->rodata_addr - cpu_reg->mips_view_base);
2599 if (fw->rodata) {
2600 int j;
2601
2602 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2603 REG_WR_IND(sc, offset, fw->rodata[j]);
2604 }
2605
2606 /* Clear the pre-fetch instruction. */
2607 REG_WR_IND(sc, cpu_reg->inst, 0);
2608 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2609
2610 /* Start the CPU. */
2611 val = REG_RD_IND(sc, cpu_reg->mode);
2612 val &= ~cpu_reg->mode_value_halt;
2613 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2614 REG_WR_IND(sc, cpu_reg->mode, val);
2615 }
2616
2617 /****************************************************************************/
2618 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2619 /* */
2620 /* Loads the firmware for each CPU and starts the CPU. */
2621 /* */
2622 /* Returns: */
2623 /* Nothing. */
2624 /****************************************************************************/
2625 void
2626 bnx_init_cpus(struct bnx_softc *sc)
2627 {
2628 struct cpu_reg cpu_reg;
2629 struct fw_info fw;
2630
2631 	switch (BNX_CHIP_NUM(sc)) {
2632 case BNX_CHIP_NUM_5709:
2633 /* Initialize the RV2P processor. */
2634 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) {
2635 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1,
2636 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1);
2637 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2,
2638 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2);
2639 } else {
2640 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1,
2641 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1);
2642 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2,
2643 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2);
2644 }
2645
2646 /* Initialize the RX Processor. */
2647 cpu_reg.mode = BNX_RXP_CPU_MODE;
2648 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2649 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2650 cpu_reg.state = BNX_RXP_CPU_STATE;
2651 cpu_reg.state_value_clear = 0xffffff;
2652 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2653 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2654 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2655 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2656 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2657 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2658 cpu_reg.mips_view_base = 0x8000000;
2659
2660 fw.ver_major = bnx_RXP_b09FwReleaseMajor;
2661 fw.ver_minor = bnx_RXP_b09FwReleaseMinor;
2662 fw.ver_fix = bnx_RXP_b09FwReleaseFix;
2663 fw.start_addr = bnx_RXP_b09FwStartAddr;
2664
2665 fw.text_addr = bnx_RXP_b09FwTextAddr;
2666 fw.text_len = bnx_RXP_b09FwTextLen;
2667 fw.text_index = 0;
2668 fw.text = bnx_RXP_b09FwText;
2669
2670 fw.data_addr = bnx_RXP_b09FwDataAddr;
2671 fw.data_len = bnx_RXP_b09FwDataLen;
2672 fw.data_index = 0;
2673 fw.data = bnx_RXP_b09FwData;
2674
2675 fw.sbss_addr = bnx_RXP_b09FwSbssAddr;
2676 fw.sbss_len = bnx_RXP_b09FwSbssLen;
2677 fw.sbss_index = 0;
2678 fw.sbss = bnx_RXP_b09FwSbss;
2679
2680 fw.bss_addr = bnx_RXP_b09FwBssAddr;
2681 fw.bss_len = bnx_RXP_b09FwBssLen;
2682 fw.bss_index = 0;
2683 fw.bss = bnx_RXP_b09FwBss;
2684
2685 fw.rodata_addr = bnx_RXP_b09FwRodataAddr;
2686 fw.rodata_len = bnx_RXP_b09FwRodataLen;
2687 fw.rodata_index = 0;
2688 fw.rodata = bnx_RXP_b09FwRodata;
2689
2690 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2691 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2692
2693 /* Initialize the TX Processor. */
2694 cpu_reg.mode = BNX_TXP_CPU_MODE;
2695 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2696 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2697 cpu_reg.state = BNX_TXP_CPU_STATE;
2698 cpu_reg.state_value_clear = 0xffffff;
2699 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2700 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2701 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2702 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2703 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2704 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2705 cpu_reg.mips_view_base = 0x8000000;
2706
2707 fw.ver_major = bnx_TXP_b09FwReleaseMajor;
2708 fw.ver_minor = bnx_TXP_b09FwReleaseMinor;
2709 fw.ver_fix = bnx_TXP_b09FwReleaseFix;
2710 fw.start_addr = bnx_TXP_b09FwStartAddr;
2711
2712 fw.text_addr = bnx_TXP_b09FwTextAddr;
2713 fw.text_len = bnx_TXP_b09FwTextLen;
2714 fw.text_index = 0;
2715 fw.text = bnx_TXP_b09FwText;
2716
2717 fw.data_addr = bnx_TXP_b09FwDataAddr;
2718 fw.data_len = bnx_TXP_b09FwDataLen;
2719 fw.data_index = 0;
2720 fw.data = bnx_TXP_b09FwData;
2721
2722 fw.sbss_addr = bnx_TXP_b09FwSbssAddr;
2723 fw.sbss_len = bnx_TXP_b09FwSbssLen;
2724 fw.sbss_index = 0;
2725 fw.sbss = bnx_TXP_b09FwSbss;
2726
2727 fw.bss_addr = bnx_TXP_b09FwBssAddr;
2728 fw.bss_len = bnx_TXP_b09FwBssLen;
2729 fw.bss_index = 0;
2730 fw.bss = bnx_TXP_b09FwBss;
2731
2732 fw.rodata_addr = bnx_TXP_b09FwRodataAddr;
2733 fw.rodata_len = bnx_TXP_b09FwRodataLen;
2734 fw.rodata_index = 0;
2735 fw.rodata = bnx_TXP_b09FwRodata;
2736
2737 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2738 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2739
2740 /* Initialize the TX Patch-up Processor. */
2741 cpu_reg.mode = BNX_TPAT_CPU_MODE;
2742 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2743 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2744 cpu_reg.state = BNX_TPAT_CPU_STATE;
2745 cpu_reg.state_value_clear = 0xffffff;
2746 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2747 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2748 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2749 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2750 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2751 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2752 cpu_reg.mips_view_base = 0x8000000;
2753
2754 fw.ver_major = bnx_TPAT_b09FwReleaseMajor;
2755 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor;
2756 fw.ver_fix = bnx_TPAT_b09FwReleaseFix;
2757 fw.start_addr = bnx_TPAT_b09FwStartAddr;
2758
2759 fw.text_addr = bnx_TPAT_b09FwTextAddr;
2760 fw.text_len = bnx_TPAT_b09FwTextLen;
2761 fw.text_index = 0;
2762 fw.text = bnx_TPAT_b09FwText;
2763
2764 fw.data_addr = bnx_TPAT_b09FwDataAddr;
2765 fw.data_len = bnx_TPAT_b09FwDataLen;
2766 fw.data_index = 0;
2767 fw.data = bnx_TPAT_b09FwData;
2768
2769 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr;
2770 fw.sbss_len = bnx_TPAT_b09FwSbssLen;
2771 fw.sbss_index = 0;
2772 fw.sbss = bnx_TPAT_b09FwSbss;
2773
2774 fw.bss_addr = bnx_TPAT_b09FwBssAddr;
2775 fw.bss_len = bnx_TPAT_b09FwBssLen;
2776 fw.bss_index = 0;
2777 fw.bss = bnx_TPAT_b09FwBss;
2778
2779 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr;
2780 fw.rodata_len = bnx_TPAT_b09FwRodataLen;
2781 fw.rodata_index = 0;
2782 fw.rodata = bnx_TPAT_b09FwRodata;
2783
2784 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2785 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2786
2787 /* Initialize the Completion Processor. */
2788 cpu_reg.mode = BNX_COM_CPU_MODE;
2789 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2790 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2791 cpu_reg.state = BNX_COM_CPU_STATE;
2792 cpu_reg.state_value_clear = 0xffffff;
2793 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2794 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2795 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2796 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2797 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2798 cpu_reg.spad_base = BNX_COM_SCRATCH;
2799 cpu_reg.mips_view_base = 0x8000000;
2800
2801 fw.ver_major = bnx_COM_b09FwReleaseMajor;
2802 fw.ver_minor = bnx_COM_b09FwReleaseMinor;
2803 fw.ver_fix = bnx_COM_b09FwReleaseFix;
2804 fw.start_addr = bnx_COM_b09FwStartAddr;
2805
2806 fw.text_addr = bnx_COM_b09FwTextAddr;
2807 fw.text_len = bnx_COM_b09FwTextLen;
2808 fw.text_index = 0;
2809 fw.text = bnx_COM_b09FwText;
2810
2811 fw.data_addr = bnx_COM_b09FwDataAddr;
2812 fw.data_len = bnx_COM_b09FwDataLen;
2813 fw.data_index = 0;
2814 fw.data = bnx_COM_b09FwData;
2815
2816 fw.sbss_addr = bnx_COM_b09FwSbssAddr;
2817 fw.sbss_len = bnx_COM_b09FwSbssLen;
2818 fw.sbss_index = 0;
2819 fw.sbss = bnx_COM_b09FwSbss;
2820
2821 fw.bss_addr = bnx_COM_b09FwBssAddr;
2822 fw.bss_len = bnx_COM_b09FwBssLen;
2823 fw.bss_index = 0;
2824 fw.bss = bnx_COM_b09FwBss;
2825
2826 fw.rodata_addr = bnx_COM_b09FwRodataAddr;
2827 fw.rodata_len = bnx_COM_b09FwRodataLen;
2828 fw.rodata_index = 0;
2829 fw.rodata = bnx_COM_b09FwRodata;
2830 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2831 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2832 break;
2833 default:
2834 /* Initialize the RV2P processor. */
2835 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
2836 RV2P_PROC1);
2837 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
2838 RV2P_PROC2);
2839
2840 /* Initialize the RX Processor. */
2841 cpu_reg.mode = BNX_RXP_CPU_MODE;
2842 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2843 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2844 cpu_reg.state = BNX_RXP_CPU_STATE;
2845 cpu_reg.state_value_clear = 0xffffff;
2846 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2847 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2848 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2849 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2850 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2851 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2852 cpu_reg.mips_view_base = 0x8000000;
2853
2854 fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2855 fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2856 fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2857 fw.start_addr = bnx_RXP_b06FwStartAddr;
2858
2859 fw.text_addr = bnx_RXP_b06FwTextAddr;
2860 fw.text_len = bnx_RXP_b06FwTextLen;
2861 fw.text_index = 0;
2862 fw.text = bnx_RXP_b06FwText;
2863
2864 fw.data_addr = bnx_RXP_b06FwDataAddr;
2865 fw.data_len = bnx_RXP_b06FwDataLen;
2866 fw.data_index = 0;
2867 fw.data = bnx_RXP_b06FwData;
2868
2869 fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2870 fw.sbss_len = bnx_RXP_b06FwSbssLen;
2871 fw.sbss_index = 0;
2872 fw.sbss = bnx_RXP_b06FwSbss;
2873
2874 fw.bss_addr = bnx_RXP_b06FwBssAddr;
2875 fw.bss_len = bnx_RXP_b06FwBssLen;
2876 fw.bss_index = 0;
2877 fw.bss = bnx_RXP_b06FwBss;
2878
2879 fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2880 fw.rodata_len = bnx_RXP_b06FwRodataLen;
2881 fw.rodata_index = 0;
2882 fw.rodata = bnx_RXP_b06FwRodata;
2883
2884 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2885 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2886
2887 /* Initialize the TX Processor. */
2888 cpu_reg.mode = BNX_TXP_CPU_MODE;
2889 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2890 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2891 cpu_reg.state = BNX_TXP_CPU_STATE;
2892 cpu_reg.state_value_clear = 0xffffff;
2893 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2894 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2895 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2896 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2897 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2898 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2899 cpu_reg.mips_view_base = 0x8000000;
2900
2901 fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2902 fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2903 fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2904 fw.start_addr = bnx_TXP_b06FwStartAddr;
2905
2906 fw.text_addr = bnx_TXP_b06FwTextAddr;
2907 fw.text_len = bnx_TXP_b06FwTextLen;
2908 fw.text_index = 0;
2909 fw.text = bnx_TXP_b06FwText;
2910
2911 fw.data_addr = bnx_TXP_b06FwDataAddr;
2912 fw.data_len = bnx_TXP_b06FwDataLen;
2913 fw.data_index = 0;
2914 fw.data = bnx_TXP_b06FwData;
2915
2916 fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2917 fw.sbss_len = bnx_TXP_b06FwSbssLen;
2918 fw.sbss_index = 0;
2919 fw.sbss = bnx_TXP_b06FwSbss;
2920
2921 fw.bss_addr = bnx_TXP_b06FwBssAddr;
2922 fw.bss_len = bnx_TXP_b06FwBssLen;
2923 fw.bss_index = 0;
2924 fw.bss = bnx_TXP_b06FwBss;
2925
2926 fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2927 fw.rodata_len = bnx_TXP_b06FwRodataLen;
2928 fw.rodata_index = 0;
2929 fw.rodata = bnx_TXP_b06FwRodata;
2930
2931 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2932 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2933
2934 /* Initialize the TX Patch-up Processor. */
2935 cpu_reg.mode = BNX_TPAT_CPU_MODE;
2936 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2937 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2938 cpu_reg.state = BNX_TPAT_CPU_STATE;
2939 cpu_reg.state_value_clear = 0xffffff;
2940 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2941 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2942 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2943 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2944 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2945 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2946 cpu_reg.mips_view_base = 0x8000000;
2947
2948 fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2949 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2950 fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2951 fw.start_addr = bnx_TPAT_b06FwStartAddr;
2952
2953 fw.text_addr = bnx_TPAT_b06FwTextAddr;
2954 fw.text_len = bnx_TPAT_b06FwTextLen;
2955 fw.text_index = 0;
2956 fw.text = bnx_TPAT_b06FwText;
2957
2958 fw.data_addr = bnx_TPAT_b06FwDataAddr;
2959 fw.data_len = bnx_TPAT_b06FwDataLen;
2960 fw.data_index = 0;
2961 fw.data = bnx_TPAT_b06FwData;
2962
2963 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2964 fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2965 fw.sbss_index = 0;
2966 fw.sbss = bnx_TPAT_b06FwSbss;
2967
2968 fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2969 fw.bss_len = bnx_TPAT_b06FwBssLen;
2970 fw.bss_index = 0;
2971 fw.bss = bnx_TPAT_b06FwBss;
2972
2973 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2974 fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2975 fw.rodata_index = 0;
2976 fw.rodata = bnx_TPAT_b06FwRodata;
2977
2978 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2979 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2980
2981 /* Initialize the Completion Processor. */
2982 cpu_reg.mode = BNX_COM_CPU_MODE;
2983 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2984 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2985 cpu_reg.state = BNX_COM_CPU_STATE;
2986 cpu_reg.state_value_clear = 0xffffff;
2987 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2988 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2989 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2990 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2991 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2992 cpu_reg.spad_base = BNX_COM_SCRATCH;
2993 cpu_reg.mips_view_base = 0x8000000;
2994
2995 fw.ver_major = bnx_COM_b06FwReleaseMajor;
2996 fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2997 fw.ver_fix = bnx_COM_b06FwReleaseFix;
2998 fw.start_addr = bnx_COM_b06FwStartAddr;
2999
3000 fw.text_addr = bnx_COM_b06FwTextAddr;
3001 fw.text_len = bnx_COM_b06FwTextLen;
3002 fw.text_index = 0;
3003 fw.text = bnx_COM_b06FwText;
3004
3005 fw.data_addr = bnx_COM_b06FwDataAddr;
3006 fw.data_len = bnx_COM_b06FwDataLen;
3007 fw.data_index = 0;
3008 fw.data = bnx_COM_b06FwData;
3009
3010 fw.sbss_addr = bnx_COM_b06FwSbssAddr;
3011 fw.sbss_len = bnx_COM_b06FwSbssLen;
3012 fw.sbss_index = 0;
3013 fw.sbss = bnx_COM_b06FwSbss;
3014
3015 fw.bss_addr = bnx_COM_b06FwBssAddr;
3016 fw.bss_len = bnx_COM_b06FwBssLen;
3017 fw.bss_index = 0;
3018 fw.bss = bnx_COM_b06FwBss;
3019
3020 fw.rodata_addr = bnx_COM_b06FwRodataAddr;
3021 fw.rodata_len = bnx_COM_b06FwRodataLen;
3022 fw.rodata_index = 0;
3023 fw.rodata = bnx_COM_b06FwRodata;
3024 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
3025 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3026 break;
3027 }
3028 }
3029
3030 /****************************************************************************/
3031 /* Initialize context memory. */
3032 /* */
3033 /* Clears the memory associated with each Context ID (CID). */
3034 /* */
3035 /* Returns: */
3036 /* Nothing. */
3037 /****************************************************************************/
3038 void
3039 bnx_init_context(struct bnx_softc *sc)
3040 {
3041 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3042 /* DRC: Replace this constant value with a #define. */
3043 int i, retry_cnt = 10;
3044 u_int32_t val;
3045
3046 /*
3047 * BCM5709 context memory may be cached
3048 * in host memory so prepare the host memory
3049 * for access.
3050 */
3051 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
3052 | (1 << 12);
3053 val |= (BCM_PAGE_BITS - 8) << 16;
3054 REG_WR(sc, BNX_CTX_COMMAND, val);
3055
3056 /* Wait for mem init command to complete. */
3057 for (i = 0; i < retry_cnt; i++) {
3058 val = REG_RD(sc, BNX_CTX_COMMAND);
3059 if (!(val & BNX_CTX_COMMAND_MEM_INIT))
3060 break;
3061 DELAY(2);
3062 }
3063
3065 /* ToDo: Consider returning an error here. */
3066
3067 for (i = 0; i < sc->ctx_pages; i++) {
3068 int j;
3069
3071 /* Set the physaddr of the context memory cache. */
3072 val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
3073 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
3074 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
3075 val = (u_int32_t)
3076 ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
3077 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
3078 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
3079 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3080
3082 /* Verify that the context memory write was successful. */
3083 for (j = 0; j < retry_cnt; j++) {
3084 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
3085 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3086 break;
3087 DELAY(5);
3088 }
3089
3090 /* ToDo: Consider returning an error here. */
3091 }
3092 } else {
3093 u_int32_t vcid_addr, offset;
3094
3095 /*
3096 * For the 5706/5708, context memory is local to
3097 * the controller, so initialize the controller
3098 * context memory.
3099 */
3100
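		/*
		 * Walk the 96 contexts from the top down, mapping each
		 * one through the context window registers and zeroing
		 * it one dword at a time.
		 */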
3101 vcid_addr = GET_CID_ADDR(96);
3102 while (vcid_addr) {
3103
3104 vcid_addr -= PHY_CTX_SIZE;
3105
3106 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
3107 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
3108
3109 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3110 CTX_WR(sc, 0x00, offset, 0);
3111 }
3112
3113 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
3114 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
3115 }
3116 }
3117 }
3118
3119 /****************************************************************************/
3120 /* Fetch the permanent MAC address of the controller. */
3121 /* */
3122 /* Returns: */
3123 /* Nothing. */
3124 /****************************************************************************/
3125 void
3126 bnx_get_mac_addr(struct bnx_softc *sc)
3127 {
3128 u_int32_t mac_lo = 0, mac_hi = 0;
3129
3130 /*
3131 * The NetXtreme II bootcode populates various NIC
3132 * power-on and runtime configuration items in a
3133 * shared memory area. The factory configured MAC
3134 * address is available from both NVRAM and the
3135 * shared memory area so we'll read the value from
3136 * shared memory for speed.
3137 */
3138
3139 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3140 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3141
3142 if ((mac_lo == 0) && (mac_hi == 0)) {
3143 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3144 __FILE__, __LINE__);
3145 } else {
3146 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3147 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3148 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3149 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3150 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3151 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3152 }
3153
3154 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3155 "%s\n", ether_sprintf(sc->eaddr));
3156 }
3157
3158 /****************************************************************************/
3159 /* Program the MAC address. */
3160 /* */
3161 /* Returns: */
3162 /* Nothing. */
3163 /****************************************************************************/
3164 void
3165 bnx_set_mac_addr(struct bnx_softc *sc)
3166 {
3167 u_int32_t val;
3168 const u_int8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);
3169
3170 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3171 "%s\n", ether_sprintf(sc->eaddr));
3172
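	/*
	 * The EMAC holds the station address in two match registers:
	 * MATCH0 takes the two high-order bytes and MATCH1 the
	 * remaining four.
	 */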
3173 val = (mac_addr[0] << 8) | mac_addr[1];
3174
3175 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3176
3177 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3178 (mac_addr[4] << 8) | mac_addr[5];
3179
3180 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3181 }
3182
3183 /****************************************************************************/
3184 /* Stop the controller. */
3185 /* */
3186 /* Returns: */
3187 /* Nothing. */
3188 /****************************************************************************/
3189 void
3190 bnx_stop(struct ifnet *ifp, int disable)
3191 {
3192 struct bnx_softc *sc = ifp->if_softc;
3193
3194 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3195
3196 if ((ifp->if_flags & IFF_RUNNING) == 0)
3197 return;
3198
3199 callout_stop(&sc->bnx_timeout);
3200
3201 mii_down(&sc->bnx_mii);
3202
3203 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3204
3205 /* Disable the transmit/receive blocks. */
3206 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
3207 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
3208 DELAY(20);
3209
3210 bnx_disable_intr(sc);
3211
3212 /* Tell firmware that the driver is going away. */
3213 if (disable)
3214 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
3215 else
3216 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
3217
3218 /* Free RX buffers. */
3219 bnx_free_rx_chain(sc);
3220
3221 /* Free TX buffers. */
3222 bnx_free_tx_chain(sc);
3223
3224 ifp->if_timer = 0;
3225
3226 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3228 }
3229
3230 int
3231 bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
3232 {
3233 struct pci_attach_args *pa = &(sc->bnx_pa);
3234 u_int32_t val;
3235 int i, rc = 0;
3236
3237 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3238
3239 /* Wait for pending PCI transactions to complete. */
3240 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
3241 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3242 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3243 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3244 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3245 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
3246 DELAY(5);
3247
3248 /* Disable DMA */
3249 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3250 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3251 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3252 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3253 }
3254
3255 /* Assume bootcode is running. */
3256 sc->bnx_fw_timed_out = 0;
3257
3258 /* Give the firmware a chance to prepare for the reset. */
3259 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
3260 if (rc)
3261 goto bnx_reset_exit;
3262
3263 /* Set a firmware reminder that this is a soft reset. */
3264 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
3265 BNX_DRV_RESET_SIGNATURE_MAGIC);
3266
3267 /* Dummy read to force the chip to complete all current transactions. */
3268 val = REG_RD(sc, BNX_MISC_ID);
3269
3270 /* Chip reset. */
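	/*
	 * The 5709 is reset through the MISC command register; older
	 * controllers request a core reset through PCI configuration
	 * space and are then polled until the reset bits self-clear.
	 */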
3271 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3272 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
3273 REG_RD(sc, BNX_MISC_COMMAND);
3274 DELAY(5);
3275
3276 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3277 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3278
3279 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
3280 val);
3281 } else {
3282 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3283 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3284 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3285 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);
3286
3287 		/* Reset normally completes within 30us; poll for up to 100us. */
3288 for (i = 0; i < 10; i++) {
3289 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
3290 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3291 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3292 break;
3293 }
3294 DELAY(10);
3295 }
3296
3297 /* Check that reset completed successfully. */
3298 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3299 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3300 BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
3301 __FILE__, __LINE__);
3302 rc = EBUSY;
3303 goto bnx_reset_exit;
3304 }
3305 }
3306
3307 /* Make sure byte swapping is properly configured. */
3308 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
3309 if (val != 0x01020304) {
3310 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
3311 __FILE__, __LINE__);
3312 rc = ENODEV;
3313 goto bnx_reset_exit;
3314 }
3315
3316 /* Just completed a reset, assume that firmware is running again. */
3317 sc->bnx_fw_timed_out = 0;
3318
3319 /* Wait for the firmware to finish its initialization. */
3320 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3321 if (rc)
3322 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3323 "initialization!\n", __FILE__, __LINE__);
3324
3325 bnx_reset_exit:
3326 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3327
3328 return (rc);
3329 }
3330
3331 int
3332 bnx_chipinit(struct bnx_softc *sc)
3333 {
3334 struct pci_attach_args *pa = &(sc->bnx_pa);
3335 u_int32_t val;
3336 int rc = 0;
3337
3338 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3339
3340 /* Make sure the interrupt is not active. */
3341 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3342
3343 /* Initialize DMA byte/word swapping, configure the number of DMA */
3344 /* channels and PCI clock compensation delay. */
3345 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3346 BNX_DMA_CONFIG_DATA_WORD_SWAP |
3347 #if BYTE_ORDER == BIG_ENDIAN
3348 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3349 #endif
3350 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3351 DMA_READ_CHANS << 12 |
3352 DMA_WRITE_CHANS << 16;
3353
3354 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3355
3356 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3357 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3358
3359 /*
3360 * This setting resolves a problem observed on certain Intel PCI
3361 * chipsets that cannot handle multiple outstanding DMA operations.
3362 * See errata E9_5706A1_65.
3363 */
3364 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3365 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3366 !(sc->bnx_flags & BNX_PCIX_FLAG))
3367 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3368
3369 REG_WR(sc, BNX_DMA_CONFIG, val);
3370
3371 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3372 if (sc->bnx_flags & BNX_PCIX_FLAG) {
3373 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3374 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3375 val & ~0x20000);
3376 }
3377
3378 /* Enable the RX_V2P and Context state machines before access. */
3379 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3380 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3381 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3382 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3383
3384 /* Initialize context mapping and zero out the quick contexts. */
3385 bnx_init_context(sc);
3386
3387 	/* Initialize the on-board CPUs. */
3388 bnx_init_cpus(sc);
3389
3390 /* Prepare NVRAM for access. */
3391 if (bnx_init_nvram(sc)) {
3392 rc = ENODEV;
3393 goto bnx_chipinit_exit;
3394 }
3395
3396 /* Set the kernel bypass block size */
3397 val = REG_RD(sc, BNX_MQ_CONFIG);
3398 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3399 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3400
3401 /* Enable bins used on the 5709. */
3402 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3403 val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
3404 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
3405 val |= BNX_MQ_CONFIG_HALT_DIS;
3406 }
3407
3408 REG_WR(sc, BNX_MQ_CONFIG, val);
3409
3410 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3411 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3412 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3413
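	/* Configure the RV2P page size; (BCM_PAGE_BITS - 8) apparently
	 * encodes the page size as a power of two relative to 256 bytes. */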
3414 val = (BCM_PAGE_BITS - 8) << 24;
3415 REG_WR(sc, BNX_RV2P_CONFIG, val);
3416
3417 /* Configure page size. */
3418 val = REG_RD(sc, BNX_TBDR_CONFIG);
3419 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3420 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3421 REG_WR(sc, BNX_TBDR_CONFIG, val);
3422
3423 #if 0
3424 /* Set the perfect match control register to default. */
3425 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
3426 #endif
3427
3428 bnx_chipinit_exit:
3429 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3430
3431 	return (rc);
3432 }
3433
3434 /****************************************************************************/
3435 /* Initialize the controller in preparation to send/receive traffic. */
3436 /* */
3437 /* Returns: */
3438 /* 0 for success, positive value for failure. */
3439 /****************************************************************************/
3440 int
3441 bnx_blockinit(struct bnx_softc *sc)
3442 {
3443 u_int32_t reg, val;
3444 int rc = 0;
3445
3446 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3447
3448 /* Load the hardware default MAC address. */
3449 bnx_set_mac_addr(sc);
3450
3451 /* Set the Ethernet backoff seed value */
3452 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3453 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3454 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3455
3456 sc->last_status_idx = 0;
3457 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3458
3459 /* Set up link change interrupt generation. */
3460 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3461 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3462
3463 /* Program the physical address of the status block. */
3464 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3465 REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3466 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3467
3468 /* Program the physical address of the statistics block. */
3469 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3470 (u_int32_t)(sc->stats_block_paddr));
3471 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3472 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3473
3474 /* Program various host coalescing parameters. */
3475 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3476 << 16) | sc->bnx_tx_quick_cons_trip);
3477 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3478 << 16) | sc->bnx_rx_quick_cons_trip);
3479 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3480 sc->bnx_comp_prod_trip);
3481 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3482 sc->bnx_tx_ticks);
3483 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3484 sc->bnx_rx_ticks);
3485 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3486 sc->bnx_com_ticks);
3487 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3488 sc->bnx_cmd_ticks);
3489 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3490 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3491 REG_WR(sc, BNX_HC_CONFIG,
3492 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3493 BNX_HC_CONFIG_COLLECT_STATS));
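	/*
	 * Register format note (a sketch of the code above): each trip and
	 * ticks register packs two values, the interrupt-time value in the
	 * high 16 bits and the normal value in the low 16 bits, i.e.
	 * (val_during_int << 16) | val.  BNX_HC_STAT_COLLECT_TICKS is in
	 * microseconds, so 0xbb8 (3000) matches the 3ms comment above.
	 */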
3494
3495 /* Clear the internal statistics counters. */
3496 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3497
3498 /* Verify that bootcode is running. */
3499 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3500
3501 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3502 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3503 __FILE__, __LINE__); reg = 0);
3504
3505 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3506 BNX_DEV_INFO_SIGNATURE_MAGIC) {
		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
		    "Expected: 0x%08X\n", __FILE__, __LINE__,
3509 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3510 BNX_DEV_INFO_SIGNATURE_MAGIC);
3511 rc = ENODEV;
3512 goto bnx_blockinit_exit;
3513 }
3514
3515 /* Check if any management firmware is running. */
3516 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3517 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3518 BNX_PORT_FEATURE_IMD_ENABLED)) {
3519 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3520 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3521 }
3522
3523 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3524 BNX_DEV_INFO_BC_REV);
3525
3526 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3527
3528 /* Enable DMA */
3529 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3530 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3531 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3532 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3533 }
3534
3535 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3536 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3537
3538 /* Enable link state change interrupt generation. */
3539 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3540 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3541 BNX_MISC_ENABLE_DEFAULT_XI);
3542 } else
3543 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3544
3545 /* Enable all remaining blocks in the MAC. */
3546 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3547 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3548 DELAY(20);
3549
3550 bnx_blockinit_exit:
3551 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3552
3553 return (rc);
3554 }
3555
3556 static int
3557 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod,
3558 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3559 {
3560 bus_dmamap_t map;
3561 struct rx_bd *rxbd;
3562 u_int32_t addr;
3563 int i;
3564 #ifdef BNX_DEBUG
3565 u_int16_t debug_chain_prod = *chain_prod;
3566 #endif
3567 u_int16_t first_chain_prod;
3568
3569 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3570
3571 /* Map the mbuf cluster into device memory. */
3572 map = sc->rx_mbuf_map[*chain_prod];
3573 first_chain_prod = *chain_prod;
3574 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3575 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3576 __FILE__, __LINE__);
3577
3578 m_freem(m_new);
3579
3580 DBRUNIF(1, sc->rx_mbuf_alloc--);
3581
3582 return ENOBUFS;
3583 }
3584 /* Make sure there is room in the receive chain. */
3585 if (map->dm_nsegs > sc->free_rx_bd) {
3586 bus_dmamap_unload(sc->bnx_dmatag, map);
3587 m_freem(m_new);
3588 return EFBIG;
3589 }
3590 #ifdef BNX_DEBUG
3591 /* Track the distribution of buffer segments. */
3592 sc->rx_mbuf_segs[map->dm_nsegs]++;
3593 #endif
3594
3595 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3596 BUS_DMASYNC_PREREAD);
3597
3598 /* Update some debug statistics counters */
3599 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3600 sc->rx_low_watermark = sc->free_rx_bd);
3601 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3602
3603 /*
3604 * Setup the rx_bd for the first segment
3605 */
3606 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3607
3608 addr = (u_int32_t)(map->dm_segs[0].ds_addr);
3609 rxbd->rx_bd_haddr_lo = htole32(addr);
3610 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3611 rxbd->rx_bd_haddr_hi = htole32(addr);
3612 rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
3613 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3614 *prod_bseq += map->dm_segs[0].ds_len;
3615 bus_dmamap_sync(sc->bnx_dmatag,
3616 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3617 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3618 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3619
3620 for (i = 1; i < map->dm_nsegs; i++) {
3621 *prod = NEXT_RX_BD(*prod);
3622 *chain_prod = RX_CHAIN_IDX(*prod);
3623
3624 rxbd =
3625 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3626
3627 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
3628 rxbd->rx_bd_haddr_lo = htole32(addr);
3629 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3630 rxbd->rx_bd_haddr_hi = htole32(addr);
3631 rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
3632 rxbd->rx_bd_flags = 0;
3633 *prod_bseq += map->dm_segs[i].ds_len;
3634 bus_dmamap_sync(sc->bnx_dmatag,
3635 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3636 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3637 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3638 }
3639
3640 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3641 bus_dmamap_sync(sc->bnx_dmatag,
3642 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3643 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3644 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3645
	/*
	 * Save the mbuf and adjust the map pointer (swap the maps for the
	 * first and last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map
	 * match) and update the counter.
	 */
3651 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3652 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3653 sc->rx_mbuf_map[*chain_prod] = map;
3654 sc->free_rx_bd -= map->dm_nsegs;
3655
3656 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3657 map->dm_nsegs));
3658 *prod = NEXT_RX_BD(*prod);
3659 *chain_prod = RX_CHAIN_IDX(*prod);
3660
3661 return 0;
3662 }
3663
3664 /****************************************************************************/
3665 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3666 /* */
3667 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3668 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3669 /* necessary. */
3670 /* */
3671 /* Returns: */
3672 /* 0 for success, positive value for failure. */
3673 /****************************************************************************/
3674 int
3675 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3676 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3677 {
3678 struct mbuf *m_new = NULL;
3679 int rc = 0;
3680 u_int16_t min_free_bd;
3681
3682 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3683 __func__);
3684
3685 /* Make sure the inputs are valid. */
3686 DBRUNIF((*chain_prod > MAX_RX_BD),
3687 aprint_error_dev(sc->bnx_dev,
3688 "RX producer out of range: 0x%04X > 0x%04X\n",
3689 *chain_prod, (u_int16_t)MAX_RX_BD));
3690
3691 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3692 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
3693 *prod_bseq);
3694
	/* try to post as many mbufs to the RX chain as possible */
3696 if (sc->mbuf_alloc_size == MCLBYTES)
3697 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
3698 else
3699 min_free_bd = (BNX_MAX_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
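	/*
	 * Rationale (an added note, assuming 4KB pages and the usual
	 * 2048-byte MCLBYTES): min_free_bd is the worst-case number of
	 * rx_bd entries one buffer can consume, i.e. one per page it may
	 * span, so the refill loop below stops once fewer than that many
	 * descriptors remain free.
	 */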
3700 while (sc->free_rx_bd >= min_free_bd) {
3701 /* Simulate an mbuf allocation failure. */
3702 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3703 aprint_error_dev(sc->bnx_dev,
3704 "Simulating mbuf allocation failure.\n");
3705 sc->mbuf_sim_alloc_failed++;
3706 rc = ENOBUFS;
3707 goto bnx_get_buf_exit);
3708
3709 /* This is a new mbuf allocation. */
3710 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3711 if (m_new == NULL) {
3712 DBPRINT(sc, BNX_WARN,
3713 "%s(%d): RX mbuf header allocation failed!\n",
3714 __FILE__, __LINE__);
3715
3716 sc->mbuf_alloc_failed++;
3717
3718 rc = ENOBUFS;
3719 goto bnx_get_buf_exit;
3720 }
3721
3722 DBRUNIF(1, sc->rx_mbuf_alloc++);
3723
3724 /* Simulate an mbuf cluster allocation failure. */
3725 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3726 m_freem(m_new);
3727 sc->rx_mbuf_alloc--;
3728 sc->mbuf_alloc_failed++;
3729 sc->mbuf_sim_alloc_failed++;
3730 rc = ENOBUFS;
3731 goto bnx_get_buf_exit);
3732
3733 if (sc->mbuf_alloc_size == MCLBYTES)
3734 MCLGET(m_new, M_DONTWAIT);
3735 else
3736 MEXTMALLOC(m_new, sc->mbuf_alloc_size,
3737 M_DONTWAIT);
3738 if (!(m_new->m_flags & M_EXT)) {
3739 DBPRINT(sc, BNX_WARN,
3740 "%s(%d): RX mbuf chain allocation failed!\n",
3741 __FILE__, __LINE__);
3742
3743 m_freem(m_new);
3744
3745 DBRUNIF(1, sc->rx_mbuf_alloc--);
3746 sc->mbuf_alloc_failed++;
3747
3748 rc = ENOBUFS;
3749 goto bnx_get_buf_exit;
3750 }
3751
3752 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq);
3753 if (rc != 0)
3754 goto bnx_get_buf_exit;
3755 }
3756
3757 bnx_get_buf_exit:
3758 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
3759 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
3760 *chain_prod, *prod_bseq);
3761
3762 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
3763 __func__);
3764
3765 return(rc);
3766 }
3767
3768 int
3769 bnx_alloc_pkts(struct bnx_softc *sc)
3770 {
3771 struct ifnet *ifp = &sc->bnx_ec.ec_if;
3772 struct bnx_pkt *pkt;
3773 int i;
3774
3775 for (i = 0; i < 4; i++) { /* magic! */
3776 pkt = pool_get(bnx_tx_pool, PR_NOWAIT);
3777 if (pkt == NULL)
3778 break;
3779
3780 if (bus_dmamap_create(sc->bnx_dmatag,
3781 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
3782 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
3783 &pkt->pkt_dmamap) != 0)
3784 goto put;
3785
3786 if (!ISSET(ifp->if_flags, IFF_UP))
3787 goto stopping;
3788
3789 mutex_enter(&sc->tx_pkt_mtx);
3790 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
3791 sc->tx_pkt_count++;
3792 mutex_exit(&sc->tx_pkt_mtx);
3793 }
3794
3795 return (i == 0) ? ENOMEM : 0;
3796
3797 stopping:
3798 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
3799 put:
3800 pool_put(bnx_tx_pool, pkt);
3801 return (i == 0) ? ENOMEM : 0;
3802 }
3803
3804 /****************************************************************************/
3805 /* Initialize the TX context memory. */
3806 /* */
3807 /* Returns: */
3808 /* Nothing */
3809 /****************************************************************************/
3810 void
3811 bnx_init_tx_context(struct bnx_softc *sc)
3812 {
3813 u_int32_t val;
3814
3815 /* Initialize the context ID for an L2 TX chain. */
3816 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3817 /* Set the CID type to support an L2 connection. */
3818 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3819 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
3820 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3821 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
3822
3823 /* Point the hardware to the first page in the chain. */
3824 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3825 CTX_WR(sc, GET_CID_ADDR(TX_CID),
3826 BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
3827 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3828 CTX_WR(sc, GET_CID_ADDR(TX_CID),
3829 BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
3830 } else {
3831 /* Set the CID type to support an L2 connection. */
3832 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3833 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3834 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3835 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3836
3837 /* Point the hardware to the first page in the chain. */
3838 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3839 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3840 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3841 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3842 }
3843 }
3844
3846 /****************************************************************************/
3847 /* Allocate memory and initialize the TX data structures. */
3848 /* */
3849 /* Returns: */
3850 /* 0 for success, positive value for failure. */
3851 /****************************************************************************/
3852 int
3853 bnx_init_tx_chain(struct bnx_softc *sc)
3854 {
3855 struct tx_bd *txbd;
3856 u_int32_t addr;
3857 int i, rc = 0;
3858
3859 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3860
3861 /* Force an allocation of some dmamaps for tx up front */
3862 bnx_alloc_pkts(sc);
3863
3864 /* Set the initial TX producer/consumer indices. */
3865 sc->tx_prod = 0;
3866 sc->tx_cons = 0;
3867 sc->tx_prod_bseq = 0;
3868 sc->used_tx_bd = 0;
3869 sc->max_tx_bd = USABLE_TX_BD;
3870 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3871 DBRUNIF(1, sc->tx_full_count = 0);
3872
3873 /*
3874 * The NetXtreme II supports a linked-list structure called
3875 * a Buffer Descriptor Chain (or BD chain). A BD chain
3876 * consists of a series of 1 or more chain pages, each of which
3877 * consists of a fixed number of BD entries.
3878 * The last BD entry on each page is a pointer to the next page
3879 * in the chain, and the last pointer in the BD chain
3880 * points back to the beginning of the chain.
3881 */
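	/*
	 * Illustrative layout (an added sketch), for TX_PAGES = 2 and
	 * N = USABLE_TX_BD_PER_PAGE:
	 *
	 *   page 0: [bd 0][bd 1] ... [bd N-1][next -> page 1]
	 *   page 1: [bd 0][bd 1] ... [bd N-1][next -> page 0]
	 *
	 * The final entry on each page is consumed by the chain pointer
	 * rather than by a real descriptor.
	 */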
3882
3883 /* Set the TX next pointer chain entries. */
3884 for (i = 0; i < TX_PAGES; i++) {
3885 int j;
3886
3887 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3888
3889 /* Check if we've reached the last page. */
3890 if (i == (TX_PAGES - 1))
3891 j = 0;
3892 else
3893 j = i + 1;
3894
3895 addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
3896 txbd->tx_bd_haddr_lo = htole32(addr);
3897 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3898 txbd->tx_bd_haddr_hi = htole32(addr);
3899 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
3900 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
3901 }
3902
3903 /*
3904 * Initialize the context ID for an L2 TX chain.
3905 */
3906 bnx_init_tx_context(sc);
3907
3908 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3909
3910 return(rc);
3911 }
3912
3913 /****************************************************************************/
3914 /* Free memory and clear the TX data structures. */
3915 /* */
3916 /* Returns: */
3917 /* Nothing. */
3918 /****************************************************************************/
3919 void
3920 bnx_free_tx_chain(struct bnx_softc *sc)
3921 {
3922 struct bnx_pkt *pkt;
3923 int i;
3924
3925 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3926
3927 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3928 mutex_enter(&sc->tx_pkt_mtx);
3929 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
3930 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
3931 mutex_exit(&sc->tx_pkt_mtx);
3932
3933 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
3934 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3935 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);
3936
3937 m_freem(pkt->pkt_mbuf);
3938 DBRUNIF(1, sc->tx_mbuf_alloc--);
3939
3940 mutex_enter(&sc->tx_pkt_mtx);
3941 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
3942 }
3943
3944 /* Destroy all the dmamaps we allocated for TX */
3945 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
3946 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
3947 sc->tx_pkt_count--;
3948 mutex_exit(&sc->tx_pkt_mtx);
3949
3950 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
3951 pool_put(bnx_tx_pool, pkt);
3952
3953 mutex_enter(&sc->tx_pkt_mtx);
3954 }
3955 mutex_exit(&sc->tx_pkt_mtx);
3956
3959 /* Clear each TX chain page. */
3960 for (i = 0; i < TX_PAGES; i++) {
3961 bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3962 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
3963 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
3964 }
3965
3966 sc->used_tx_bd = 0;
3967
3968 /* Check if we lost any mbufs in the process. */
3969 DBRUNIF((sc->tx_mbuf_alloc),
3970 aprint_error_dev(sc->bnx_dev,
3971 "Memory leak! Lost %d mbufs from tx chain!\n",
3972 sc->tx_mbuf_alloc));
3973
3974 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3975 }
3976
3977 /****************************************************************************/
3978 /* Initialize the RX context memory. */
3979 /* */
3980 /* Returns: */
3981 /* Nothing */
3982 /****************************************************************************/
3983 void
3984 bnx_init_rx_context(struct bnx_softc *sc)
3985 {
3986 u_int32_t val;
3987
3988 /* Initialize the context ID for an L2 RX chain. */
3989 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3990 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3991
3992 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3993 u_int32_t lo_water, hi_water;
3994
3995 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3996 hi_water = USABLE_RX_BD / 4;
3997
3998 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
3999 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;
4000
4001 if (hi_water > 0xf)
4002 hi_water = 0xf;
4003 else if (hi_water == 0)
4004 lo_water = 0;
4005 val |= lo_water |
4006 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
4007 }
4008
4009 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
4010
4011 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4012 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
4013 val = REG_RD(sc, BNX_MQ_MAP_L2_5);
4014 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
4015 }
4016
4017 /* Point the hardware to the first page in the chain. */
4018 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
4019 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
4020 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
4021 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
4022 }
4023
4024 /****************************************************************************/
4025 /* Allocate memory and initialize the RX data structures. */
4026 /* */
4027 /* Returns: */
4028 /* 0 for success, positive value for failure. */
4029 /****************************************************************************/
4030 int
4031 bnx_init_rx_chain(struct bnx_softc *sc)
4032 {
4033 struct rx_bd *rxbd;
4034 int i, rc = 0;
4035 u_int16_t prod, chain_prod;
4036 u_int32_t prod_bseq, addr;
4037
4038 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4039
4040 /* Initialize the RX producer and consumer indices. */
4041 sc->rx_prod = 0;
4042 sc->rx_cons = 0;
4043 sc->rx_prod_bseq = 0;
4044 sc->free_rx_bd = USABLE_RX_BD;
4045 sc->max_rx_bd = USABLE_RX_BD;
4046 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4047 DBRUNIF(1, sc->rx_empty_count = 0);
4048
4049 /* Initialize the RX next pointer chain entries. */
4050 for (i = 0; i < RX_PAGES; i++) {
4051 int j;
4052
4053 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4054
4055 /* Check if we've reached the last page. */
4056 if (i == (RX_PAGES - 1))
4057 j = 0;
4058 else
4059 j = i + 1;
4060
4061 /* Setup the chain page pointers. */
4062 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
4063 rxbd->rx_bd_haddr_hi = htole32(addr);
4064 addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
4065 rxbd->rx_bd_haddr_lo = htole32(addr);
4066 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
4067 0, BNX_RX_CHAIN_PAGE_SZ,
4068 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4069 }
4070
4071 /* Allocate mbuf clusters for the rx_bd chain. */
4072 prod = prod_bseq = 0;
4073 chain_prod = RX_CHAIN_IDX(prod);
4074 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
4075 BNX_PRINTF(sc,
4076 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
4077 }
4078
4079 /* Save the RX chain producer index. */
4080 sc->rx_prod = prod;
4081 sc->rx_prod_bseq = prod_bseq;
4082
4083 for (i = 0; i < RX_PAGES; i++)
4084 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4085 sc->rx_bd_chain_map[i]->dm_mapsize,
4086 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4087
4088 /* Tell the chip about the waiting rx_bd's. */
4089 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4090 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
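	/*
	 * Mailbox semantics (an inference from the code, not documented
	 * here): HOST_BDIDX tells the chip the index of the last rx_bd
	 * made available, while HOST_BSEQ is a running count of buffer
	 * bytes posted so far; the chip uses both to decide how many
	 * descriptors it may fill.
	 */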
4091
4092 bnx_init_rx_context(sc);
4093
4094 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4095
4096 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4097
4098 return(rc);
4099 }
4100
4101 /****************************************************************************/
4102 /* Free memory and clear the RX data structures. */
4103 /* */
4104 /* Returns: */
4105 /* Nothing. */
4106 /****************************************************************************/
4107 void
4108 bnx_free_rx_chain(struct bnx_softc *sc)
4109 {
4110 int i;
4111
4112 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4113
4114 /* Free any mbufs still in the RX mbuf chain. */
4115 for (i = 0; i < TOTAL_RX_BD; i++) {
4116 if (sc->rx_mbuf_ptr[i] != NULL) {
4117 if (sc->rx_mbuf_map[i] != NULL) {
4118 bus_dmamap_sync(sc->bnx_dmatag,
4119 sc->rx_mbuf_map[i], 0,
4120 sc->rx_mbuf_map[i]->dm_mapsize,
4121 BUS_DMASYNC_POSTREAD);
4122 bus_dmamap_unload(sc->bnx_dmatag,
4123 sc->rx_mbuf_map[i]);
4124 }
4125 m_freem(sc->rx_mbuf_ptr[i]);
4126 sc->rx_mbuf_ptr[i] = NULL;
4127 DBRUNIF(1, sc->rx_mbuf_alloc--);
4128 }
4129 }
4130
4131 /* Clear each RX chain page. */
4132 for (i = 0; i < RX_PAGES; i++)
4133 bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
4134
4135 sc->free_rx_bd = sc->max_rx_bd;
4136
4137 /* Check if we lost any mbufs in the process. */
4138 DBRUNIF((sc->rx_mbuf_alloc),
4139 aprint_error_dev(sc->bnx_dev,
4140 "Memory leak! Lost %d mbufs from rx chain!\n",
4141 sc->rx_mbuf_alloc));
4142
4143 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4144 }
4145
4146 /****************************************************************************/
4147 /* Handles PHY generated interrupt events. */
4148 /* */
4149 /* Returns: */
4150 /* Nothing. */
4151 /****************************************************************************/
4152 void
4153 bnx_phy_intr(struct bnx_softc *sc)
4154 {
4155 u_int32_t new_link_state, old_link_state;
4156
4157 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4158 BUS_DMASYNC_POSTREAD);
4159 new_link_state = sc->status_block->status_attn_bits &
4160 STATUS_ATTN_BITS_LINK_STATE;
4161 old_link_state = sc->status_block->status_attn_bits_ack &
4162 STATUS_ATTN_BITS_LINK_STATE;
4163
	/* If the link state has changed, handle the transition. */
4165 if (new_link_state != old_link_state) {
4166 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
4167
4168 callout_stop(&sc->bnx_timeout);
4169 bnx_tick(sc);
4170
4171 /* Update the status_attn_bits_ack field in the status block. */
4172 if (new_link_state) {
4173 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
4174 STATUS_ATTN_BITS_LINK_STATE);
4175 DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
4176 } else {
4177 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
4178 STATUS_ATTN_BITS_LINK_STATE);
4179 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
4180 }
4181 }
4182
4183 /* Acknowledge the link change interrupt. */
4184 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
4185 }
4186
4187 /****************************************************************************/
4188 /* Handles received frame interrupt events. */
4189 /* */
4190 /* Returns: */
4191 /* Nothing. */
4192 /****************************************************************************/
4193 void
4194 bnx_rx_intr(struct bnx_softc *sc)
4195 {
4196 struct status_block *sblk = sc->status_block;
4197 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4198 u_int16_t hw_cons, sw_cons, sw_chain_cons;
4199 u_int16_t sw_prod, sw_chain_prod;
4200 u_int32_t sw_prod_bseq;
4201 struct l2_fhdr *l2fhdr;
4202 int i;
4203
4204 DBRUNIF(1, sc->rx_interrupts++);
4205 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4206 BUS_DMASYNC_POSTREAD);
4207
4208 /* Prepare the RX chain pages to be accessed by the host CPU. */
4209 for (i = 0; i < RX_PAGES; i++)
4210 bus_dmamap_sync(sc->bnx_dmatag,
4211 sc->rx_bd_chain_map[i], 0,
4212 sc->rx_bd_chain_map[i]->dm_mapsize,
4213 BUS_DMASYNC_POSTWRITE);
4214
4215 /* Get the hardware's view of the RX consumer index. */
4216 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4217 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4218 hw_cons++;
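	/*
	 * Why the increment (an added note): the last entry on each chain
	 * page is the next-page pointer rather than a real rx_bd, so a
	 * consumer index landing on it is advanced one slot, e.g. with
	 * USABLE_RX_BD_PER_PAGE == 255 an index of 255 becomes 256.
	 */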
4219
4220 /* Get working copies of the driver's view of the RX indices. */
4221 sw_cons = sc->rx_cons;
4222 sw_prod = sc->rx_prod;
4223 sw_prod_bseq = sc->rx_prod_bseq;
4224
4225 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4226 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4227 __func__, sw_prod, sw_cons, sw_prod_bseq);
4228
4229 /* Prevent speculative reads from getting ahead of the status block. */
4230 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4231 BUS_SPACE_BARRIER_READ);
4232
4233 /* Update some debug statistics counters */
4234 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4235 sc->rx_low_watermark = sc->free_rx_bd);
4236 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
4237
4238 /*
4239 * Scan through the receive chain as long
4240 * as there is work to do.
4241 */
4242 while (sw_cons != hw_cons) {
4243 struct mbuf *m;
4244 struct rx_bd *rxbd;
4245 unsigned int len;
4246 u_int32_t status;
4247
4248 /* Convert the producer/consumer indices to an actual
4249 * rx_bd index.
4250 */
4251 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4252 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4253
4254 /* Get the used rx_bd. */
4255 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4256 sc->free_rx_bd++;
4257
4258 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
4259 bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
4260
4261 /* The mbuf is stored with the last rx_bd entry of a packet. */
4262 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4263 #ifdef DIAGNOSTIC
4264 /* Validate that this is the last rx_bd. */
4265 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4266 printf("%s: Unexpected mbuf found in "
4267 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4268 sw_chain_cons);
4269 }
4270 #endif
4271
4272 /* DRC - ToDo: If the received packet is small, say less
4273 * than 128 bytes, allocate a new mbuf here,
4274 * copy the data to that mbuf, and recycle
4275 * the mapped jumbo frame.
4276 */
4277
4278 /* Unmap the mbuf from DMA space. */
4279 #ifdef DIAGNOSTIC
4280 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4281 printf("invalid map sw_cons 0x%x "
4282 "sw_prod 0x%x "
4283 "sw_chain_cons 0x%x "
4284 "sw_chain_prod 0x%x "
4285 "hw_cons 0x%x "
4286 "TOTAL_RX_BD_PER_PAGE 0x%x "
4287 "TOTAL_RX_BD 0x%x\n",
4288 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4289 hw_cons,
4290 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4291 }
4292 #endif
4293 bus_dmamap_sync(sc->bnx_dmatag,
4294 sc->rx_mbuf_map[sw_chain_cons], 0,
4295 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4296 BUS_DMASYNC_POSTREAD);
4297 bus_dmamap_unload(sc->bnx_dmatag,
4298 sc->rx_mbuf_map[sw_chain_cons]);
4299
4300 /* Remove the mbuf from the driver's chain. */
4301 m = sc->rx_mbuf_ptr[sw_chain_cons];
4302 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4303
4304 /*
			 * Frames received on the NetXtreme II are prepended
4306 * with the l2_fhdr structure which provides status
4307 * information about the received frame (including
4308 * VLAN tags and checksum info) and are also
4309 * automatically adjusted to align the IP header
4310 * (i.e. two null bytes are inserted before the
4311 * Ethernet header).
4312 */
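			/*
			 * Sketch of the buffer as received (an added
			 * illustration):
			 *
			 *   [struct l2_fhdr][2 pad bytes][Ethernet frame]
			 *
			 * The two pad bytes make the IP header 4-byte
			 * aligned after the 14-byte Ethernet header; the
			 * m_adj() further down strips the l2_fhdr and the
			 * padding before the frame is passed up.
			 */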
4313 l2fhdr = mtod(m, struct l2_fhdr *);
4314
4315 len = l2fhdr->l2_fhdr_pkt_len;
4316 status = l2fhdr->l2_fhdr_status;
4317
4318 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4319 aprint_error("Simulating l2_fhdr status error.\n");
4320 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4321
4322 /* Watch for unusual sized frames. */
4323 DBRUNIF(((len < BNX_MIN_MTU) ||
4324 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4325 aprint_error_dev(sc->bnx_dev,
4326 "Unusual frame size found. "
4327 "Min(%d), Actual(%d), Max(%d)\n",
4328 (int)BNX_MIN_MTU, len,
4329 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4330
4331 bnx_dump_mbuf(sc, m);
4332 bnx_breakpoint(sc));
4333
4334 len -= ETHER_CRC_LEN;
4335
4336 /* Check the received frame for errors. */
4337 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4338 L2_FHDR_ERRORS_PHY_DECODE |
4339 L2_FHDR_ERRORS_ALIGNMENT |
4340 L2_FHDR_ERRORS_TOO_SHORT |
4341 L2_FHDR_ERRORS_GIANT_FRAME)) ||
4342 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4343 len >
4344 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4345 ifp->if_ierrors++;
4346 DBRUNIF(1, sc->l2fhdr_status_errors++);
4347
4348 /* Reuse the mbuf for a new frame. */
4349 if (bnx_add_buf(sc, m, &sw_prod,
4350 &sw_chain_prod, &sw_prod_bseq)) {
4351 DBRUNIF(1, bnx_breakpoint(sc));
4352 panic("%s: Can't reuse RX mbuf!\n",
4353 device_xname(sc->bnx_dev));
4354 }
4355 continue;
4356 }
4357
4358 /*
4359 * Get a new mbuf for the rx_bd. If no new
4360 * mbufs are available then reuse the current mbuf,
4361 * log an ierror on the interface, and generate
4362 * an error in the system log.
4363 */
4364 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4365 &sw_prod_bseq)) {
4366 DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4367 "Failed to allocate "
4368 "new mbuf, incoming frame dropped!\n"));
4369
4370 ifp->if_ierrors++;
4371
				/* Try to reuse the existing mbuf. */
4373 if (bnx_add_buf(sc, m, &sw_prod,
4374 &sw_chain_prod, &sw_prod_bseq)) {
4375 DBRUNIF(1, bnx_breakpoint(sc));
4376 panic("%s: Double mbuf allocation "
4377 "failure!",
4378 device_xname(sc->bnx_dev));
4379 }
4380 continue;
4381 }
4382
4383 /* Skip over the l2_fhdr when passing the data up
4384 * the stack.
4385 */
4386 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4387
			/* Adjust the packet length to match the received data. */
4389 m->m_pkthdr.len = m->m_len = len;
4390
4391 /* Send the packet to the appropriate interface. */
4392 m->m_pkthdr.rcvif = ifp;
4393
4394 DBRUN(BNX_VERBOSE_RECV,
4395 struct ether_header *eh;
4396 eh = mtod(m, struct ether_header *);
4397 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
4398 __func__, ether_sprintf(eh->ether_dhost),
4399 ether_sprintf(eh->ether_shost),
4400 htons(eh->ether_type)));
4401
4402 /* Validate the checksum. */
4403
4404 /* Check for an IP datagram. */
4405 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4406 /* Check if the IP checksum is valid. */
4407 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4408 == 0)
4409 m->m_pkthdr.csum_flags |=
4410 M_CSUM_IPv4;
4411 #ifdef BNX_DEBUG
4412 else
4413 DBPRINT(sc, BNX_WARN_SEND,
4414 "%s(): Invalid IP checksum "
4415 "= 0x%04X!\n",
4416 __func__,
4417 l2fhdr->l2_fhdr_ip_xsum
4418 );
4419 #endif
4420 }
4421
4422 /* Check for a valid TCP/UDP frame. */
4423 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4424 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4425 /* Check for a good TCP/UDP checksum. */
4426 if ((status &
4427 (L2_FHDR_ERRORS_TCP_XSUM |
4428 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4429 m->m_pkthdr.csum_flags |=
4430 M_CSUM_TCPv4 |
4431 M_CSUM_UDPv4;
4432 } else {
4433 DBPRINT(sc, BNX_WARN_SEND,
4434 "%s(): Invalid TCP/UDP "
4435 "checksum = 0x%04X!\n",
4436 __func__,
4437 l2fhdr->l2_fhdr_tcp_udp_xsum);
4438 }
4439 }
4440
4441 /*
4442 * If we received a packet with a vlan tag,
4443 * attach that information to the packet.
4444 */
4445 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4446 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4447 VLAN_INPUT_TAG(ifp, m,
4448 l2fhdr->l2_fhdr_vlan_tag,
4449 continue);
4450 }
4451
4452 #if NBPFILTER > 0
4453 /*
4454 * Handle BPF listeners. Let the BPF
4455 * user see the packet.
4456 */
4457 if (ifp->if_bpf)
4458 bpf_mtap(ifp->if_bpf, m);
4459 #endif
4460
4461 /* Pass the mbuf off to the upper layers. */
4462 ifp->if_ipackets++;
4463 DBPRINT(sc, BNX_VERBOSE_RECV,
4464 "%s(): Passing received frame up.\n", __func__);
4465 (*ifp->if_input)(ifp, m);
4466 DBRUNIF(1, sc->rx_mbuf_alloc--);
4467
4468 }
4469
4470 sw_cons = NEXT_RX_BD(sw_cons);
4471
4472 /* Refresh hw_cons to see if there's new work */
4473 if (sw_cons == hw_cons) {
4474 hw_cons = sc->hw_rx_cons =
4475 sblk->status_rx_quick_consumer_index0;
4476 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4477 USABLE_RX_BD_PER_PAGE)
4478 hw_cons++;
4479 }
4480
4481 /* Prevent speculative reads from getting ahead of
4482 * the status block.
4483 */
4484 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4485 BUS_SPACE_BARRIER_READ);
4486 }
4487
4488 for (i = 0; i < RX_PAGES; i++)
4489 bus_dmamap_sync(sc->bnx_dmatag,
4490 sc->rx_bd_chain_map[i], 0,
4491 sc->rx_bd_chain_map[i]->dm_mapsize,
4492 BUS_DMASYNC_PREWRITE);
4493
4494 sc->rx_cons = sw_cons;
4495 sc->rx_prod = sw_prod;
4496 sc->rx_prod_bseq = sw_prod_bseq;
4497
4498 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4499 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4500
4501 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4502 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4503 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4504 }
4505
4506 /****************************************************************************/
4507 /* Handles transmit completion interrupt events. */
4508 /* */
4509 /* Returns: */
4510 /* Nothing. */
4511 /****************************************************************************/
4512 void
4513 bnx_tx_intr(struct bnx_softc *sc)
4514 {
4515 struct status_block *sblk = sc->status_block;
4516 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4517 struct bnx_pkt *pkt;
4518 bus_dmamap_t map;
4519 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4520
4521 DBRUNIF(1, sc->tx_interrupts++);
4522 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4523 BUS_DMASYNC_POSTREAD);
4524
4525 /* Get the hardware's view of the TX consumer index. */
4526 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4527
4528 /* Skip to the next entry if this is a chain page pointer. */
4529 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4530 hw_tx_cons++;
4531
4532 sw_tx_cons = sc->tx_cons;
4533
4534 /* Prevent speculative reads from getting ahead of the status block. */
4535 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4536 BUS_SPACE_BARRIER_READ);
4537
4538 /* Cycle through any completed TX chain page entries. */
4539 while (sw_tx_cons != hw_tx_cons) {
4540 #ifdef BNX_DEBUG
4541 struct tx_bd *txbd = NULL;
4542 #endif
4543 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4544
4545 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4546 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4547 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4548
4549 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4550 aprint_error_dev(sc->bnx_dev,
4551 "TX chain consumer out of range! 0x%04X > 0x%04X\n",
4552 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));
4553
4554 DBRUNIF(1, txbd = &sc->tx_bd_chain
4555 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4556
4557 DBRUNIF((txbd == NULL),
4558 aprint_error_dev(sc->bnx_dev,
4559 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
4560 bnx_breakpoint(sc));
4561
4562 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
4563 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4564
4566 mutex_enter(&sc->tx_pkt_mtx);
4567 pkt = TAILQ_FIRST(&sc->tx_used_pkts);
4568 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
4569 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
4570 mutex_exit(&sc->tx_pkt_mtx);
4571 /*
4572 * Free the associated mbuf. Remember
4573 * that only the last tx_bd of a packet
4574 * has an mbuf pointer and DMA map.
4575 */
4576 map = pkt->pkt_dmamap;
4577 bus_dmamap_sync(sc->bnx_dmatag, map, 0,
4578 map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4579 bus_dmamap_unload(sc->bnx_dmatag, map);
4580
4581 m_freem(pkt->pkt_mbuf);
4582 DBRUNIF(1, sc->tx_mbuf_alloc--);
4583
4584 ifp->if_opackets++;
4585
4586 mutex_enter(&sc->tx_pkt_mtx);
4587 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
4588 }
4589 mutex_exit(&sc->tx_pkt_mtx);
4590
4591 sc->used_tx_bd--;
4592 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
4593 __FILE__, __LINE__, sc->used_tx_bd);
4594
4595 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4596
4597 /* Refresh hw_cons to see if there's new work. */
4598 hw_tx_cons = sc->hw_tx_cons =
4599 sblk->status_tx_quick_consumer_index0;
4600 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4601 USABLE_TX_BD_PER_PAGE)
4602 hw_tx_cons++;
4603
4604 /* Prevent speculative reads from getting ahead of
4605 * the status block.
4606 */
4607 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4608 BUS_SPACE_BARRIER_READ);
4609 }
4610
4611 /* Clear the TX timeout timer. */
4612 ifp->if_timer = 0;
4613
4614 /* Clear the tx hardware queue full flag. */
4615 if (sc->used_tx_bd < sc->max_tx_bd) {
4616 DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4617 aprint_debug_dev(sc->bnx_dev,
4618 "Open TX chain! %d/%d (used/total)\n",
4619 sc->used_tx_bd, sc->max_tx_bd));
4620 ifp->if_flags &= ~IFF_OACTIVE;
4621 }
4622
4623 sc->tx_cons = sw_tx_cons;
4624 }
4625
4626 /****************************************************************************/
4627 /* Disables interrupt generation. */
4628 /* */
4629 /* Returns: */
4630 /* Nothing. */
4631 /****************************************************************************/
4632 void
4633 bnx_disable_intr(struct bnx_softc *sc)
4634 {
4635 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4636 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
4637 }
4638
4639 /****************************************************************************/
4640 /* Enables interrupt generation. */
4641 /* */
4642 /* Returns: */
4643 /* Nothing. */
4644 /****************************************************************************/
4645 void
4646 bnx_enable_intr(struct bnx_softc *sc)
4647 {
4648 u_int32_t val;
4649
4650 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4651 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4652
4653 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4654 sc->last_status_idx);
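	/*
	 * Two-step sequence (an inference from the register usage): the
	 * first write acknowledges events up to last_status_idx while
	 * keeping interrupts masked, the second write with the same index
	 * but without MASK_INT re-enables them; the COAL_NOW command below
	 * forces an immediate coalescing pass so nothing that arrived
	 * while masked is lost.
	 */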
4655
4656 val = REG_RD(sc, BNX_HC_COMMAND);
4657 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4658 }
4659
4660 /****************************************************************************/
4661 /* Handles controller initialization. */
4662 /* */
4663 /****************************************************************************/
4664 int
4665 bnx_init(struct ifnet *ifp)
4666 {
4667 struct bnx_softc *sc = ifp->if_softc;
4668 u_int32_t ether_mtu;
4669 int s, error = 0;
4670
4671 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4672
4673 s = splnet();
4674
4675 bnx_stop(ifp, 0);
4676
4677 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4678 aprint_error_dev(sc->bnx_dev,
4679 "Controller reset failed!\n");
4680 goto bnx_init_exit;
4681 }
4682
4683 if ((error = bnx_chipinit(sc)) != 0) {
4684 aprint_error_dev(sc->bnx_dev,
4685 "Controller initialization failed!\n");
4686 goto bnx_init_exit;
4687 }
4688
4689 if ((error = bnx_blockinit(sc)) != 0) {
4690 aprint_error_dev(sc->bnx_dev,
4691 "Block initialization failed!\n");
4692 goto bnx_init_exit;
4693 }
4694
4695 /* Calculate and program the Ethernet MRU size. */
4696 if (ifp->if_mtu <= ETHERMTU) {
4697 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4698 sc->mbuf_alloc_size = MCLBYTES;
4699 } else {
4700 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4701 sc->mbuf_alloc_size = BNX_MAX_MRU;
4702 }
4703
4705 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4706 __func__, ether_mtu);
4707
4708 /*
4709 * Program the MRU and enable Jumbo frame
4710 * support.
4711 */
4712 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4713 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4714
4715 /* Calculate the RX Ethernet frame size for rx_bd's. */
4716 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
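	/*
	 * Worked example (an added note; the constants are assumptions):
	 * for a standard MTU, ether_mtu is BNX_MAX_STD_ETHER_MTU_VLAN,
	 * commonly 1522 (1500 payload + 14 header + 4 VLAN + 4 FCS), so
	 * max_frame_size covers the prepended l2_fhdr, 2 alignment bytes,
	 * the frame itself and 8 bytes of trailing slack.
	 */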
4717
4718 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4719 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4720 sc->mbuf_alloc_size, sc->max_frame_size);
4721
4722 /* Program appropriate promiscuous/multicast filtering. */
4723 bnx_iff(sc);
4724
4725 /* Init RX buffer descriptor chain. */
4726 bnx_init_rx_chain(sc);
4727
4728 /* Init TX buffer descriptor chain. */
4729 bnx_init_tx_chain(sc);
4730
4731 /* Enable host interrupts. */
4732 bnx_enable_intr(sc);
4733
4734 if ((error = ether_mediachange(ifp)) != 0)
4735 goto bnx_init_exit;
4736
4737 ifp->if_flags |= IFF_RUNNING;
4738 ifp->if_flags &= ~IFF_OACTIVE;
4739
4740 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4741
4742 bnx_init_exit:
4743 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4744
4745 splx(s);
4746
4747 return(error);
4748 }
4749
4750 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4752 /* memory visible to the controller. */
4753 /* */
4754 /* Returns: */
4755 /* 0 for success, positive value for failure. */
4756 /****************************************************************************/
4757 int
4758 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
4759 {
4760 struct bnx_pkt *pkt;
4761 bus_dmamap_t map;
4762 struct tx_bd *txbd = NULL;
4763 u_int16_t vlan_tag = 0, flags = 0;
4764 u_int16_t chain_prod, prod;
4765 #ifdef BNX_DEBUG
4766 u_int16_t debug_prod;
4767 #endif
4768 u_int32_t addr, prod_bseq;
4769 int i, error;
4770 struct m_tag *mtag;
4771
4772 again:
4773 mutex_enter(&sc->tx_pkt_mtx);
4774 pkt = TAILQ_FIRST(&sc->tx_free_pkts);
4775 if (pkt == NULL) {
4776 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) {
4777 mutex_exit(&sc->tx_pkt_mtx);
4778 return ENETDOWN;
4779 }
4780 if (sc->tx_pkt_count <= TOTAL_TX_BD) {
4781 mutex_exit(&sc->tx_pkt_mtx);
4782 if (bnx_alloc_pkts(sc) == 0)
4783 goto again;
4784 } else {
4785 mutex_exit(&sc->tx_pkt_mtx);
4786 }
4787 return (ENOMEM);
4788 }
4789 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
4790 mutex_exit(&sc->tx_pkt_mtx);
4791
4792 /* Transfer any checksum offload flags to the bd. */
4793 if (m->m_pkthdr.csum_flags) {
4794 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
4795 flags |= TX_BD_FLAGS_IP_CKSUM;
4796 if (m->m_pkthdr.csum_flags &
4797 (M_CSUM_TCPv4 | M_CSUM_UDPv4))
4798 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4799 }
4800
4801 /* Transfer any VLAN tags to the bd. */
4802 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m);
4803 if (mtag != NULL) {
4804 flags |= TX_BD_FLAGS_VLAN_TAG;
4805 vlan_tag = VLAN_TAG_VALUE(mtag);
4806 }
4807
4808 /* Map the mbuf into DMAable memory. */
4809 prod = sc->tx_prod;
4810 chain_prod = TX_CHAIN_IDX(prod);
4811 map = pkt->pkt_dmamap;
4812
4813 /* Map the mbuf into our DMA address space. */
4814 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT);
4815 if (error != 0) {
4816 aprint_error_dev(sc->bnx_dev,
4817 "Error mapping mbuf into TX chain!\n");
4818 sc->tx_dma_map_failures++;
4819 goto maperr;
4820 }
4821 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
4822 BUS_DMASYNC_PREWRITE);
4823 /* Make sure there's room in the chain */
4824 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
4825 goto nospace;
4826
4827 /* prod points to an empty tx_bd at this point. */
4828 prod_bseq = sc->tx_prod_bseq;
4829 #ifdef BNX_DEBUG
4830 debug_prod = chain_prod;
4831 #endif
4832 DBPRINT(sc, BNX_INFO_SEND,
4833 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4834 "prod_bseq = 0x%08X\n",
4835 __func__, prod, chain_prod, prod_bseq);
4836
4837 /*
4838 * Cycle through each mbuf segment that makes up
4839 * the outgoing frame, gathering the mapping info
4840 * for that segment and creating a tx_bd for the
4841 * mbuf.
4842 */
4843 for (i = 0; i < map->dm_nsegs ; i++) {
4844 chain_prod = TX_CHAIN_IDX(prod);
4845 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4846
4847 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
4848 txbd->tx_bd_haddr_lo = htole32(addr);
4849 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4850 txbd->tx_bd_haddr_hi = htole32(addr);
4851 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
4852 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4853 txbd->tx_bd_flags = htole16(flags);
4854 prod_bseq += map->dm_segs[i].ds_len;
4855 if (i == 0)
4856 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4857 prod = NEXT_TX_BD(prod);
4858 }
4859 /* Set the END flag on the last TX buffer descriptor. */
4860 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
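	/*
	 * Resulting descriptor flags (an added illustration): a frame
	 * whose DMA map yields three segments produces three tx_bd
	 * entries flagged START, 0 and END respectively; a single-segment
	 * frame gets one tx_bd carrying both START and END.
	 */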
4861
4862 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));
4863
4864 DBPRINT(sc, BNX_INFO_SEND,
4865 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
4866 "prod_bseq = 0x%08X\n",
4867 __func__, prod, chain_prod, prod_bseq);
4868
4869 pkt->pkt_mbuf = m;
4870 pkt->pkt_end_desc = chain_prod;
4871
4872 mutex_enter(&sc->tx_pkt_mtx);
4873 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
4874 mutex_exit(&sc->tx_pkt_mtx);
4875
4876 sc->used_tx_bd += map->dm_nsegs;
4877 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
4878 __FILE__, __LINE__, sc->used_tx_bd);
4879
4880 /* Update some debug statistics counters */
4881 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4882 sc->tx_hi_watermark = sc->used_tx_bd);
4883 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
4884 DBRUNIF(1, sc->tx_mbuf_alloc++);
4885
4886 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4887 map->dm_nsegs));
4888
4889 /* prod points to the next free tx_bd at this point. */
4890 sc->tx_prod = prod;
4891 sc->tx_prod_bseq = prod_bseq;
4892
4893 return (0);
4894
4896 nospace:
4897 bus_dmamap_unload(sc->bnx_dmatag, map);
4898 maperr:
4899 mutex_enter(&sc->tx_pkt_mtx);
4900 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
4901 mutex_exit(&sc->tx_pkt_mtx);
4902
4903 return (ENOMEM);
4904 }
4905
4906 /****************************************************************************/
4907 /* Main transmit routine. */
4908 /* */
4909 /* Returns: */
4910 /* Nothing. */
4911 /****************************************************************************/
4912 void
4913 bnx_start(struct ifnet *ifp)
4914 {
4915 struct bnx_softc *sc = ifp->if_softc;
4916 struct mbuf *m_head = NULL;
4917 int count = 0;
4918 u_int16_t tx_prod, tx_chain_prod;
4919
	/* If the transmitter is busy or the interface isn't running, bail out. */
4921 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) {
4922 DBPRINT(sc, BNX_INFO_SEND,
4923 "%s(): output active or device not running.\n", __func__);
4924 goto bnx_start_exit;
4925 }
4926
4927 /* prod points to the next free tx_bd. */
4928 tx_prod = sc->tx_prod;
4929 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4930
4931 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4932 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, "
4933 "used_tx %d max_tx %d\n",
4934 __func__, tx_prod, tx_chain_prod, sc->tx_prod_bseq,
4935 sc->used_tx_bd, sc->max_tx_bd);
4936
4937 /*
4938 * Keep adding entries while there is space in the ring.
4939 */
4940 while (sc->used_tx_bd < sc->max_tx_bd) {
4941 /* Check for any frames to send. */
4942 IFQ_POLL(&ifp->if_snd, m_head);
4943 if (m_head == NULL)
4944 break;
4945
4946 /*
4947 * Pack the data into the transmit ring. If we
4948 * don't have room, set the OACTIVE flag to wait
4949 * for the NIC to drain the chain.
4950 */
4951 if (bnx_tx_encap(sc, m_head)) {
4952 ifp->if_flags |= IFF_OACTIVE;
4953 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4954 "business! Total tx_bd used = %d\n",
4955 sc->used_tx_bd);
4956 break;
4957 }
4958
4959 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4960 count++;
4961
4962 #if NBPFILTER > 0
4963 /* Send a copy of the frame to any BPF listeners. */
4964 if (ifp->if_bpf)
4965 bpf_mtap(ifp->if_bpf, m_head);
4966 #endif
4967 }
4968
4969 if (count == 0) {
4970 /* no packets were dequeued */
4971 DBPRINT(sc, BNX_VERBOSE_SEND,
4972 "%s(): No packets were dequeued\n", __func__);
4973 goto bnx_start_exit;
4974 }
4975
4976 /* Update the driver's counters. */
4977 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4978
4979 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4980 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, tx_prod,
4981 tx_chain_prod, sc->tx_prod_bseq);
4982
4983 /* Start the transmit. */
4984 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4985 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4986
4987 /* Set the tx timeout. */
4988 ifp->if_timer = BNX_TX_TIMEOUT;
4989
4990 bnx_start_exit:
4991 return;
4992 }
4993
4994 /****************************************************************************/
4995 /* Handles any IOCTL calls from the operating system. */
4996 /* */
4997 /* Returns: */
4998 /* 0 for success, positive value for failure. */
4999 /****************************************************************************/
5000 int
5001 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5002 {
5003 struct bnx_softc *sc = ifp->if_softc;
5004 struct ifreq *ifr = (struct ifreq *) data;
5005 struct mii_data *mii = &sc->bnx_mii;
5006 int s, error = 0;
5007
5008 s = splnet();
5009
5010 switch (command) {
5011 case SIOCSIFFLAGS:
5012 if (ifp->if_flags & IFF_UP) {
5013 if (ifp->if_flags & IFF_RUNNING)
5014 error = ENETRESET;
5015 else
5016 bnx_init(ifp);
5017 } else if (ifp->if_flags & IFF_RUNNING)
5018 bnx_stop(ifp, 1);
5019 break;
5020
5021 case SIOCSIFMEDIA:
5022 case SIOCGIFMEDIA:
5023 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5024 sc->bnx_phy_flags);
5025
5026 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5027 break;
5028
5029 default:
5030 error = ether_ioctl(ifp, command, data);
5031 }
5032
5033 if (error == ENETRESET) {
5034 if (ifp->if_flags & IFF_RUNNING)
5035 bnx_iff(sc);
5036 error = 0;
5037 }
5038
5039 splx(s);
5040 return (error);
5041 }
5042
5043 /****************************************************************************/
5044 /* Transmit timeout handler. */
5045 /* */
5046 /* Returns: */
5047 /* Nothing. */
5048 /****************************************************************************/
5049 void
5050 bnx_watchdog(struct ifnet *ifp)
5051 {
5052 struct bnx_softc *sc = ifp->if_softc;
5053
5054 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5055 bnx_dump_status_block(sc));
5056 /*
5057 * If we are in this routine because of pause frames, then
5058 * don't reset the hardware.
5059 */
5060 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5061 return;
5062
5063 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
5064
5065 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5066
5067 bnx_init(ifp);
5068
5069 ifp->if_oerrors++;
5070 }
5071
5072 /*
5073 * Interrupt handler.
5074 */
5075 /****************************************************************************/
5076 /* Main interrupt entry point. Verifies that the controller generated the */
/* interrupt and then calls a separate routine to handle the various */
5078 /* interrupt causes (PHY, TX, RX). */
5079 /* */
5080 /* Returns: */
5081 /* 0 for success, positive value for failure. */
5082 /****************************************************************************/
5083 int
5084 bnx_intr(void *xsc)
5085 {
5086 struct bnx_softc *sc;
5087 struct ifnet *ifp;
5088 u_int32_t status_attn_bits;
5089 const struct status_block *sblk;
5090
5091 sc = xsc;
5092 if (!device_is_active(sc->bnx_dev))
5093 return 0;
5094
5095 ifp = &sc->bnx_ec.ec_if;
5096
5097 DBRUNIF(1, sc->interrupts_generated++);
5098
5099 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5100 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5101
5102 /*
5103 * If the hardware status block index
5104 * matches the last value read by the
5105 * driver and we haven't asserted our
5106 * interrupt then there's nothing to do.
5107 */
5108 if ((sc->status_block->status_idx == sc->last_status_idx) &&
5109 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
5110 BNX_PCICFG_MISC_STATUS_INTA_VALUE))
5111 return (0);
5112
	/* Ack the interrupt and stop others from occurring. */
5114 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5115 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5116 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5117
5118 /* Keep processing data as long as there is work to do. */
5119 for (;;) {
5120 sblk = sc->status_block;
5121 status_attn_bits = sblk->status_attn_bits;
5122
5123 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    aprint_debug("Simulating unexpected status attention bit set.\n");
5125 status_attn_bits = status_attn_bits |
5126 STATUS_ATTN_BITS_PARITY_ERROR);
5127
5128 /* Was it a link change interrupt? */
5129 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5130 (sblk->status_attn_bits_ack &
5131 STATUS_ATTN_BITS_LINK_STATE))
5132 bnx_phy_intr(sc);
5133
5134 /* If any other attention is asserted then the chip is toast. */
5135 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5136 (sblk->status_attn_bits_ack &
5137 ~STATUS_ATTN_BITS_LINK_STATE))) {
5138 DBRUN(1, sc->unexpected_attentions++);
5139
5140 BNX_PRINTF(sc,
5141 "Fatal attention detected: 0x%08X\n",
5142 sblk->status_attn_bits);
5143
5144 DBRUN(BNX_FATAL,
5145 if (bnx_debug_unexpected_attention == 0)
5146 bnx_breakpoint(sc));
5147
5148 bnx_init(ifp);
5149 return (1);
5150 }
5151
5152 /* Check for any completed RX frames. */
5153 if (sblk->status_rx_quick_consumer_index0 !=
5154 sc->hw_rx_cons)
5155 bnx_rx_intr(sc);
5156
5157 /* Check for any completed TX frames. */
5158 if (sblk->status_tx_quick_consumer_index0 !=
5159 sc->hw_tx_cons)
5160 bnx_tx_intr(sc);
5161
5162 /* Save the status block index value for use during the
5163 * next interrupt.
5164 */
5165 sc->last_status_idx = sblk->status_idx;
5166
5167 /* Prevent speculative reads from getting ahead of the
5168 * status block.
5169 */
5170 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
5171 BUS_SPACE_BARRIER_READ);
5172
5173 /* If there's no work left then exit the isr. */
5174 if ((sblk->status_rx_quick_consumer_index0 ==
5175 sc->hw_rx_cons) &&
5176 (sblk->status_tx_quick_consumer_index0 ==
5177 sc->hw_tx_cons))
5178 break;
5179 }
5180
5181 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5182 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
5183
5184 /* Re-enable interrupts. */
5185 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5186 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5187 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5188 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5189 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5190
5191 /* Handle any frames that arrived while handling the interrupt. */
5192 if (!IFQ_IS_EMPTY(&ifp->if_snd))
5193 bnx_start(ifp);
5194
5195 return (1);
5196 }
5197
5198 /****************************************************************************/
5199 /* Programs the various packet receive modes (broadcast and multicast). */
5200 /* */
5201 /* Returns: */
5202 /* Nothing. */
5203 /****************************************************************************/
5204 void
5205 bnx_iff(struct bnx_softc *sc)
5206 {
5207 struct ethercom *ec = &sc->bnx_ec;
5208 struct ifnet *ifp = &ec->ec_if;
5209 struct ether_multi *enm;
5210 struct ether_multistep step;
5211 u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5212 u_int32_t rx_mode, sort_mode;
5213 int h, i;
5214
5215 /* Initialize receive mode default settings. */
5216 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
5217 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
5218 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
5219 ifp->if_flags &= ~IFF_ALLMULTI;
5220
5221 /*
5222 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
5224 */
5225 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
5226 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
5227
5228 /*
5229 * Check for promiscuous, all multicast, or selected
5230 * multicast address filtering.
5231 */
5232 if (ifp->if_flags & IFF_PROMISC) {
5233 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
5234
5235 ifp->if_flags |= IFF_ALLMULTI;
5236 /* Enable promiscuous mode. */
5237 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
5238 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
5239 } else if (ifp->if_flags & IFF_ALLMULTI) {
5240 allmulti:
5241 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
5242
5243 ifp->if_flags |= IFF_ALLMULTI;
5244 /* Enable all multicast addresses. */
5245 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5246 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5247 0xffffffff);
5248 sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
5249 } else {
5250 /* Accept one or more multicast(s). */
5251 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
5252
5253 ETHER_FIRST_MULTI(step, ec, enm);
5254 while (enm != NULL) {
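			/*
			 * A range of multicast addresses (enm_addrlo !=
			 * enm_addrhi) can't be represented in the hash
			 * filter, so fall back to receiving all multicast
			 * frames.
			 */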
5255 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
5256 ETHER_ADDR_LEN)) {
5257 goto allmulti;
5258 }
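			/*
			 * Map the CRC32 of the address onto one of 256
			 * filter bits: bits 7..5 of the low byte select
			 * the hash register, bits 4..0 the bit within it.
			 */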
5259 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
5260 0xFF;
5261 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5262 ETHER_NEXT_MULTI(step, enm);
5263 }
5264
5265 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5266 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5267 hashes[i]);
5268
5269 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
5270 }
5271
5272 	/* Only make changes if the receive mode has actually changed. */
5273 if (rx_mode != sc->rx_mode) {
5274 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5275 rx_mode);
5276
5277 sc->rx_mode = rx_mode;
5278 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
5279 }
5280
5281 	/* Disable and clear the existing sort before enabling a new sort. */
5282 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
5283 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
5284 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
5285 }
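
/*
 * Editor's sketch (not driver code): how a single multicast address
 * lands in the 8 x 32-bit hash filter programmed by bnx_iff() above.
 */
#if 0
	static const u_int8_t addr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* example address */
	u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0 };
	int h;

	h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0xFF;
	hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
#endif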
5286
5287 /****************************************************************************/
5288 /* Called periodically to update statistics from the controller's */
5289 /* statistics block. */
5290 /* */
5291 /* Returns: */
5292 /* Nothing. */
5293 /****************************************************************************/
5294 void
5295 bnx_stats_update(struct bnx_softc *sc)
5296 {
5297 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5298 struct statistics_block *stats;
5299
5300 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
5301 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5302 BUS_DMASYNC_POSTREAD);
5303
5304 stats = (struct statistics_block *)sc->stats_block;
5305
5306 /*
5307 * Update the interface statistics from the
5308 * hardware statistics.
5309 */
5310 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5311
5312 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5313 (u_long)stats->stat_EtherStatsOverrsizePkts +
5314 (u_long)stats->stat_IfInMBUFDiscards +
5315 (u_long)stats->stat_Dot3StatsAlignmentErrors +
5316 (u_long)stats->stat_Dot3StatsFCSErrors;
5317
5318 ifp->if_oerrors = (u_long)
5319 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5320 (u_long)stats->stat_Dot3StatsExcessiveCollisions +
5321 (u_long)stats->stat_Dot3StatsLateCollisions;
5322
5323 /*
5324 * Certain controllers don't report
5325 * carrier sense errors correctly.
5326 * See errata E11_5708CA0_1165.
5327 */
5328 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
5329 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
5330 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5331
5332 /*
5333 * Update the sysctl statistics from the
5334 * hardware statistics.
5335 */
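#if 0
	/*
	 * Editor's sketch (hypothetical helper, not part of the driver):
	 * each 64-bit counter below is kept by the chip as a hi/lo pair
	 * of 32-bit words; a macro like this would fold the pairs that
	 * are copied out longhand below.
	 */
#define	BNX_STAT64(s, n)						\
	(((u_int64_t)(s)->stat_ ## n ## _hi << 32) +			\
	    (u_int64_t)(s)->stat_ ## n ## _lo)
#endif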
5336 sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
5337 (u_int64_t) stats->stat_IfHCInOctets_lo;
5338
5339 sc->stat_IfHCInBadOctets =
5340 ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
5341 (u_int64_t) stats->stat_IfHCInBadOctets_lo;
5342
5343 sc->stat_IfHCOutOctets =
5344 ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
5345 (u_int64_t) stats->stat_IfHCOutOctets_lo;
5346
5347 sc->stat_IfHCOutBadOctets =
5348 ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
5349 (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
5350
5351 sc->stat_IfHCInUcastPkts =
5352 ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
5353 (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
5354
5355 sc->stat_IfHCInMulticastPkts =
5356 ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
5357 (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
5358
5359 sc->stat_IfHCInBroadcastPkts =
5360 ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5361 (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
5362
5363 sc->stat_IfHCOutUcastPkts =
5364 ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
5365 (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
5366
5367 sc->stat_IfHCOutMulticastPkts =
5368 ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5369 (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
5370
5371 sc->stat_IfHCOutBroadcastPkts =
5372 ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5373 (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
5374
5375 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5376 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5377
5378 sc->stat_Dot3StatsCarrierSenseErrors =
5379 stats->stat_Dot3StatsCarrierSenseErrors;
5380
5381 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
5382
5383 sc->stat_Dot3StatsAlignmentErrors =
5384 stats->stat_Dot3StatsAlignmentErrors;
5385
5386 sc->stat_Dot3StatsSingleCollisionFrames =
5387 stats->stat_Dot3StatsSingleCollisionFrames;
5388
5389 sc->stat_Dot3StatsMultipleCollisionFrames =
5390 stats->stat_Dot3StatsMultipleCollisionFrames;
5391
5392 sc->stat_Dot3StatsDeferredTransmissions =
5393 stats->stat_Dot3StatsDeferredTransmissions;
5394
5395 sc->stat_Dot3StatsExcessiveCollisions =
5396 stats->stat_Dot3StatsExcessiveCollisions;
5397
5398 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
5399
5400 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
5401
5402 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
5403
5404 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
5405
5406 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
5407
5408 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
5409
5410 sc->stat_EtherStatsPktsRx64Octets =
5411 stats->stat_EtherStatsPktsRx64Octets;
5412
5413 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5414 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5415
5416 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5417 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5418
5419 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5420 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5421
5422 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5423 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5424
5425 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5426 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5427
5428 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5429 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5430
5431 sc->stat_EtherStatsPktsTx64Octets =
5432 stats->stat_EtherStatsPktsTx64Octets;
5433
5434 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5435 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5436
5437 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5438 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5439
5440 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5441 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5442
5443 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5444 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5445
5446 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5447 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5448
5449 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5450 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5451
5452 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
5453
5454 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
5455
5456 sc->stat_OutXonSent = stats->stat_OutXonSent;
5457
5458 sc->stat_OutXoffSent = stats->stat_OutXoffSent;
5459
5460 sc->stat_FlowControlDone = stats->stat_FlowControlDone;
5461
5462 sc->stat_MacControlFramesReceived =
5463 stats->stat_MacControlFramesReceived;
5464
5465 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
5466
5467 sc->stat_IfInFramesL2FilterDiscards =
5468 stats->stat_IfInFramesL2FilterDiscards;
5469
5470 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
5471
5472 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
5473
5474 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
5475
5476 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
5477
5478 sc->stat_CatchupInRuleCheckerDiscards =
5479 stats->stat_CatchupInRuleCheckerDiscards;
5480
5481 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
5482
5483 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
5484
5485 sc->stat_CatchupInRuleCheckerP4Hit =
5486 stats->stat_CatchupInRuleCheckerP4Hit;
5487
5488 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
5489 }
5490
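/****************************************************************************/
/* Handles the periodic driver tick: reports the driver pulse to the       */
/* firmware, refreshes statistics, runs the MII tick and tops up the RX    */
/* chain.                                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/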
5491 void
5492 bnx_tick(void *xsc)
5493 {
5494 struct bnx_softc *sc = xsc;
5495 struct mii_data *mii;
5496 u_int32_t msg;
5497 u_int16_t prod, chain_prod;
5498 u_int32_t prod_bseq;
5499 int s = splnet();
5500
5501 /* Tell the firmware that the driver is still running. */
5502 #ifdef BNX_DEBUG
5503 msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5504 #else
5505 msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5506 #endif
5507 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5508
5509 /* Update the statistics from the hardware statistics block. */
5510 bnx_stats_update(sc);
5511
5512 /* Schedule the next tick. */
5513 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
5514
5515 mii = &sc->bnx_mii;
5516 mii_tick(mii);
5517
5518 /* try to get more RX buffers, just in case */
5519 prod = sc->rx_prod;
5520 prod_bseq = sc->rx_prod_bseq;
5521 chain_prod = RX_CHAIN_IDX(prod);
5522 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
5523 sc->rx_prod = prod;
5524 sc->rx_prod_bseq = prod_bseq;
5525 splx(s);
5526 return;
5527 }
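
/*
 * Editor's sketch (not driver code): bnx_tick() re-arms itself with
 * callout_reset() above; attach/init code arms the callout the first
 * time, roughly as follows.
 */
#if 0
	callout_init(&sc->bnx_timeout, 0);
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
#endif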
5528
5529 /****************************************************************************/
5530 /* BNX Debug Routines */
5531 /****************************************************************************/
5532 #ifdef BNX_DEBUG
5533
5534 /****************************************************************************/
5535 /* Prints out information about an mbuf. */
5536 /* */
5537 /* Returns: */
5538 /* Nothing. */
5539 /****************************************************************************/
5540 void
5541 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5542 {
5543 struct mbuf *mp = m;
5544
5545 if (m == NULL) {
5546 		/* NULL mbuf pointer. */
5547 aprint_error("mbuf ptr is null!\n");
5548 return;
5549 }
5550
5551 while (mp) {
5552 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5553 mp, mp->m_len);
5554
5555 if (mp->m_flags & M_EXT)
5556 aprint_debug("M_EXT ");
5557 if (mp->m_flags & M_PKTHDR)
5558 aprint_debug("M_PKTHDR ");
5559 aprint_debug("\n");
5560
5561 if (mp->m_flags & M_EXT)
5562 			aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n",
5563 			    mp->m_ext.ext_buf, mp->m_ext.ext_size);
5564
5565 mp = mp->m_next;
5566 }
5567 }
5568
5569 /****************************************************************************/
5570 /* Prints out the mbufs in the TX mbuf chain. */
5571 /* */
5572 /* Returns: */
5573 /* Nothing. */
5574 /****************************************************************************/
5575 void
5576 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5577 {
5578 #if 0
5579 struct mbuf *m;
5580 int i;
5581
5582 aprint_debug_dev(sc->bnx_dev,
5583 "----------------------------"
5584 " tx mbuf data "
5585 "----------------------------\n");
5586
5587 for (i = 0; i < count; i++) {
5588 m = sc->tx_mbuf_ptr[chain_prod];
5589 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5590 bnx_dump_mbuf(sc, m);
5591 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5592 }
5593
5594 aprint_debug_dev(sc->bnx_dev,
5595 "--------------------------------------------"
5596 "----------------------------\n");
5597 #endif
5598 }
5599
5600 /*
5601 * This routine prints the RX mbuf chain.
5602 */
5603 void
5604 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5605 {
5606 struct mbuf *m;
5607 int i;
5608
5609 aprint_debug_dev(sc->bnx_dev,
5610 "----------------------------"
5611 " rx mbuf data "
5612 "----------------------------\n");
5613
5614 for (i = 0; i < count; i++) {
5615 m = sc->rx_mbuf_ptr[chain_prod];
5616 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5617 bnx_dump_mbuf(sc, m);
5618 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5619 }
5620
5622 aprint_debug_dev(sc->bnx_dev,
5623 "--------------------------------------------"
5624 "----------------------------\n");
5625 }
5626
5627 void
5628 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5629 {
5630 if (idx > MAX_TX_BD)
5631 /* Index out of range. */
5632 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5633 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5634 /* TX Chain page pointer. */
5635 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5636 "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5637 txbd->tx_bd_haddr_lo);
5638 else
5639 /* Normal tx_bd entry. */
5640 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5641 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5642 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5643 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5644 txbd->tx_bd_flags);
5645 }
5646
5647 void
5648 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
5649 {
5650 if (idx > MAX_RX_BD)
5651 /* Index out of range. */
5652 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
5653 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5654 		/* RX Chain page pointer. */
5655 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
5656 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
5657 rxbd->rx_bd_haddr_lo);
5658 else
5659 		/* Normal rx_bd entry. */
5660 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5661 "0x%08X, flags = 0x%08X\n", idx,
5662 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
5663 rxbd->rx_bd_len, rxbd->rx_bd_flags);
5664 }
5665
5666 void
5667 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
5668 {
5669 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
5670 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
5671 "tcp_udp_xsum = 0x%04X\n", idx,
5672 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
5673 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
5674 l2fhdr->l2_fhdr_tcp_udp_xsum);
5675 }
5676
5677 /*
5678 * This routine prints the TX chain.
5679 */
5680 void
5681 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5682 {
5683 struct tx_bd *txbd;
5684 int i;
5685
5686 /* First some info about the tx_bd chain structure. */
5687 aprint_debug_dev(sc->bnx_dev,
5688 "----------------------------"
5689 " tx_bd chain "
5690 "----------------------------\n");
5691
5692 BNX_PRINTF(sc,
5693 "page size = 0x%08X, tx chain pages = 0x%08X\n",
5694 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5695
5696 BNX_PRINTF(sc,
5697 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5698 (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5699
5700 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD);
5701
5702 	aprint_debug_dev(sc->bnx_dev,
5703 "-----------------------------"
5704 " tx_bd data "
5705 "-----------------------------\n");
5706
5707 /* Now print out the tx_bd's themselves. */
5708 for (i = 0; i < count; i++) {
5709 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5710 bnx_dump_txbd(sc, tx_prod, txbd);
5711 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5712 }
5713
5714 aprint_debug_dev(sc->bnx_dev,
5715 "-----------------------------"
5716 "--------------"
5717 "-----------------------------\n");
5718 }
5719
5720 /*
5721 * This routine prints the RX chain.
5722 */
5723 void
5724 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5725 {
5726 struct rx_bd *rxbd;
5727 int i;
5728
5729 	/* First some info about the rx_bd chain structure. */
5730 aprint_debug_dev(sc->bnx_dev,
5731 "----------------------------"
5732 " rx_bd chain "
5733 "----------------------------\n");
5734
5737 BNX_PRINTF(sc,
5738 "page size = 0x%08X, rx chain pages = 0x%08X\n",
5739 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5740
5741 BNX_PRINTF(sc,
5742 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5743 (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5744
5745 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD);
5746
5747 	aprint_debug_dev(sc->bnx_dev,
5748 "----------------------------"
5749 " rx_bd data "
5750 "----------------------------\n");
5751
5752 /* Now print out the rx_bd's themselves. */
5753 for (i = 0; i < count; i++) {
5754 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5755 bnx_dump_rxbd(sc, rx_prod, rxbd);
5756 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5757 }
5758
5759 aprint_debug_dev(sc->bnx_dev,
5760 "----------------------------"
5761 "--------------"
5762 "----------------------------\n");
5763 }
5764
5765 /*
5766 * This routine prints the status block.
5767 */
5768 void
5769 bnx_dump_status_block(struct bnx_softc *sc)
5770 {
5771 struct status_block *sblk;
5772 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5773 BUS_DMASYNC_POSTREAD);
5774
5775 sblk = sc->status_block;
5776
5777 aprint_debug_dev(sc->bnx_dev, "----------------------------- Status Block "
5778 "-----------------------------\n");
5779
5780 BNX_PRINTF(sc,
5781 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5782 sblk->status_attn_bits, sblk->status_attn_bits_ack,
5783 sblk->status_idx);
5784
5785 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
5786 sblk->status_rx_quick_consumer_index0,
5787 sblk->status_tx_quick_consumer_index0);
5788
5791 	/* These indices are not used for normal L2 drivers. */
5792 if (sblk->status_rx_quick_consumer_index1 ||
5793 sblk->status_tx_quick_consumer_index1)
5794 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
5795 sblk->status_rx_quick_consumer_index1,
5796 sblk->status_tx_quick_consumer_index1);
5797
5798 if (sblk->status_rx_quick_consumer_index2 ||
5799 sblk->status_tx_quick_consumer_index2)
5800 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
5801 sblk->status_rx_quick_consumer_index2,
5802 sblk->status_tx_quick_consumer_index2);
5803
5804 if (sblk->status_rx_quick_consumer_index3 ||
5805 sblk->status_tx_quick_consumer_index3)
5806 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
5807 sblk->status_rx_quick_consumer_index3,
5808 sblk->status_tx_quick_consumer_index3);
5809
5810 if (sblk->status_rx_quick_consumer_index4 ||
5811 sblk->status_rx_quick_consumer_index5)
5812 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
5813 sblk->status_rx_quick_consumer_index4,
5814 sblk->status_rx_quick_consumer_index5);
5815
5816 if (sblk->status_rx_quick_consumer_index6 ||
5817 sblk->status_rx_quick_consumer_index7)
5818 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
5819 sblk->status_rx_quick_consumer_index6,
5820 sblk->status_rx_quick_consumer_index7);
5821
5822 if (sblk->status_rx_quick_consumer_index8 ||
5823 sblk->status_rx_quick_consumer_index9)
5824 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
5825 sblk->status_rx_quick_consumer_index8,
5826 sblk->status_rx_quick_consumer_index9);
5827
5828 if (sblk->status_rx_quick_consumer_index10 ||
5829 sblk->status_rx_quick_consumer_index11)
5830 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
5831 sblk->status_rx_quick_consumer_index10,
5832 sblk->status_rx_quick_consumer_index11);
5833
5834 if (sblk->status_rx_quick_consumer_index12 ||
5835 sblk->status_rx_quick_consumer_index13)
5836 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
5837 sblk->status_rx_quick_consumer_index12,
5838 sblk->status_rx_quick_consumer_index13);
5839
5840 if (sblk->status_rx_quick_consumer_index14 ||
5841 sblk->status_rx_quick_consumer_index15)
5842 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
5843 sblk->status_rx_quick_consumer_index14,
5844 sblk->status_rx_quick_consumer_index15);
5845
5846 if (sblk->status_completion_producer_index ||
5847 sblk->status_cmd_consumer_index)
5848 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
5849 sblk->status_completion_producer_index,
5850 sblk->status_cmd_consumer_index);
5851
5852 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
5853 "-----------------------------\n");
5854 }
5855
5856 /*
5857 * This routine prints the statistics block.
5858 */
5859 void
5860 bnx_dump_stats_block(struct bnx_softc *sc)
5861 {
5862 struct statistics_block *sblk;
5863 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5864 BUS_DMASYNC_POSTREAD);
5865
5866 sblk = sc->stats_block;
5867
5868 	aprint_debug_dev(sc->bnx_dev,
5869 "-----------------------------"
5870 " Stats Block "
5871 "-----------------------------\n");
5872
5873 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
5874 "IfHcInBadOctets = 0x%08X:%08X\n",
5875 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5876 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5877
5878 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
5879 "IfHcOutBadOctets = 0x%08X:%08X\n",
5880 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5881 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5882
5883 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
5884 "IfHcInMulticastPkts = 0x%08X:%08X\n",
5885 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5886 sblk->stat_IfHCInMulticastPkts_hi,
5887 sblk->stat_IfHCInMulticastPkts_lo);
5888
5889 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
5890 "IfHcOutUcastPkts = 0x%08X:%08X\n",
5891 sblk->stat_IfHCInBroadcastPkts_hi,
5892 sblk->stat_IfHCInBroadcastPkts_lo,
5893 sblk->stat_IfHCOutUcastPkts_hi,
5894 sblk->stat_IfHCOutUcastPkts_lo);
5895
5896 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5897 "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5898 sblk->stat_IfHCOutMulticastPkts_hi,
5899 sblk->stat_IfHCOutMulticastPkts_lo,
5900 sblk->stat_IfHCOutBroadcastPkts_hi,
5901 sblk->stat_IfHCOutBroadcastPkts_lo);
5902
5903 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5904 BNX_PRINTF(sc, "0x%08X : "
5905 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5906 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5907
5908 if (sblk->stat_Dot3StatsCarrierSenseErrors)
5909 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5910 sblk->stat_Dot3StatsCarrierSenseErrors);
5911
5912 if (sblk->stat_Dot3StatsFCSErrors)
5913 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5914 sblk->stat_Dot3StatsFCSErrors);
5915
5916 if (sblk->stat_Dot3StatsAlignmentErrors)
5917 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5918 sblk->stat_Dot3StatsAlignmentErrors);
5919
5920 if (sblk->stat_Dot3StatsSingleCollisionFrames)
5921 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5922 sblk->stat_Dot3StatsSingleCollisionFrames);
5923
5924 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5925 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5926 sblk->stat_Dot3StatsMultipleCollisionFrames);
5927
5928 if (sblk->stat_Dot3StatsDeferredTransmissions)
5929 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5930 sblk->stat_Dot3StatsDeferredTransmissions);
5931
5932 if (sblk->stat_Dot3StatsExcessiveCollisions)
5933 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5934 sblk->stat_Dot3StatsExcessiveCollisions);
5935
5936 if (sblk->stat_Dot3StatsLateCollisions)
5937 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5938 sblk->stat_Dot3StatsLateCollisions);
5939
5940 if (sblk->stat_EtherStatsCollisions)
5941 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5942 sblk->stat_EtherStatsCollisions);
5943
5944 if (sblk->stat_EtherStatsFragments)
5945 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5946 sblk->stat_EtherStatsFragments);
5947
5948 if (sblk->stat_EtherStatsJabbers)
5949 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5950 sblk->stat_EtherStatsJabbers);
5951
5952 if (sblk->stat_EtherStatsUndersizePkts)
5953 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5954 sblk->stat_EtherStatsUndersizePkts);
5955
5956 if (sblk->stat_EtherStatsOverrsizePkts)
5957 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5958 sblk->stat_EtherStatsOverrsizePkts);
5959
5960 if (sblk->stat_EtherStatsPktsRx64Octets)
5961 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5962 sblk->stat_EtherStatsPktsRx64Octets);
5963
5964 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5965 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5966 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5967
5968 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5969 BNX_PRINTF(sc, "0x%08X : "
5970 "EtherStatsPktsRx128Octetsto255Octets\n",
5971 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5972
5973 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5974 BNX_PRINTF(sc, "0x%08X : "
5975 "EtherStatsPktsRx256Octetsto511Octets\n",
5976 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5977
5978 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5979 BNX_PRINTF(sc, "0x%08X : "
5980 "EtherStatsPktsRx512Octetsto1023Octets\n",
5981 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5982
5983 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5984 BNX_PRINTF(sc, "0x%08X : "
5985 "EtherStatsPktsRx1024Octetsto1522Octets\n",
5986 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5987
5988 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5989 BNX_PRINTF(sc, "0x%08X : "
5990 "EtherStatsPktsRx1523Octetsto9022Octets\n",
5991 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5992
5993 if (sblk->stat_EtherStatsPktsTx64Octets)
5994 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5995 sblk->stat_EtherStatsPktsTx64Octets);
5996
5997 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5998 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5999 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6000
6001 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6002 BNX_PRINTF(sc, "0x%08X : "
6003 "EtherStatsPktsTx128Octetsto255Octets\n",
6004 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6005
6006 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6007 BNX_PRINTF(sc, "0x%08X : "
6008 "EtherStatsPktsTx256Octetsto511Octets\n",
6009 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6010
6011 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6012 BNX_PRINTF(sc, "0x%08X : "
6013 "EtherStatsPktsTx512Octetsto1023Octets\n",
6014 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6015
6016 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6017 BNX_PRINTF(sc, "0x%08X : "
6018 "EtherStatsPktsTx1024Octetsto1522Octets\n",
6019 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6020
6021 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6022 BNX_PRINTF(sc, "0x%08X : "
6023 "EtherStatsPktsTx1523Octetsto9022Octets\n",
6024 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6025
6026 if (sblk->stat_XonPauseFramesReceived)
6027 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6028 sblk->stat_XonPauseFramesReceived);
6029
6030 if (sblk->stat_XoffPauseFramesReceived)
6031 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6032 sblk->stat_XoffPauseFramesReceived);
6033
6034 if (sblk->stat_OutXonSent)
6035 BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
6036 sblk->stat_OutXonSent);
6037
6038 if (sblk->stat_OutXoffSent)
6039 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
6040 sblk->stat_OutXoffSent);
6041
6042 if (sblk->stat_FlowControlDone)
6043 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
6044 sblk->stat_FlowControlDone);
6045
6046 if (sblk->stat_MacControlFramesReceived)
6047 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6048 sblk->stat_MacControlFramesReceived);
6049
6050 if (sblk->stat_XoffStateEntered)
6051 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6052 sblk->stat_XoffStateEntered);
6053
6054 if (sblk->stat_IfInFramesL2FilterDiscards)
6055 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6056 sblk->stat_IfInFramesL2FilterDiscards);
6057
6058 if (sblk->stat_IfInRuleCheckerDiscards)
6059 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6060 sblk->stat_IfInRuleCheckerDiscards);
6061
6062 if (sblk->stat_IfInFTQDiscards)
6063 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6064 sblk->stat_IfInFTQDiscards);
6065
6066 if (sblk->stat_IfInMBUFDiscards)
6067 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6068 sblk->stat_IfInMBUFDiscards);
6069
6070 if (sblk->stat_IfInRuleCheckerP4Hit)
6071 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6072 sblk->stat_IfInRuleCheckerP4Hit);
6073
6074 if (sblk->stat_CatchupInRuleCheckerDiscards)
6075 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6076 sblk->stat_CatchupInRuleCheckerDiscards);
6077
6078 if (sblk->stat_CatchupInFTQDiscards)
6079 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6080 sblk->stat_CatchupInFTQDiscards);
6081
6082 if (sblk->stat_CatchupInMBUFDiscards)
6083 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6084 sblk->stat_CatchupInMBUFDiscards);
6085
6086 if (sblk->stat_CatchupInRuleCheckerP4Hit)
6087 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6088 sblk->stat_CatchupInRuleCheckerP4Hit);
6089
6090 aprint_debug_dev(sc->bnx_dev,
6091 "-----------------------------"
6092 "--------------"
6093 "-----------------------------\n");
6094 }
6095
6096 void
6097 bnx_dump_driver_state(struct bnx_softc *sc)
6098 {
6099 aprint_debug_dev(sc->bnx_dev,
6100 "-----------------------------"
6101 " Driver State "
6102 "-----------------------------\n");
6103
6104 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6105 "address\n", sc);
6106
6107 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6108 sc->status_block);
6109
6110 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6111 "address\n", sc->stats_block);
6112
6113 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6114 "adddress\n", sc->tx_bd_chain);
6115
6116 #if 0
6117 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6118 sc->rx_bd_chain);
6119
6120 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6121 sc->tx_mbuf_ptr);
6122 #endif
6123
6124 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6125 sc->rx_mbuf_ptr);
6126
6127 BNX_PRINTF(sc,
6128 " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
6129 sc->interrupts_generated);
6130
6131 BNX_PRINTF(sc,
6132 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6133 sc->rx_interrupts);
6134
6135 BNX_PRINTF(sc,
6136 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6137 sc->tx_interrupts);
6138
6139 BNX_PRINTF(sc,
6140 " 0x%08X - (sc->last_status_idx) status block index\n",
6141 sc->last_status_idx);
6142
6143 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
6144 sc->tx_prod);
6145
6146 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
6147 sc->tx_cons);
6148
6149 BNX_PRINTF(sc,
6150 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6151 sc->tx_prod_bseq);
6152 BNX_PRINTF(sc,
6153 " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6154 sc->tx_mbuf_alloc);
6155
6156 BNX_PRINTF(sc,
6157 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6158 sc->used_tx_bd);
6159
6160 BNX_PRINTF(sc,
6161 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6162 sc->tx_hi_watermark, sc->max_tx_bd);
6163
6165 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
6166 sc->rx_prod);
6167
6168 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
6169 sc->rx_cons);
6170
6171 BNX_PRINTF(sc,
6172 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6173 sc->rx_prod_bseq);
6174
6175 BNX_PRINTF(sc,
6176 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6177 sc->rx_mbuf_alloc);
6178
6179 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6180 sc->free_rx_bd);
6181
6182 BNX_PRINTF(sc,
6183 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6184 sc->rx_low_watermark, sc->max_rx_bd);
6185
6186 BNX_PRINTF(sc,
6187 " 0x%08X - (sc->mbuf_alloc_failed) "
6188 "mbuf alloc failures\n",
6189 sc->mbuf_alloc_failed);
6190
6191 BNX_PRINTF(sc,
6192 " 0x%0X - (sc->mbuf_sim_allocated_failed) "
6193 "simulated mbuf alloc failures\n",
6194 sc->mbuf_sim_alloc_failed);
6195
6196 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
6197 "-----------------------------\n");
6198 }
6199
6200 void
6201 bnx_dump_hw_state(struct bnx_softc *sc)
6202 {
6203 u_int32_t val1;
6204 int i;
6205
6206 aprint_debug_dev(sc->bnx_dev,
6207 "----------------------------"
6208 " Hardware State "
6209 "----------------------------\n");
6210
6211 BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
6212
6213 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6214 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6215 val1, BNX_MISC_ENABLE_STATUS_BITS);
6216
6217 val1 = REG_RD(sc, BNX_DMA_STATUS);
6218 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6219
6220 val1 = REG_RD(sc, BNX_CTX_STATUS);
6221 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6222
6223 val1 = REG_RD(sc, BNX_EMAC_STATUS);
6224 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6225 BNX_EMAC_STATUS);
6226
6227 val1 = REG_RD(sc, BNX_RPM_STATUS);
6228 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6229
6230 val1 = REG_RD(sc, BNX_TBDR_STATUS);
6231 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6232 BNX_TBDR_STATUS);
6233
6234 val1 = REG_RD(sc, BNX_TDMA_STATUS);
6235 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6236 BNX_TDMA_STATUS);
6237
6238 val1 = REG_RD(sc, BNX_HC_STATUS);
6239 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6240
6241 aprint_debug_dev(sc->bnx_dev,
6242 "----------------------------"
6243 "----------------"
6244 "----------------------------\n");
6245
6246 aprint_debug_dev(sc->bnx_dev,
6247 "----------------------------"
6248 " Register Dump "
6249 "----------------------------\n");
6250
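	/* Dump registers 0x400 through 0x7FFF, four 32-bit words per line. */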
6251 for (i = 0x400; i < 0x8000; i += 0x10)
6252 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6253 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6254 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6255
6256 aprint_debug_dev(sc->bnx_dev,
6257 "----------------------------"
6258 "----------------"
6259 "----------------------------\n");
6260 }
6261
6262 void
6263 bnx_breakpoint(struct bnx_softc *sc)
6264 {
6265 /* Unreachable code to shut the compiler up about unused functions. */
6266 if (0) {
6267 bnx_dump_txbd(sc, 0, NULL);
6268 bnx_dump_rxbd(sc, 0, NULL);
6269 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6270 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6271 bnx_dump_l2fhdr(sc, 0, NULL);
6272 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6273 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6274 bnx_dump_status_block(sc);
6275 bnx_dump_stats_block(sc);
6276 bnx_dump_driver_state(sc);
6277 bnx_dump_hw_state(sc);
6278 }
6279
6280 bnx_dump_driver_state(sc);
6281 /* Print the important status block fields. */
6282 bnx_dump_status_block(sc);
6283
6284 #if 0
6285 /* Call the debugger. */
6286 breakpoint();
6287 #endif
6288
6289 return;
6290 }
6291 #endif
6292