/*	$NetBSD: if_bnx.c,v 1.20 2008/09/09 20:12:18 mhitch Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.43 2007/01/30 03:21:10 krw Exp $	*/

/*-
 * Copyright (c) 2006 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.20 2008/09/09 20:12:18 mhitch Exp $");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5708C B1, B2
 *
 * The following controllers are not supported by this driver:
 * (These are not "Production" versions of the controller.)
 *
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1, A2, A3
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0, B1
 */

#include <sys/callout.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
const char bnx_driver_version[] = "v0.9.6";

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
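
/*
 * Illustrative sketch (not driver code) of how these thresholds are
 * assumed to drive the fault-injection checks below: a pseudo-random
 * 31-bit value is compared against the threshold, so 1073741824 (2^30)
 * fires on roughly every other call, e.g.:
 *
 *	if ((random() & 0x7fffffff) < bnx_debug_mbuf_allocation_failure)
 *		(simulate an mbuf allocation failure)
 *
 * The exact macro and comparison are assumptions based on this table.
 */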

/* Controls how often the l2_fhdr frame error check will fail. */
int bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
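	/*
	 * Field order below (assumed from struct flash_spec in
	 * if_bnxreg.h): strapping, config1, config2, config3, write1,
	 * buffered flag, page_bits, page_size, addr_mask, total_size,
	 * and a printable device name.
	 */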
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/****************************************************************************/
/* NetBSD device entry points.                                              */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
int	bnx_miibus_read_reg(device_t, int, int);
void	bnx_miibus_write_reg(device_t, int, int, int);
void	bnx_miibus_statchg(device_t);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
	    u_int32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, u_int32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
int	bnx_get_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
	    u_int16_t *, u_int32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf **);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_init(struct ifnet *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_set_rx_mode(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

/****************************************************************************/
/* NetBSD device dispatch table.                                            */
/****************************************************************************/
CFATTACH_DECL_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 if the device is supported, 0 otherwise.                             */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return (1);

	return (0);
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc	*sc = device_private(self);
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	u_int32_t		command;
	struct ifnet		*ifp;
	u_int32_t		val;
	int			mii_flags = MIIF_FORCEANEG;
	pcireg_t		memtype;

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BNX_PCI_BAR0,
		    memtype, 0, &sc->bnx_btag, &sc->bnx_bhandle,
		    NULL, &sc->bnx_size) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BNX_CHIP_ID(sc)) {
	case BNX_CHIP_ID_5706_A0:
	case BNX_CHIP_ID_5706_A1:
	case BNX_CHIP_ID_5708_A0:
	case BNX_CHIP_ID_5708_B0:
		aprint_error_dev(sc->bnx_dev,
		    "unsupported controller revision (%c%d)!\n",
		    ((PCI_REVISION(pa->pa_class) & 0xf0) >> 4) + 'A',
		    PCI_REVISION(pa->pa_class) & 0x0f);
		goto bnx_attach_fail;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */
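
	/*
	 * Worked example (an interpretation of the values below, not from
	 * the data sheet): with the default TX settings of trip = 20 and
	 * ticks = 80, the controller coalesces interrupts until 20 TX BDs
	 * have completed or a completed BD has waited 80 ticks, whichever
	 * comes first.  The BNX_DEBUG values force an interrupt per BD.
	 */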

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;
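
	/*
	 * Assumption based on the mask above: the value is in microseconds
	 * and the hardware ignores the low 8 bits, so 1,000,000 & 0xffff00
	 * yields 999,936us -- effectively one second.
	 */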

	/*
	 * The copper based NetXtreme II controllers
	 * that support 2.5Gb operation (currently
	 * 5708S) use a PHY at address 2, otherwise
	 * the PHY is present at address 1.
	 */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
		sc->bnx_flags |= BNX_NO_WOL_FLAG;
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G)
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
		}
	}

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (0);
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
u_int32_t
bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		u_int32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return (val);
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset,
    u_int32_t val)
{

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
	    "val = 0x%08X\n", __func__, cid_addr, offset, val);

	offset += cid_addr;
	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
	REG_WR(sc, BNX_CTX_DATA, val);
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc	*sc = device_private(dev);
	u_int32_t		val;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return (0);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = device_private(dev);
	u_int32_t		val1;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc	*sc = device_private(dev);
	struct mii_data		*mii = &sc->bnx_mii;
	int			val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by the firmware and lock 2 is used by the driver.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	u_int32_t		val;
	int			j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 1 is used by the firmware and lock 2 is used by the driver.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int			j;
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish the NVRAM interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.                                               */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!sc->bnx_flash_info->buffered) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t		cmd;
	int			j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bnx_flash_info->buffered)
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
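
	/*
	 * Worked example (assuming the 264-byte pages of the buffered
	 * Atmel parts, page_bits = 9): linear offset 1000 is page 3
	 * (1000 / 264), byte 208 (1000 % 264), and is encoded for the
	 * NVM interface as (3 << 9) + 208 = 0x6d0.
	 */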

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return (rc);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t		cmd, val32;
	int			j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	u_int32_t		val;
	int			j, entry_count, rc;
	struct flash_spec	*flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u_int32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return (rc);

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		/* Return early so the NULL flash info is not used below. */
		return (ENODEV);
	}

	/* Write the flash config data to the shared memory interface. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.            */
/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
    int buf_size)
{
	int			rc = 0;
	u_int32_t		cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return (0);

	/* Request access to the flash interface. */
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);

	/* Enable access to flash interface */
	bnx_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u_int8_t buf[4];
		u_int32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
		} else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		if (rc)
			return (rc);

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}
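
	/*
	 * Worked example of the alignment handling above: a 5-byte read
	 * at offset 6 becomes a dword read at offset 4 keeping the last
	 * two bytes (pre_len = 2), then a final dword read at offset 8
	 * with extra = 1, of which three bytes are kept and one dropped.
	 */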

	if (len32 == 4) {
		u_int8_t buf[4];

		if (cmd_flags)
			cmd_flags = BNX_NVM_COMMAND_LAST;
		else
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		u_int8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return (rc);

		cmd_flags = BNX_NVM_COMMAND_LAST;
		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface and release the lock. */
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);

	return (rc);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
/*                                                                          */
/* Prepares the NVRAM interface for write access and writes the requested  */
/* data from the supplied buffer.  The caller is responsible for           */
/* calculating any appropriate CRCs.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
    int buf_size)
{
	u_int32_t		written, offset32, len32;
	u_int8_t		*buf, start[4], end[4];
	int			rc = 0;
	int			align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return (rc);
	}

	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				return (rc);
			}
		}
	}

	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return (ENOMEM);

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u_int32_t page_start, page_end, data_start, data_end;
		u_int32_t addr, cmd_flags;
		int i;
		u_int8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;
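
		/*
		 * Worked example (assuming a 256-byte page): writing 16
		 * bytes at offset 0x108 gives page_start = 0x100,
		 * page_end = 0x200, data_start = 0x108 and data_end =
		 * 0x118; bytes 0x100-0x107 and 0x118-0x1ff are restored
		 * below from the page image saved in flash_buffer.
		 */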
1680
1681 /* Request access to the flash interface. */
1682 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1683 goto nvram_write_end;
1684
1685 /* Enable access to flash interface */
1686 bnx_enable_nvram_access(sc);
1687
1688 cmd_flags = BNX_NVM_COMMAND_FIRST;
1689 if (sc->bnx_flash_info->buffered == 0) {
1690 int j;
1691
1692 /* Read the whole page into the buffer
1693 * (non-buffer flash only) */
1694 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1695 if (j == (sc->bnx_flash_info->page_size - 4))
1696 cmd_flags |= BNX_NVM_COMMAND_LAST;
1697
1698 rc = bnx_nvram_read_dword(sc,
1699 page_start + j,
1700 &flash_buffer[j],
1701 cmd_flags);
1702
1703 if (rc)
1704 goto nvram_write_end;
1705
1706 cmd_flags = 0;
1707 }
1708 }
1709
1710 /* Enable writes to flash interface (unlock write-protect) */
1711 if ((rc = bnx_enable_nvram_write(sc)) != 0)
1712 goto nvram_write_end;
1713
1714 /* Erase the page */
1715 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1716 goto nvram_write_end;
1717
1718 /* Re-enable the write again for the actual write */
1719 bnx_enable_nvram_write(sc);
1720
1721 /* Loop to write back the buffer data from page_start to
1722 * data_start */
1723 i = 0;
1724 if (sc->bnx_flash_info->buffered == 0) {
1725 for (addr = page_start; addr < data_start;
1726 addr += 4, i += 4) {
1727
1728 rc = bnx_nvram_write_dword(sc, addr,
1729 &flash_buffer[i], cmd_flags);
1730
1731 if (rc != 0)
1732 goto nvram_write_end;
1733
1734 cmd_flags = 0;
1735 }
1736 }
1737
1738 		/* Loop to write the new data from data_start to data_end;
		 * i tracks the byte offset into flash_buffer, so it must
		 * advance one full dword per iteration. */
1739 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1740 if ((addr == page_end - 4) ||
1741 ((sc->bnx_flash_info->buffered) &&
1742 (addr == data_end - 4))) {
1743
1744 cmd_flags |= BNX_NVM_COMMAND_LAST;
1745 }
1746
1747 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
1748
1749 if (rc != 0)
1750 goto nvram_write_end;
1751
1752 cmd_flags = 0;
1753 buf += 4;
1754 }
1755
1756 /* Loop to write back the buffer data from data_end
1757 * to page_end */
1758 if (sc->bnx_flash_info->buffered == 0) {
1759 for (addr = data_end; addr < page_end;
1760 addr += 4, i += 4) {
1761
1762 				if (addr == page_end - 4)
1763 cmd_flags = BNX_NVM_COMMAND_LAST;
1764
1765 rc = bnx_nvram_write_dword(sc, addr,
1766 &flash_buffer[i], cmd_flags);
1767
1768 if (rc != 0)
1769 goto nvram_write_end;
1770
1771 cmd_flags = 0;
1772 }
1773 }
1774
1775 /* Disable writes to flash interface (lock write-protect) */
1776 bnx_disable_nvram_write(sc);
1777
1778 /* Disable access to flash interface */
1779 bnx_disable_nvram_access(sc);
1780 bnx_release_nvram_lock(sc);
1781
1782 /* Increment written */
1783 written += data_end - data_start;
1784 }
1785
1786 nvram_write_end:
1787 if (align_start || align_end)
1788 free(buf, M_DEVBUF);
1789
1790 return (rc);
1791 }
1792 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1793
1794 /****************************************************************************/
1795 /* Verifies that NVRAM is accessible and contains valid data. */
1796 /* */
1797 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1798 /* correct. */
1799 /* */
1800 /* Returns: */
1801 /* 0 on success, positive value on failure. */
1802 /****************************************************************************/
1803 int
1804 bnx_nvram_test(struct bnx_softc *sc)
1805 {
1806 u_int32_t buf[BNX_NVRAM_SIZE / 4];
1807 u_int8_t *data = (u_int8_t *) buf;
1808 int rc = 0;
1809 u_int32_t magic, csum;
1810
1811 /*
1812 * Check that the device NVRAM is valid by reading
1813 * the magic value at offset 0.
1814 */
1815 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
1816 goto bnx_nvram_test_done;
1817
1818 magic = bnx_be32toh(buf[0]);
1819 if (magic != BNX_NVRAM_MAGIC) {
1820 rc = ENODEV;
1821 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
1822 "Expected: 0x%08X, Found: 0x%08X\n",
1823 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
1824 goto bnx_nvram_test_done;
1825 }
1826
1827 /*
1828 * Verify that the device NVRAM includes valid
1829 * configuration data.
1830 */
1831 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
1832 goto bnx_nvram_test_done;
1833
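	/*
	 * Each 0x100 byte configuration region carries its own CRC32 in
	 * its final bytes, so running the CRC over the full region (data
	 * plus stored checksum) yields a constant residual when intact.
	 */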
1834 csum = ether_crc32_le(data, 0x100);
1835 if (csum != BNX_CRC32_RESIDUAL) {
1836 rc = ENODEV;
1837 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
1838 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1839 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1840 goto bnx_nvram_test_done;
1841 }
1842
1843 csum = ether_crc32_le(data + 0x100, 0x100);
1844 if (csum != BNX_CRC32_RESIDUAL) {
1845 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
1846 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1847 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1848 rc = ENODEV;
1849 }
1850
1851 bnx_nvram_test_done:
1852 return (rc);
1853 }
1854
1855 /****************************************************************************/
1856 /* Free any DMA memory owned by the driver. */
1857 /* */
1858 /* Scans through each data structure that requires DMA memory and frees */
1859 /* the memory if allocated. */
1860 /* */
1861 /* Returns: */
1862 /* Nothing. */
1863 /****************************************************************************/
1864 void
1865 bnx_dma_free(struct bnx_softc *sc)
1866 {
1867 int i;
1868
1869 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1870
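	/*
	 * Tear-down mirrors bnx_dma_alloc(): unload each DMA map, unmap
	 * and free the backing memory, then destroy the map itself.
	 */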
1871 /* Destroy the status block. */
1872 if (sc->status_block != NULL && sc->status_map != NULL) {
1873 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
1874 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
1875 BNX_STATUS_BLK_SZ);
1876 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
1877 sc->status_rseg);
1878 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
1879 sc->status_block = NULL;
1880 sc->status_map = NULL;
1881 }
1882
1883 /* Destroy the statistics block. */
1884 if (sc->stats_block != NULL && sc->stats_map != NULL) {
1885 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
1886 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
1887 BNX_STATS_BLK_SZ);
1888 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
1889 sc->stats_rseg);
1890 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
1891 sc->stats_block = NULL;
1892 sc->stats_map = NULL;
1893 }
1894
1895 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
1896 for (i = 0; i < TX_PAGES; i++ ) {
1897 if (sc->tx_bd_chain[i] != NULL &&
1898 sc->tx_bd_chain_map[i] != NULL) {
1899 bus_dmamap_unload(sc->bnx_dmatag,
1900 sc->tx_bd_chain_map[i]);
1901 bus_dmamem_unmap(sc->bnx_dmatag,
1902 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
1903 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
1904 sc->tx_bd_chain_rseg[i]);
1905 bus_dmamap_destroy(sc->bnx_dmatag,
1906 sc->tx_bd_chain_map[i]);
1907 sc->tx_bd_chain[i] = NULL;
1908 sc->tx_bd_chain_map[i] = NULL;
1909 }
1910 }
1911
1912 /* Unload and destroy the TX mbuf maps. */
1913 for (i = 0; i < TOTAL_TX_BD; i++) {
1914 if (sc->tx_mbuf_map[i] != NULL) {
1915 bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
1916 bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
1917 }
1918 }
1919
1920 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
1921 for (i = 0; i < RX_PAGES; i++ ) {
1922 if (sc->rx_bd_chain[i] != NULL &&
1923 sc->rx_bd_chain_map[i] != NULL) {
1924 bus_dmamap_unload(sc->bnx_dmatag,
1925 sc->rx_bd_chain_map[i]);
1926 bus_dmamem_unmap(sc->bnx_dmatag,
1927 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
1928 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
1929 sc->rx_bd_chain_rseg[i]);
1930
1931 bus_dmamap_destroy(sc->bnx_dmatag,
1932 sc->rx_bd_chain_map[i]);
1933 sc->rx_bd_chain[i] = NULL;
1934 sc->rx_bd_chain_map[i] = NULL;
1935 }
1936 }
1937
1938 /* Unload and destroy the RX mbuf maps. */
1939 for (i = 0; i < TOTAL_RX_BD; i++) {
1940 if (sc->rx_mbuf_map[i] != NULL) {
1941 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
1942 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
1943 }
1944 }
1945
1946 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
1947 }
1948
1949 /****************************************************************************/
1950 /* Allocate any DMA memory needed by the driver. */
1951 /* */
1952 /* Allocates DMA memory needed for the various global structures needed by */
1953 /* hardware. */
1954 /* */
1955 /* Returns: */
1956 /* 0 for success, positive value for failure. */
1957 /****************************************************************************/
1958 int
1959 bnx_dma_alloc(struct bnx_softc *sc)
1960 {
1961 int i, rc = 0;
1962
1963 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1964
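	/*
	 * Every DMA area below follows the usual four step bus_dma(9)
	 * sequence: bus_dmamap_create(), bus_dmamem_alloc(),
	 * bus_dmamem_map() and finally bus_dmamap_load().
	 */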
1965 /*
1966 * Allocate DMA memory for the status block, map the memory into DMA
1967 * space, and fetch the physical address of the block.
1968 */
1969 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
1970 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
1971 aprint_error_dev(sc->bnx_dev,
1972 "Could not create status block DMA map!\n");
1973 rc = ENOMEM;
1974 goto bnx_dma_alloc_exit;
1975 }
1976
1977 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
1978 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
1979 &sc->status_rseg, BUS_DMA_NOWAIT)) {
1980 aprint_error_dev(sc->bnx_dev,
1981 "Could not allocate status block DMA memory!\n");
1982 rc = ENOMEM;
1983 goto bnx_dma_alloc_exit;
1984 }
1985
1986 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
1987 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
1988 aprint_error_dev(sc->bnx_dev,
1989 "Could not map status block DMA memory!\n");
1990 rc = ENOMEM;
1991 goto bnx_dma_alloc_exit;
1992 }
1993
1994 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
1995 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
1996 aprint_error_dev(sc->bnx_dev,
1997 "Could not load status block DMA memory!\n");
1998 rc = ENOMEM;
1999 goto bnx_dma_alloc_exit;
2000 }
2001
2002 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2003 bzero(sc->status_block, BNX_STATUS_BLK_SZ);
2004
2005 /* DRC - Fix for 64 bit addresses. */
2006 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2007 (u_int32_t) sc->status_block_paddr);
2008
2009 /*
2010 * Allocate DMA memory for the statistics block, map the memory into
2011 * DMA space, and fetch the physical address of the block.
2012 */
2013 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2014 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2015 aprint_error_dev(sc->bnx_dev,
2016 "Could not create stats block DMA map!\n");
2017 rc = ENOMEM;
2018 goto bnx_dma_alloc_exit;
2019 }
2020
2021 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2022 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2023 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2024 aprint_error_dev(sc->bnx_dev,
2025 "Could not allocate stats block DMA memory!\n");
2026 rc = ENOMEM;
2027 goto bnx_dma_alloc_exit;
2028 }
2029
2030 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2031 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2032 aprint_error_dev(sc->bnx_dev,
2033 "Could not map stats block DMA memory!\n");
2034 rc = ENOMEM;
2035 goto bnx_dma_alloc_exit;
2036 }
2037
2038 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2039 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2040 aprint_error_dev(sc->bnx_dev,
2041 		    "Could not load stats block DMA memory!\n");
2042 rc = ENOMEM;
2043 goto bnx_dma_alloc_exit;
2044 }
2045
2046 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2047 bzero(sc->stats_block, BNX_STATS_BLK_SZ);
2048
2049 /* DRC - Fix for 64 bit address. */
2050 	DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2051 (u_int32_t) sc->stats_block_paddr);
2052
2053 /*
2054 * Allocate DMA memory for the TX buffer descriptor chain,
2055 * and fetch the physical address of the block.
2056 */
2057 for (i = 0; i < TX_PAGES; i++) {
2058 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2059 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2060 &sc->tx_bd_chain_map[i])) {
2061 aprint_error_dev(sc->bnx_dev,
2062 "Could not create Tx desc %d DMA map!\n", i);
2063 rc = ENOMEM;
2064 goto bnx_dma_alloc_exit;
2065 }
2066
2067 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2068 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2069 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2070 aprint_error_dev(sc->bnx_dev,
2071 "Could not allocate TX desc %d DMA memory!\n",
2072 i);
2073 rc = ENOMEM;
2074 goto bnx_dma_alloc_exit;
2075 }
2076
2077 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2078 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2079 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2080 aprint_error_dev(sc->bnx_dev,
2081 "Could not map TX desc %d DMA memory!\n", i);
2082 rc = ENOMEM;
2083 goto bnx_dma_alloc_exit;
2084 }
2085
2086 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2087 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2088 BUS_DMA_NOWAIT)) {
2089 aprint_error_dev(sc->bnx_dev,
2090 "Could not load TX desc %d DMA memory!\n", i);
2091 rc = ENOMEM;
2092 goto bnx_dma_alloc_exit;
2093 }
2094
2095 sc->tx_bd_chain_paddr[i] =
2096 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2097
2098 /* DRC - Fix for 64 bit systems. */
2099 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2100 i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2101 }
2102
2103 /*
2104 * Create DMA maps for the TX buffer mbufs.
2105 */
2106 for (i = 0; i < TOTAL_TX_BD; i++) {
2107 if (bus_dmamap_create(sc->bnx_dmatag,
2108 MCLBYTES * BNX_MAX_SEGMENTS,
2109 USABLE_TX_BD - BNX_TX_SLACK_SPACE,
2110 MCLBYTES, 0, BUS_DMA_NOWAIT,
2111 &sc->tx_mbuf_map[i])) {
2112 aprint_error_dev(sc->bnx_dev,
2113 "Could not create Tx mbuf %d DMA map!\n", i);
2114 rc = ENOMEM;
2115 goto bnx_dma_alloc_exit;
2116 }
2117 }
2118
2119 /*
2120 * Allocate DMA memory for the Rx buffer descriptor chain,
2121 * and fetch the physical address of the block.
2122 */
2123 for (i = 0; i < RX_PAGES; i++) {
2124 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2125 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2126 &sc->rx_bd_chain_map[i])) {
2127 aprint_error_dev(sc->bnx_dev,
2128 "Could not create Rx desc %d DMA map!\n", i);
2129 rc = ENOMEM;
2130 goto bnx_dma_alloc_exit;
2131 }
2132
2133 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2134 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2135 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2136 aprint_error_dev(sc->bnx_dev,
2137 "Could not allocate Rx desc %d DMA memory!\n", i);
2138 rc = ENOMEM;
2139 goto bnx_dma_alloc_exit;
2140 }
2141
2142 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2143 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2144 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2145 aprint_error_dev(sc->bnx_dev,
2146 "Could not map Rx desc %d DMA memory!\n", i);
2147 rc = ENOMEM;
2148 goto bnx_dma_alloc_exit;
2149 }
2150
2151 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2152 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2153 BUS_DMA_NOWAIT)) {
2154 aprint_error_dev(sc->bnx_dev,
2155 "Could not load Rx desc %d DMA memory!\n", i);
2156 rc = ENOMEM;
2157 goto bnx_dma_alloc_exit;
2158 }
2159
2160 bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2161 sc->rx_bd_chain_paddr[i] =
2162 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2163
2164 /* DRC - Fix for 64 bit systems. */
2165 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2166 i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2167 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2168 0, BNX_RX_CHAIN_PAGE_SZ,
2169 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2170 }
2171
2172 /*
2173 * Create DMA maps for the Rx buffer mbufs.
2174 */
2175 for (i = 0; i < TOTAL_RX_BD; i++) {
2176 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2177 BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2178 &sc->rx_mbuf_map[i])) {
2179 aprint_error_dev(sc->bnx_dev,
2180 "Could not create Rx mbuf %d DMA map!\n", i);
2181 rc = ENOMEM;
2182 goto bnx_dma_alloc_exit;
2183 }
2184 }
2185
2186 bnx_dma_alloc_exit:
2187 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2188
2189 return(rc);
2190 }
2191
2192 /****************************************************************************/
2193 /* Release all resources used by the driver. */
2194 /* */
2195 /* Releases all resources acquired by the driver including interrupts, */
2196 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2197 /* */
2198 /* Returns: */
2199 /* Nothing. */
2200 /****************************************************************************/
2201 void
2202 bnx_release_resources(struct bnx_softc *sc)
2203 {
2204 int i;
2205 struct pci_attach_args *pa = &(sc->bnx_pa);
2206
2207 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2208
2209 bnx_dma_free(sc);
2210
2211 if (sc->bnx_intrhand != NULL)
2212 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2213
2214 if (sc->bnx_size)
2215 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2216
2217 for (i = 0; i < TOTAL_RX_BD; i++)
2218 if (sc->rx_mbuf_map[i])
2219 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2220
2221 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2222 }
2223
2224 /****************************************************************************/
2225 /* Firmware synchronization. */
2226 /* */
2227 /* Before performing certain events such as a chip reset, synchronize with */
2228 /* the firmware first. */
2229 /* */
2230 /* Returns: */
2231 /* 0 for success, positive value for failure. */
2232 /****************************************************************************/
2233 int
2234 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2235 {
2236 int i, rc = 0;
2237 u_int32_t val;
2238
2239 /* Don't waste any time if we've timed out before. */
2240 if (sc->bnx_fw_timed_out) {
2241 rc = EBUSY;
2242 goto bnx_fw_sync_exit;
2243 }
2244
2245 /* Increment the message sequence number. */
2246 sc->bnx_fw_wr_seq++;
2247 msg_data |= sc->bnx_fw_wr_seq;
2248
2249 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2250 msg_data);
2251
2252 /* Send the message to the bootcode driver mailbox. */
2253 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2254
2255 	/* Wait for the bootcode to acknowledge the message: it echoes the
	 * sequence number back in the ack field of the firmware mailbox,
	 * which is polled about once per millisecond below. */
2256 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2257 /* Check for a response in the bootcode firmware mailbox. */
2258 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2259 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2260 break;
2261 DELAY(1000);
2262 }
2263
2264 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2265 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2266 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2267 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2268 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2269
2270 msg_data &= ~BNX_DRV_MSG_CODE;
2271 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2272
2273 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2274
2275 sc->bnx_fw_timed_out = 1;
2276 rc = EBUSY;
2277 }
2278
2279 bnx_fw_sync_exit:
2280 return (rc);
2281 }
2282
2283 /****************************************************************************/
2284 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2285 /* */
2286 /* Returns: */
2287 /* Nothing. */
2288 /****************************************************************************/
2289 void
2290 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2291 u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2292 {
2293 int i;
2294 u_int32_t val;
2295
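	/*
	 * Each RV2P instruction is 64 bits wide.  Write the two halves to
	 * the INSTR_HIGH/INSTR_LOW staging registers, then commit them to
	 * instruction RAM word (i / 8) through the processor's
	 * address/command register.
	 */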
2296 for (i = 0; i < rv2p_code_len; i += 8) {
2297 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2298 rv2p_code++;
2299 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2300 rv2p_code++;
2301
2302 if (rv2p_proc == RV2P_PROC1) {
2303 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2304 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2305 		} else {
2307 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2308 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2309 }
2310 }
2311
2312 /* Reset the processor, un-stall is done later. */
2313 if (rv2p_proc == RV2P_PROC1)
2314 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2315 else
2316 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2317 }
2318
2319 /****************************************************************************/
2320 /* Load RISC processor firmware. */
2321 /* */
2322 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */
2323 /* associated with a particular processor. */
2324 /* */
2325 /* Returns: */
2326 /* Nothing. */
2327 /****************************************************************************/
2328 void
2329 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2330 struct fw_info *fw)
2331 {
2332 u_int32_t offset;
2333 u_int32_t val;
2334
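	/*
	 * Halt the CPU, copy each firmware section into its scratchpad
	 * (translating section addresses from the CPU's own MIPS view to
	 * host-visible scratchpad offsets), load the program counter and
	 * restart the CPU.
	 */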
2335 /* Halt the CPU. */
2336 val = REG_RD_IND(sc, cpu_reg->mode);
2337 val |= cpu_reg->mode_value_halt;
2338 REG_WR_IND(sc, cpu_reg->mode, val);
2339 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2340
2341 /* Load the Text area. */
2342 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2343 if (fw->text) {
2344 int j;
2345
2346 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2347 REG_WR_IND(sc, offset, fw->text[j]);
2348 }
2349
2350 /* Load the Data area. */
2351 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2352 if (fw->data) {
2353 int j;
2354
2355 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2356 REG_WR_IND(sc, offset, fw->data[j]);
2357 }
2358
2359 /* Load the SBSS area. */
2360 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2361 if (fw->sbss) {
2362 int j;
2363
2364 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2365 REG_WR_IND(sc, offset, fw->sbss[j]);
2366 }
2367
2368 /* Load the BSS area. */
2369 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2370 if (fw->bss) {
2371 int j;
2372
2373 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2374 REG_WR_IND(sc, offset, fw->bss[j]);
2375 }
2376
2377 /* Load the Read-Only area. */
2378 offset = cpu_reg->spad_base +
2379 (fw->rodata_addr - cpu_reg->mips_view_base);
2380 if (fw->rodata) {
2381 int j;
2382
2383 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2384 REG_WR_IND(sc, offset, fw->rodata[j]);
2385 }
2386
2387 /* Clear the pre-fetch instruction. */
2388 REG_WR_IND(sc, cpu_reg->inst, 0);
2389 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2390
2391 /* Start the CPU. */
2392 val = REG_RD_IND(sc, cpu_reg->mode);
2393 val &= ~cpu_reg->mode_value_halt;
2394 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2395 REG_WR_IND(sc, cpu_reg->mode, val);
2396 }
2397
2398 /****************************************************************************/
2399 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2400 /* */
2401 /* Loads the firmware for each CPU and starts the CPU. */
2402 /* */
2403 /* Returns: */
2404 /* Nothing. */
2405 /****************************************************************************/
2406 void
2407 bnx_init_cpus(struct bnx_softc *sc)
2408 {
2409 struct cpu_reg cpu_reg;
2410 struct fw_info fw;
2411
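	/*
	 * The cpu_reg/fw scratch structures below are refilled once per
	 * on-chip processor (RXP, TXP, TPAT, COM) before each call to
	 * the generic loader bnx_load_cpu_fw().
	 */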
2412 /* Initialize the RV2P processor. */
2413 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
2414 RV2P_PROC1);
2415 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
2416 RV2P_PROC2);
2417
2418 /* Initialize the RX Processor. */
2419 cpu_reg.mode = BNX_RXP_CPU_MODE;
2420 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2421 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2422 cpu_reg.state = BNX_RXP_CPU_STATE;
2423 cpu_reg.state_value_clear = 0xffffff;
2424 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2425 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2426 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2427 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2428 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2429 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2430 cpu_reg.mips_view_base = 0x8000000;
2431
2432 fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2433 fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2434 fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2435 fw.start_addr = bnx_RXP_b06FwStartAddr;
2436
2437 fw.text_addr = bnx_RXP_b06FwTextAddr;
2438 fw.text_len = bnx_RXP_b06FwTextLen;
2439 fw.text_index = 0;
2440 fw.text = bnx_RXP_b06FwText;
2441
2442 fw.data_addr = bnx_RXP_b06FwDataAddr;
2443 fw.data_len = bnx_RXP_b06FwDataLen;
2444 fw.data_index = 0;
2445 fw.data = bnx_RXP_b06FwData;
2446
2447 fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2448 fw.sbss_len = bnx_RXP_b06FwSbssLen;
2449 fw.sbss_index = 0;
2450 fw.sbss = bnx_RXP_b06FwSbss;
2451
2452 fw.bss_addr = bnx_RXP_b06FwBssAddr;
2453 fw.bss_len = bnx_RXP_b06FwBssLen;
2454 fw.bss_index = 0;
2455 fw.bss = bnx_RXP_b06FwBss;
2456
2457 fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2458 fw.rodata_len = bnx_RXP_b06FwRodataLen;
2459 fw.rodata_index = 0;
2460 fw.rodata = bnx_RXP_b06FwRodata;
2461
2462 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2463 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2464
2465 /* Initialize the TX Processor. */
2466 cpu_reg.mode = BNX_TXP_CPU_MODE;
2467 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2468 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2469 cpu_reg.state = BNX_TXP_CPU_STATE;
2470 cpu_reg.state_value_clear = 0xffffff;
2471 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2472 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2473 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2474 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2475 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2476 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2477 cpu_reg.mips_view_base = 0x8000000;
2478
2479 fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2480 fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2481 fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2482 fw.start_addr = bnx_TXP_b06FwStartAddr;
2483
2484 fw.text_addr = bnx_TXP_b06FwTextAddr;
2485 fw.text_len = bnx_TXP_b06FwTextLen;
2486 fw.text_index = 0;
2487 fw.text = bnx_TXP_b06FwText;
2488
2489 fw.data_addr = bnx_TXP_b06FwDataAddr;
2490 fw.data_len = bnx_TXP_b06FwDataLen;
2491 fw.data_index = 0;
2492 fw.data = bnx_TXP_b06FwData;
2493
2494 fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2495 fw.sbss_len = bnx_TXP_b06FwSbssLen;
2496 fw.sbss_index = 0;
2497 fw.sbss = bnx_TXP_b06FwSbss;
2498
2499 fw.bss_addr = bnx_TXP_b06FwBssAddr;
2500 fw.bss_len = bnx_TXP_b06FwBssLen;
2501 fw.bss_index = 0;
2502 fw.bss = bnx_TXP_b06FwBss;
2503
2504 fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2505 fw.rodata_len = bnx_TXP_b06FwRodataLen;
2506 fw.rodata_index = 0;
2507 fw.rodata = bnx_TXP_b06FwRodata;
2508
2509 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2510 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2511
2512 /* Initialize the TX Patch-up Processor. */
2513 cpu_reg.mode = BNX_TPAT_CPU_MODE;
2514 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2515 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2516 cpu_reg.state = BNX_TPAT_CPU_STATE;
2517 cpu_reg.state_value_clear = 0xffffff;
2518 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2519 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2520 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2521 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2522 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2523 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2524 cpu_reg.mips_view_base = 0x8000000;
2525
2526 fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2527 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2528 fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2529 fw.start_addr = bnx_TPAT_b06FwStartAddr;
2530
2531 fw.text_addr = bnx_TPAT_b06FwTextAddr;
2532 fw.text_len = bnx_TPAT_b06FwTextLen;
2533 fw.text_index = 0;
2534 fw.text = bnx_TPAT_b06FwText;
2535
2536 fw.data_addr = bnx_TPAT_b06FwDataAddr;
2537 fw.data_len = bnx_TPAT_b06FwDataLen;
2538 fw.data_index = 0;
2539 fw.data = bnx_TPAT_b06FwData;
2540
2541 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2542 fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2543 fw.sbss_index = 0;
2544 fw.sbss = bnx_TPAT_b06FwSbss;
2545
2546 fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2547 fw.bss_len = bnx_TPAT_b06FwBssLen;
2548 fw.bss_index = 0;
2549 fw.bss = bnx_TPAT_b06FwBss;
2550
2551 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2552 fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2553 fw.rodata_index = 0;
2554 fw.rodata = bnx_TPAT_b06FwRodata;
2555
2556 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2557 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2558
2559 /* Initialize the Completion Processor. */
2560 cpu_reg.mode = BNX_COM_CPU_MODE;
2561 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2562 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2563 cpu_reg.state = BNX_COM_CPU_STATE;
2564 cpu_reg.state_value_clear = 0xffffff;
2565 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2566 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2567 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2568 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2569 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2570 cpu_reg.spad_base = BNX_COM_SCRATCH;
2571 cpu_reg.mips_view_base = 0x8000000;
2572
2573 fw.ver_major = bnx_COM_b06FwReleaseMajor;
2574 fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2575 fw.ver_fix = bnx_COM_b06FwReleaseFix;
2576 fw.start_addr = bnx_COM_b06FwStartAddr;
2577
2578 fw.text_addr = bnx_COM_b06FwTextAddr;
2579 fw.text_len = bnx_COM_b06FwTextLen;
2580 fw.text_index = 0;
2581 fw.text = bnx_COM_b06FwText;
2582
2583 fw.data_addr = bnx_COM_b06FwDataAddr;
2584 fw.data_len = bnx_COM_b06FwDataLen;
2585 fw.data_index = 0;
2586 fw.data = bnx_COM_b06FwData;
2587
2588 fw.sbss_addr = bnx_COM_b06FwSbssAddr;
2589 fw.sbss_len = bnx_COM_b06FwSbssLen;
2590 fw.sbss_index = 0;
2591 fw.sbss = bnx_COM_b06FwSbss;
2592
2593 fw.bss_addr = bnx_COM_b06FwBssAddr;
2594 fw.bss_len = bnx_COM_b06FwBssLen;
2595 fw.bss_index = 0;
2596 fw.bss = bnx_COM_b06FwBss;
2597
2598 fw.rodata_addr = bnx_COM_b06FwRodataAddr;
2599 fw.rodata_len = bnx_COM_b06FwRodataLen;
2600 fw.rodata_index = 0;
2601 fw.rodata = bnx_COM_b06FwRodata;
2602
2603 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2604 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2605 }
2606
2607 /****************************************************************************/
2608 /* Initialize context memory. */
2609 /* */
2610 /* Clears the memory associated with each Context ID (CID). */
2611 /* */
2612 /* Returns: */
2613 /* Nothing. */
2614 /****************************************************************************/
2615 void
2616 bnx_init_context(struct bnx_softc *sc)
2617 {
2618 u_int32_t vcid;
2619
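	/*
	 * Walk all 96 context IDs, mapping each one through the context
	 * page table registers and zeroing its PHY_CTX_SIZE bytes of
	 * state.
	 */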
2620 vcid = 96;
2621 while (vcid) {
2622 u_int32_t vcid_addr, pcid_addr, offset;
2623
2624 vcid--;
2625
2626 vcid_addr = GET_CID_ADDR(vcid);
2627 pcid_addr = vcid_addr;
2628
2629 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2630 REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2631
2632 /* Zero out the context. */
2633 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2634 CTX_WR(sc, 0x00, offset, 0);
2635
2636 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2637 REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2638 }
2639 }
2640
2641 /****************************************************************************/
2642 /* Fetch the permanent MAC address of the controller. */
2643 /* */
2644 /* Returns: */
2645 /* Nothing. */
2646 /****************************************************************************/
2647 void
2648 bnx_get_mac_addr(struct bnx_softc *sc)
2649 {
2650 u_int32_t mac_lo = 0, mac_hi = 0;
2651
2652 /*
2653 * The NetXtreme II bootcode populates various NIC
2654 * power-on and runtime configuration items in a
2655 * shared memory area. The factory configured MAC
2656 * address is available from both NVRAM and the
2657 * shared memory area so we'll read the value from
2658 * shared memory for speed.
2659 */
2660
2661 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
2662 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
2663
2664 if ((mac_lo == 0) && (mac_hi == 0)) {
2665 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2666 __FILE__, __LINE__);
2667 } else {
2668 sc->eaddr[0] = (u_char)(mac_hi >> 8);
2669 sc->eaddr[1] = (u_char)(mac_hi >> 0);
2670 sc->eaddr[2] = (u_char)(mac_lo >> 24);
2671 sc->eaddr[3] = (u_char)(mac_lo >> 16);
2672 sc->eaddr[4] = (u_char)(mac_lo >> 8);
2673 sc->eaddr[5] = (u_char)(mac_lo >> 0);
2674 }
2675
2676 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
2677 "%s\n", ether_sprintf(sc->eaddr));
2678 }
2679
2680 /****************************************************************************/
2681 /* Program the MAC address. */
2682 /* */
2683 /* Returns: */
2684 /* Nothing. */
2685 /****************************************************************************/
2686 void
2687 bnx_set_mac_addr(struct bnx_softc *sc)
2688 {
2689 u_int32_t val;
2690 const u_int8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);
2691
2692 	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
2693 	    "%s\n", ether_sprintf(mac_addr));
2694
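	/* MATCH0 takes the two high-order address bytes, MATCH1 the
	 * remaining four. */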
2695 val = (mac_addr[0] << 8) | mac_addr[1];
2696
2697 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2698
2699 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2700 (mac_addr[4] << 8) | mac_addr[5];
2701
2702 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2703 }
2704
2705 /****************************************************************************/
2706 /* Stop the controller. */
2707 /* */
2708 /* Returns: */
2709 /* Nothing. */
2710 /****************************************************************************/
2711 void
2712 bnx_stop(struct ifnet *ifp, int disable)
2713 {
2714 struct bnx_softc *sc = ifp->if_softc;
2715
2716 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2717
2718 if ((ifp->if_flags & IFF_RUNNING) == 0)
2719 return;
2720
2721 callout_stop(&sc->bnx_timeout);
2722
2723 mii_down(&sc->bnx_mii);
2724
2725 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2726
2727 /* Disable the transmit/receive blocks. */
2728 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2729 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2730 DELAY(20);
2731
2732 bnx_disable_intr(sc);
2733
2734 /* Tell firmware that the driver is going away. */
2735 if (disable)
2736 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
2737 else
2738 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
2739
2740 /* Free the RX lists. */
2741 bnx_free_rx_chain(sc);
2742
2743 /* Free TX buffers. */
2744 bnx_free_tx_chain(sc);
2745
2746 ifp->if_timer = 0;
2747
2748 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2750 }
2751
2752 int
2753 bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
2754 {
2755 u_int32_t val;
2756 int i, rc = 0;
2757
2758 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2759
2760 /* Wait for pending PCI transactions to complete. */
2761 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
2762 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2763 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2764 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2765 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2766 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2767 DELAY(5);
2768
2769 /* Assume bootcode is running. */
2770 sc->bnx_fw_timed_out = 0;
2771
2772 /* Give the firmware a chance to prepare for the reset. */
2773 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
2774 if (rc)
2775 goto bnx_reset_exit;
2776
2777 /* Set a firmware reminder that this is a soft reset. */
2778 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
2779 BNX_DRV_RESET_SIGNATURE_MAGIC);
2780
2781 /* Dummy read to force the chip to complete all current transactions. */
2782 val = REG_RD(sc, BNX_MISC_ID);
2783
2784 /* Chip reset. */
2785 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2786 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2787 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2788 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);
2789
2790 	/* Wait up to 100us (10 polls of 10us) for the reset to complete. */
2791 for (i = 0; i < 10; i++) {
2792 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
2793 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2794 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
2795 break;
2796
2797 DELAY(10);
2798 }
2799
2800 /* Check that reset completed successfully. */
2801 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2802 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2803 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", __FILE__, __LINE__);
2804 rc = EBUSY;
2805 goto bnx_reset_exit;
2806 }
2807
2808 /* Make sure byte swapping is properly configured. */
2809 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
2810 if (val != 0x01020304) {
2811 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
2812 __FILE__, __LINE__);
2813 rc = ENODEV;
2814 goto bnx_reset_exit;
2815 }
2816
2817 /* Just completed a reset, assume that firmware is running again. */
2818 sc->bnx_fw_timed_out = 0;
2819
2820 /* Wait for the firmware to finish its initialization. */
2821 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
2822 if (rc)
2823 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
2824 "initialization!\n", __FILE__, __LINE__);
2825
2826 bnx_reset_exit:
2827 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2828
2829 return (rc);
2830 }
2831
2832 int
2833 bnx_chipinit(struct bnx_softc *sc)
2834 {
2835 struct pci_attach_args *pa = &(sc->bnx_pa);
2836 u_int32_t val;
2837 int rc = 0;
2838
2839 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2840
2841 /* Make sure the interrupt is not active. */
2842 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
2843
2844 /* Initialize DMA byte/word swapping, configure the number of DMA */
2845 /* channels and PCI clock compensation delay. */
2846 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
2847 BNX_DMA_CONFIG_DATA_WORD_SWAP |
2848 #if BYTE_ORDER == BIG_ENDIAN
2849 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
2850 #endif
2851 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
2852 DMA_READ_CHANS << 12 |
2853 DMA_WRITE_CHANS << 16;
2854
2855 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
2856
2857 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
2858 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
2859
2860 /*
2861 * This setting resolves a problem observed on certain Intel PCI
2862 * chipsets that cannot handle multiple outstanding DMA operations.
2863 * See errata E9_5706A1_65.
2864 */
2865 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
2866 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
2867 !(sc->bnx_flags & BNX_PCIX_FLAG))
2868 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
2869
2870 REG_WR(sc, BNX_DMA_CONFIG, val);
2871
2872 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
2873 if (sc->bnx_flags & BNX_PCIX_FLAG) {
2874 		/* pci_conf_read() returns a full 32 bit dword; a 16 bit
		 * variable would truncate it, and the write-back below
		 * would then clobber the upper half of the register, where
		 * the 0x20000 relaxed ordering bit lives. */
		u_int32_t nval;
2875
2876 nval = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
2877 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
2878 nval & ~0x20000);
2879 }
2880
2881 /* Enable the RX_V2P and Context state machines before access. */
2882 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
2883 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
2884 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
2885 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
2886
2887 /* Initialize context mapping and zero out the quick contexts. */
2888 bnx_init_context(sc);
2889
2890 	/* Initialize the on-board CPUs. */
2891 bnx_init_cpus(sc);
2892
2893 /* Prepare NVRAM for access. */
2894 if (bnx_init_nvram(sc)) {
2895 rc = ENODEV;
2896 goto bnx_chipinit_exit;
2897 }
2898
2899 /* Set the kernel bypass block size */
2900 val = REG_RD(sc, BNX_MQ_CONFIG);
2901 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2902 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
2903 REG_WR(sc, BNX_MQ_CONFIG, val);
2904
2905 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
2906 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
2907 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
2908
2909 val = (BCM_PAGE_BITS - 8) << 24;
2910 REG_WR(sc, BNX_RV2P_CONFIG, val);
2911
2912 /* Configure page size. */
2913 val = REG_RD(sc, BNX_TBDR_CONFIG);
2914 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
2915 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
2916 REG_WR(sc, BNX_TBDR_CONFIG, val);
2917
2918 bnx_chipinit_exit:
2919 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2920
2921 return(rc);
2922 }
2923
2924 /****************************************************************************/
2925 /* Initialize the controller in preparation to send/receive traffic. */
2926 /* */
2927 /* Returns: */
2928 /* 0 for success, positive value for failure. */
2929 /****************************************************************************/
2930 int
2931 bnx_blockinit(struct bnx_softc *sc)
2932 {
2933 u_int32_t reg, val;
2934 int rc = 0;
2935
2936 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2937
2938 /* Load the hardware default MAC address. */
2939 bnx_set_mac_addr(sc);
2940
2941 /* Set the Ethernet backoff seed value */
2942 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
2943 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
2944 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
2945
2946 sc->last_status_idx = 0;
2947 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
2948
2949 /* Set up link change interrupt generation. */
2950 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
2951
2952 /* Program the physical address of the status block. */
2953 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
2954 REG_WR(sc, BNX_HC_STATUS_ADDR_H,
2955 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
2956
2957 /* Program the physical address of the statistics block. */
2958 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
2959 (u_int32_t)(sc->stats_block_paddr));
2960 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
2961 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
2962
2963 	/* Program the host coalescing parameters.  Most of these registers
	 * pack the "during interrupt" value in their upper 16 bits and the
	 * normal value in their lower 16 bits. */
2964 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
2965 << 16) | sc->bnx_tx_quick_cons_trip);
2966 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
2967 << 16) | sc->bnx_rx_quick_cons_trip);
2968 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
2969 sc->bnx_comp_prod_trip);
2970 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
2971 sc->bnx_tx_ticks);
2972 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
2973 sc->bnx_rx_ticks);
2974 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
2975 sc->bnx_com_ticks);
2976 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
2977 sc->bnx_cmd_ticks);
2978 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
2979 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
2980 REG_WR(sc, BNX_HC_CONFIG,
2981 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
2982 BNX_HC_CONFIG_COLLECT_STATS));
2983
2984 /* Clear the internal statistics counters. */
2985 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
2986
2987 /* Verify that bootcode is running. */
2988 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
2989
2990 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
2991 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
2992 __FILE__, __LINE__); reg = 0);
2993
2994 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
2995 BNX_DEV_INFO_SIGNATURE_MAGIC) {
2996 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
2997 		    "Expected: 0x%08X\n", __FILE__, __LINE__,
2998 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
2999 BNX_DEV_INFO_SIGNATURE_MAGIC);
3000 rc = ENODEV;
3001 goto bnx_blockinit_exit;
3002 }
3003
3004 /* Check if any management firmware is running. */
3005 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3006 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3007 BNX_PORT_FEATURE_IMD_ENABLED)) {
3008 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3009 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3010 }
3011
3012 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3013 BNX_DEV_INFO_BC_REV);
3014
3015 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3016
3017 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3018 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3019
3020 /* Enable link state change interrupt generation. */
3021 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3022
3023 /* Enable all remaining blocks in the MAC. */
3024 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3025 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3026 DELAY(20);
3027
3028 bnx_blockinit_exit:
3029 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3030
3031 return (rc);
3032 }
3033
3034 /****************************************************************************/
3035 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3036 /* */
3037 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3038 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3039 /* necessary. */
3040 /* */
3041 /* Returns: */
3042 /* 0 for success, positive value for failure. */
3043 /****************************************************************************/
3044 int
3045 bnx_get_buf(struct bnx_softc *sc, struct mbuf *m, u_int16_t *prod,
3046 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3047 {
3048 bus_dmamap_t map;
3049 struct mbuf *m_new = NULL;
3050 struct rx_bd *rxbd;
3051 int i, rc = 0;
3052 u_int32_t addr;
3053 #ifdef BNX_DEBUG
3054 u_int16_t debug_chain_prod = *chain_prod;
3055 #endif
3056 u_int16_t first_chain_prod;
3057 u_int16_t min_free_bd;
3058
3059 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3060 __func__);
3061
3062 /* Make sure the inputs are valid. */
3063 DBRUNIF((*chain_prod > MAX_RX_BD),
3064 aprint_error_dev(sc->bnx_dev,
3065 "RX producer out of range: 0x%04X > 0x%04X\n",
3066 *chain_prod, (u_int16_t)MAX_RX_BD));
3067
3068 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3069 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
3070 *prod_bseq);
3071
3072 	/* Determine the worst case number of rx_bd entries a single mbuf
	 * may consume (one per PAGE_SIZE DMA segment) and keep refilling
	 * the chain while at least that many descriptors are free. */
3073 if (sc->mbuf_alloc_size == MCLBYTES)
3074 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
3075 else
3076 min_free_bd = (BNX_MAX_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
3077 while (sc->free_rx_bd >= min_free_bd) {
3078 if (m == NULL) {
3079 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3080 BNX_PRINTF(sc, "Simulating mbuf allocation failure.\n");
3081
3082 sc->mbuf_alloc_failed++;
3083 rc = ENOBUFS;
3084 goto bnx_get_buf_exit);
3085
3086 /* This is a new mbuf allocation. */
3087 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3088 if (m_new == NULL) {
3089 DBPRINT(sc, BNX_WARN,
3090 "%s(%d): RX mbuf header allocation failed!\n",
3091 __FILE__, __LINE__);
3092
3093 DBRUNIF(1, sc->mbuf_alloc_failed++);
3094
3095 rc = ENOBUFS;
3096 goto bnx_get_buf_exit;
3097 }
3098
3099 DBRUNIF(1, sc->rx_mbuf_alloc++);
3100 if (sc->mbuf_alloc_size == MCLBYTES)
3101 MCLGET(m_new, M_DONTWAIT);
3102 else
3103 MEXTMALLOC(m_new, sc->mbuf_alloc_size,
3104 M_DONTWAIT);
3105 if (!(m_new->m_flags & M_EXT)) {
3106 DBPRINT(sc, BNX_WARN,
3107 "%s(%d): RX mbuf chain allocation failed!\n",
3108 __FILE__, __LINE__);
3109
3110 m_freem(m_new);
3111
3112 DBRUNIF(1, sc->rx_mbuf_alloc--);
3113 DBRUNIF(1, sc->mbuf_alloc_failed++);
3114
3115 rc = ENOBUFS;
3116 goto bnx_get_buf_exit;
3117 }
3118
3119 } else {
3120 m_new = m;
3121 m = NULL;
3122 m_new->m_data = m_new->m_ext.ext_buf;
3123 }
3124 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3125
3126 /* Map the mbuf cluster into device memory. */
3127 map = sc->rx_mbuf_map[*chain_prod];
3128 first_chain_prod = *chain_prod;
3129 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3130 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3131 __FILE__, __LINE__);
3132
3133 m_freem(m_new);
3134
3135 DBRUNIF(1, sc->rx_mbuf_alloc--);
3136
3137 rc = ENOBUFS;
3138 goto bnx_get_buf_exit;
3139 }
3140 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3141 BUS_DMASYNC_PREREAD);
3142
3143 /* Watch for overflow. */
3144 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3145 aprint_error_dev(sc->bnx_dev,
3146 "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3147 sc->free_rx_bd, (u_int16_t)USABLE_RX_BD));
3148
3149 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3150 sc->rx_low_watermark = sc->free_rx_bd);
3151
3152 /*
3153 * Setup the rx_bd for the first segment
3154 */
3155 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3156
3157 addr = (u_int32_t)(map->dm_segs[0].ds_addr);
3158 rxbd->rx_bd_haddr_lo = htole32(addr);
3159 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3160 rxbd->rx_bd_haddr_hi = htole32(addr);
3161 rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
3162 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3163 *prod_bseq += map->dm_segs[0].ds_len;
3164 bus_dmamap_sync(sc->bnx_dmatag,
3165 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3166 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3167 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3168
3169 for (i = 1; i < map->dm_nsegs; i++) {
3170 *prod = NEXT_RX_BD(*prod);
3171 *chain_prod = RX_CHAIN_IDX(*prod);
3172
3173 rxbd =
3174 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3175
3176 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
3177 rxbd->rx_bd_haddr_lo = htole32(addr);
3178 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3179 rxbd->rx_bd_haddr_hi = htole32(addr);
3180 rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
3181 rxbd->rx_bd_flags = 0;
3182 *prod_bseq += map->dm_segs[i].ds_len;
3183 bus_dmamap_sync(sc->bnx_dmatag,
3184 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3185 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3186 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3187 }
3188
3189 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3190 bus_dmamap_sync(sc->bnx_dmatag,
3191 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3192 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3193 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3194
3195 		/*
3196 		 * Save the mbuf, adjust the map pointer (swap the maps for
3197 		 * the first and last rx_bd entries so that rx_mbuf_ptr and
3198 		 * rx_mbuf_map match) and update the free rx_bd counter.
3199 		 */
3200 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3201 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3202 sc->rx_mbuf_map[*chain_prod] = map;
3203 sc->free_rx_bd -= map->dm_nsegs;
3204
3205 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3206 map->dm_nsegs));
3207 *prod = NEXT_RX_BD(*prod);
3208 *chain_prod = RX_CHAIN_IDX(*prod);
3209 }
3210
3211 bnx_get_buf_exit:
3212 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
3213 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
3214 *chain_prod, *prod_bseq);
3215
3216 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
3217 __func__);
3218
3219 return(rc);
3220 }
3221
3222 /****************************************************************************/
3223 /* Allocate memory and initialize the TX data structures. */
3224 /* */
3225 /* Returns: */
3226 /* 0 for success, positive value for failure. */
3227 /****************************************************************************/
3228 int
3229 bnx_init_tx_chain(struct bnx_softc *sc)
3230 {
3231 struct tx_bd *txbd;
3232 u_int32_t val, addr;
3233 int i, rc = 0;
3234
3235 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3236
3237 /* Set the initial TX producer/consumer indices. */
3238 sc->tx_prod = 0;
3239 sc->tx_cons = 0;
3240 sc->tx_prod_bseq = 0;
3241 sc->used_tx_bd = 0;
3242 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3243
3244 /*
3245 * The NetXtreme II supports a linked-list structure called
3246 * a Buffer Descriptor Chain (or BD chain). A BD chain
3247 * consists of a series of 1 or more chain pages, each of which
3248 * consists of a fixed number of BD entries.
3249 * The last BD entry on each page is a pointer to the next page
3250 * in the chain, and the last pointer in the BD chain
3251 * points back to the beginning of the chain.
3252 */
3253
3254 /* Set the TX next pointer chain entries. */
3255 for (i = 0; i < TX_PAGES; i++) {
3256 int j;
3257
3258 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3259
3260 /* Check if we've reached the last page. */
3261 if (i == (TX_PAGES - 1))
3262 j = 0;
3263 else
3264 j = i + 1;
3265
3266 addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
3267 txbd->tx_bd_haddr_lo = htole32(addr);
3268 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3269 txbd->tx_bd_haddr_hi = htole32(addr);
3270 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
3271 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
3272 }
3273
3274 /*
3275 * Initialize the context ID for an L2 TX chain.
3276 */
3277 val = BNX_L2CTX_TYPE_TYPE_L2;
3278 val |= BNX_L2CTX_TYPE_SIZE_L2;
3279 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3280
3281 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3282 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3283
3284 /* Point the hardware to the first page in the chain. */
3285 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3286 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3287 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3288 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3289
3290 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3291
3292 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3293
3294 return(rc);
3295 }
3296
3297 /****************************************************************************/
3298 /* Free memory and clear the TX data structures. */
3299 /* */
3300 /* Returns: */
3301 /* Nothing. */
3302 /****************************************************************************/
3303 void
3304 bnx_free_tx_chain(struct bnx_softc *sc)
3305 {
3306 int i;
3307
3308 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3309
3310 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3311 for (i = 0; i < TOTAL_TX_BD; i++) {
3312 if (sc->tx_mbuf_ptr[i] != NULL) {
3313 			if (sc->tx_mbuf_map[i] != NULL)
3314 bus_dmamap_sync(sc->bnx_dmatag,
3315 sc->tx_mbuf_map[i], 0,
3316 sc->tx_mbuf_map[i]->dm_mapsize,
3317 BUS_DMASYNC_POSTWRITE);
3318 m_freem(sc->tx_mbuf_ptr[i]);
3319 sc->tx_mbuf_ptr[i] = NULL;
3320 DBRUNIF(1, sc->tx_mbuf_alloc--);
3321 }
3322 }
3323
3324 /* Clear each TX chain page. */
3325 for (i = 0; i < TX_PAGES; i++) {
3326 bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3327 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
3328 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
3329 }
3330
3331 /* Check if we lost any mbufs in the process. */
3332 DBRUNIF((sc->tx_mbuf_alloc),
3333 aprint_error_dev(sc->bnx_dev,
3334 "Memory leak! Lost %d mbufs from tx chain!\n",
3335 sc->tx_mbuf_alloc));
3336
3337 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3338 }
3339
3340 /****************************************************************************/
3341 /* Allocate memory and initialize the RX data structures. */
3342 /* */
3343 /* Returns: */
3344 /* 0 for success, positive value for failure. */
3345 /****************************************************************************/
3346 int
3347 bnx_init_rx_chain(struct bnx_softc *sc)
3348 {
3349 struct rx_bd *rxbd;
3350 int i, rc = 0;
3351 u_int16_t prod, chain_prod;
3352 u_int32_t prod_bseq, val, addr;
3353
3354 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3355
3356 /* Initialize the RX producer and consumer indices. */
3357 sc->rx_prod = 0;
3358 sc->rx_cons = 0;
3359 sc->rx_prod_bseq = 0;
3360 sc->free_rx_bd = BNX_RX_SLACK_SPACE;
3361 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3362
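	/*
	 * As in bnx_init_tx_chain(), the last rx_bd on each chain page
	 * points to the next page, and the last page points back to the
	 * first.
	 */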
3363 /* Initialize the RX next pointer chain entries. */
3364 for (i = 0; i < RX_PAGES; i++) {
3365 int j;
3366
3367 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3368
3369 /* Check if we've reached the last page. */
3370 if (i == (RX_PAGES - 1))
3371 j = 0;
3372 else
3373 j = i + 1;
3374
3375 /* Setup the chain page pointers. */
3376 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
3377 rxbd->rx_bd_haddr_hi = htole32(addr);
3378 addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
3379 rxbd->rx_bd_haddr_lo = htole32(addr);
3380 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
3381 0, BNX_RX_CHAIN_PAGE_SZ,
3382 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3383 }
3384
3385 /* Initialize the context ID for an L2 RX chain. */
3386 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3387 val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
3388 val |= 0x02 << 8;
3389 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3390
3391 /* Point the hardware to the first page in the chain. */
3392 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3393 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3394 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3395 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3396
3397 /* Allocate mbuf clusters for the rx_bd chain. */
3398 prod = prod_bseq = 0;
3399 chain_prod = RX_CHAIN_IDX(prod);
3400 if (bnx_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3401 BNX_PRINTF(sc,
3402 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
3403 }
3404
3405 /* Save the RX chain producer index. */
3406 sc->rx_prod = prod;
3407 sc->rx_prod_bseq = prod_bseq;
3408
3409 for (i = 0; i < RX_PAGES; i++)
3410 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
3411 sc->rx_bd_chain_map[i]->dm_mapsize,
3412 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3413
3414 /* Tell the chip about the waiting rx_bd's. */
3415 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3416 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3417
3418 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3419
3420 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3421
3422 return(rc);
3423 }
3424
3425 /****************************************************************************/
3426 /* Free memory and clear the RX data structures. */
3427 /* */
3428 /* Returns: */
3429 /* Nothing. */
3430 /****************************************************************************/
3431 void
3432 bnx_free_rx_chain(struct bnx_softc *sc)
3433 {
3434 int i;
3435
3436 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3437
3438 /* Free any mbufs still in the RX mbuf chain. */
3439 for (i = 0; i < TOTAL_RX_BD; i++) {
3440 if (sc->rx_mbuf_ptr[i] != NULL) {
3441 if (sc->rx_mbuf_map[i] != NULL)
3442 bus_dmamap_sync(sc->bnx_dmatag,
3443 sc->rx_mbuf_map[i], 0,
3444 sc->rx_mbuf_map[i]->dm_mapsize,
3445 BUS_DMASYNC_POSTREAD);
3446 m_freem(sc->rx_mbuf_ptr[i]);
3447 sc->rx_mbuf_ptr[i] = NULL;
3448 DBRUNIF(1, sc->rx_mbuf_alloc--);
3449 }
3450 }
3451
3452 /* Clear each RX chain page. */
3453 for (i = 0; i < RX_PAGES; i++)
3454 bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
3455
3456 /* Check if we lost any mbufs in the process. */
3457 DBRUNIF((sc->rx_mbuf_alloc),
3458 aprint_error_dev(sc->bnx_dev,
3459 "Memory leak! Lost %d mbufs from rx chain!\n",
3460 sc->rx_mbuf_alloc));
3461
3462 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3463 }
3464
3465 /****************************************************************************/
3466 /* Handles PHY generated interrupt events. */
3467 /* */
3468 /* Returns: */
3469 /* Nothing. */
3470 /****************************************************************************/
3471 void
3472 bnx_phy_intr(struct bnx_softc *sc)
3473 {
3474 u_int32_t new_link_state, old_link_state;
3475
3476 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3477 BUS_DMASYNC_POSTREAD);
3478 new_link_state = sc->status_block->status_attn_bits &
3479 STATUS_ATTN_BITS_LINK_STATE;
3480 old_link_state = sc->status_block->status_attn_bits_ack &
3481 STATUS_ATTN_BITS_LINK_STATE;
3482
3483 /* If the link state has changed, handle it. */
3484 if (new_link_state != old_link_state) {
3485 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
3486
3487 callout_stop(&sc->bnx_timeout);
3488 bnx_tick(sc);
3489
3490 /* Update the status_attn_bits_ack field in the status block. */
3491 if (new_link_state) {
3492 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
3493 STATUS_ATTN_BITS_LINK_STATE);
3494 DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
3495 } else {
3496 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
3497 STATUS_ATTN_BITS_LINK_STATE);
3498 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
3499 }
3500 }
3501
3502 /* Acknowledge the link change interrupt. */
3503 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
3504 }
3505
3506 /****************************************************************************/
3507 /* Handles received frame interrupt events. */
3508 /* */
3509 /* Returns: */
3510 /* Nothing. */
3511 /****************************************************************************/
3512 void
3513 bnx_rx_intr(struct bnx_softc *sc)
3514 {
3515 struct status_block *sblk = sc->status_block;
3516 struct ifnet *ifp = &sc->bnx_ec.ec_if;
3517 u_int16_t hw_cons, sw_cons, sw_chain_cons;
3518 u_int16_t sw_prod, sw_chain_prod;
3519 u_int32_t sw_prod_bseq;
3520 struct l2_fhdr *l2fhdr;
3521 int i;
3522
3523 DBRUNIF(1, sc->rx_interrupts++);
3524 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3525 BUS_DMASYNC_POSTREAD);
3526
3527 /* Prepare the RX chain pages to be accessed by the host CPU. */
3528 for (i = 0; i < RX_PAGES; i++)
3529 bus_dmamap_sync(sc->bnx_dmatag,
3530 sc->rx_bd_chain_map[i], 0,
3531 sc->rx_bd_chain_map[i]->dm_mapsize,
3532 BUS_DMASYNC_POSTWRITE);
3533
3534 /* Get the hardware's view of the RX consumer index. */
3535 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
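/* Skip to the next entry if this is a chain page pointer. */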
3536 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3537 hw_cons++;
3538
3539 /* Get working copies of the driver's view of the RX indices. */
3540 sw_cons = sc->rx_cons;
3541 sw_prod = sc->rx_prod;
3542 sw_prod_bseq = sc->rx_prod_bseq;
3543
3544 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3545 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3546 __func__, sw_prod, sw_cons, sw_prod_bseq);
3547
3548 /* Prevent speculative reads from getting ahead of the status block. */
3549 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3550 BUS_SPACE_BARRIER_READ);
3551
3552 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3553 sc->rx_low_watermark = sc->free_rx_bd);
3554
3555 /*
3556 * Scan through the receive chain as long
3557 * as there is work to do.
3558 */
3559 while (sw_cons != hw_cons) {
3560 struct mbuf *m;
3561 struct rx_bd *rxbd;
3562 unsigned int len;
3563 u_int32_t status;
3564
3565 /* Convert the producer/consumer indices to an actual
3566 * rx_bd index.
3567 */
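/*
 * Note: sw_cons and sw_prod are free-running counters;
 * RX_CHAIN_IDX() presumably masks them down to a valid rx_bd
 * position, and RX_PAGE()/RX_IDX() below split that position into
 * a page number and an offset within the page.
 */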
3568 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3569 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3570
3571 /* Get the used rx_bd. */
3572 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
3573 sc->free_rx_bd++;
3574
3575 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
3576 bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
3577
3578 /* The mbuf is stored with the last rx_bd entry of a packet. */
3579 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3580 #ifdef DIAGNOSTIC
3581 /* Validate that this is the last rx_bd. */
3582 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
3583 printf("%s: Unexpected mbuf found in "
3584 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
3585 sw_chain_cons);
3586 }
3587 #endif
3588
3589 /* DRC - ToDo: If the received packet is small, say less
3590 * than 128 bytes, allocate a new mbuf here,
3591 * copy the data to that mbuf, and recycle
3592 * the mapped jumbo frame.
3593 */
3594
3595 /* Unmap the mbuf from DMA space. */
3596 #ifdef DIAGNOSTIC
3597 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
3598 printf("invalid map sw_cons 0x%x "
3599 "sw_prod 0x%x "
3600 "sw_chain_cons 0x%x "
3601 "sw_chain_prod 0x%x "
3602 "hw_cons 0x%x "
3603 "TOTAL_RX_BD_PER_PAGE 0x%x "
3604 "TOTAL_RX_BD 0x%x\n",
3605 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
3606 hw_cons,
3607 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
3608 }
3609 #endif
3610 bus_dmamap_sync(sc->bnx_dmatag,
3611 sc->rx_mbuf_map[sw_chain_cons], 0,
3612 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
3613 BUS_DMASYNC_POSTREAD);
3614 bus_dmamap_unload(sc->bnx_dmatag,
3615 sc->rx_mbuf_map[sw_chain_cons]);
3616
3617 /* Remove the mbuf from the driver's chain. */
3618 m = sc->rx_mbuf_ptr[sw_chain_cons];
3619 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3620
3621 /*
3622 * Frames received on the NetXtreme II are prepended
3623 * with the l2_fhdr structure which provides status
3624 * information about the received frame (including
3625 * VLAN tags and checksum info) and are also
3626 * automatically adjusted to align the IP header
3627 * (i.e. two null bytes are inserted before the
3628 * Ethernet header).
3629 */
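/*
 * Sketch of the received buffer layout implied by the comment above
 * (offsets are editorial, not taken from the hardware manual):
 *
 *   +------------------+ offset 0
 *   | struct l2_fhdr   | status, pkt_len, vlan tag, checksums
 *   +------------------+ sizeof(struct l2_fhdr)
 *   | 2 pad bytes      | so the IP header lands 4-byte aligned
 *   +------------------+
 *   | Ethernet header  |
 *   | payload ...      |
 *   +------------------+
 *
 * The m_adj() call further down skips the header and padding.
 */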
3630 l2fhdr = mtod(m, struct l2_fhdr *);
3631
3632 len = l2fhdr->l2_fhdr_pkt_len;
3633 status = l2fhdr->l2_fhdr_status;
3634
3635 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
3636 aprint_error("Simulating l2_fhdr status error.\n");
3637 status = status | L2_FHDR_ERRORS_PHY_DECODE);
3638
3639 /* Watch for unusual sized frames. */
3640 DBRUNIF(((len < BNX_MIN_MTU) ||
3641 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
3642 aprint_error_dev(sc->bnx_dev,
3643 "Unusual frame size found. "
3644 "Min(%d), Actual(%d), Max(%d)\n",
3645 (int)BNX_MIN_MTU, len,
3646 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
3647
3648 bnx_dump_mbuf(sc, m);
3649 bnx_breakpoint(sc));
3650
3651 len -= ETHER_CRC_LEN;
3652
3653 /* Check the received frame for errors. */
3654 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
3655 L2_FHDR_ERRORS_PHY_DECODE |
3656 L2_FHDR_ERRORS_ALIGNMENT |
3657 L2_FHDR_ERRORS_TOO_SHORT |
3658 L2_FHDR_ERRORS_GIANT_FRAME)) ||
3659 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
3660 len >
3661 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
3662 ifp->if_ierrors++;
3663 DBRUNIF(1, sc->l2fhdr_status_errors++);
3664
3665 /* Reuse the mbuf for a new frame. */
3666 if (bnx_get_buf(sc, m, &sw_prod,
3667 &sw_chain_prod, &sw_prod_bseq)) {
3668 DBRUNIF(1, bnx_breakpoint(sc));
3669 panic("%s: Can't reuse RX mbuf!\n",
3670 device_xname(sc->bnx_dev));
3671 }
3672 continue;
3673 }
3674
3675 /*
3676 * Get a new mbuf for the rx_bd. If no new
3677 * mbufs are available then reuse the current mbuf,
3678 * log an ierror on the interface, and generate
3679 * an error in the system log.
3680 */
3681 if (bnx_get_buf(sc, NULL, &sw_prod, &sw_chain_prod,
3682 &sw_prod_bseq)) {
3683 DBRUN(BNX_WARN, BNX_PRINTF(sc, "Failed to allocate "
3684 "new mbuf, incoming frame dropped!\n"));
3685
3686 ifp->if_ierrors++;
3687
3688 /* Try to reuse the existing mbuf. */
3689 if (bnx_get_buf(sc, m, &sw_prod,
3690 &sw_chain_prod, &sw_prod_bseq)) {
3691 DBRUNIF(1, bnx_breakpoint(sc));
3692 panic("%s: Double mbuf allocation "
3693 "failure!",
3694 device_xname(sc->bnx_dev));
3695 }
3696 continue;
3697 }
3698
3699 /* Skip over the l2_fhdr when passing the data up
3700 * the stack.
3701 */
3702 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3703
3704 /* Adjust the packet length to match the received data. */
3705 m->m_pkthdr.len = m->m_len = len;
3706
3707 /* Send the packet to the appropriate interface. */
3708 m->m_pkthdr.rcvif = ifp;
3709
3710 DBRUN(BNX_VERBOSE_RECV,
3711 struct ether_header *eh;
3712 eh = mtod(m, struct ether_header *);
3713 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
3714 __func__, ether_sprintf(eh->ether_dhost),
3715 ether_sprintf(eh->ether_shost),
3716 htons(eh->ether_type)));
3717
3718 /* Validate the checksum. */
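/*
 * Note: l2_fhdr_ip_xsum presumably holds the hardware's ones'
 * complement sum over the IP header; a valid header sums to 0xffff,
 * which is what the (xsum ^ 0xffff) == 0 test below checks.
 */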
3719
3720 /* Check for an IP datagram. */
3721 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3722 /* Check if the IP checksum is valid. */
3723 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
3724 == 0)
3725 m->m_pkthdr.csum_flags |=
3726 M_CSUM_IPv4;
3727 #ifdef BNX_DEBUG
3728 else
3729 DBPRINT(sc, BNX_WARN_SEND,
3730 "%s(): Invalid IP checksum "
3731 "= 0x%04X!\n",
3732 __func__,
3733 l2fhdr->l2_fhdr_ip_xsum
3734 );
3735 #endif
3736 }
3737
3738 /* Check for a valid TCP/UDP frame. */
3739 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3740 L2_FHDR_STATUS_UDP_DATAGRAM)) {
3741 /* Check for a good TCP/UDP checksum. */
3742 if ((status &
3743 (L2_FHDR_ERRORS_TCP_XSUM |
3744 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3745 m->m_pkthdr.csum_flags |=
3746 M_CSUM_TCPv4 |
3747 M_CSUM_UDPv4;
3748 } else {
3749 DBPRINT(sc, BNX_WARN_SEND,
3750 "%s(): Invalid TCP/UDP "
3751 "checksum = 0x%04X!\n",
3752 __func__,
3753 l2fhdr->l2_fhdr_tcp_udp_xsum);
3754 }
3755 }
3756
3757 /*
3758 * If we received a packet with a vlan tag,
3759 * attach that information to the packet.
3760 */
3761 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
3762 #if 0
3763 struct ether_vlan_header vh;
3764
3765 DBPRINT(sc, BNX_VERBOSE_SEND,
3766 "%s(): VLAN tag = 0x%04X\n",
3767 __func__,
3768 l2fhdr->l2_fhdr_vlan_tag);
3769
3770 if (m->m_pkthdr.len < ETHER_HDR_LEN) {
3771 m_freem(m);
3772 continue;
3773 }
3774 m_copydata(m, 0, ETHER_HDR_LEN, (void *)&vh);
3775 vh.evl_proto = vh.evl_encap_proto;
3776 vh.evl_tag = l2fhdr->l2_fhdr_vlan_tag;
3777 vh.evl_encap_proto = htons(ETHERTYPE_VLAN);
3778 m_adj(m, ETHER_HDR_LEN);
3779 if ((m = m_prepend(m, sizeof(vh), M_DONTWAIT)) == NULL)
3780 continue;
3781 m->m_pkthdr.len += sizeof(vh);
3782 if (m->m_len < sizeof(vh) &&
3783 (m = m_pullup(m, sizeof(vh))) == NULL)
3784 goto bnx_rx_int_next_rx;
3785 m_copyback(m, 0, sizeof(vh), &vh);
3786 #else
3787 VLAN_INPUT_TAG(ifp, m,
3788 l2fhdr->l2_fhdr_vlan_tag,
3789 continue);
3790 #endif
3791 }
3792
3793 #if NBPFILTER > 0
3794 /*
3795 * Handle BPF listeners. Let the BPF
3796 * user see the packet.
3797 */
3798 if (ifp->if_bpf)
3799 bpf_mtap(ifp->if_bpf, m);
3800 #endif
3801
3802 /* Pass the mbuf off to the upper layers. */
3803 ifp->if_ipackets++;
3804 DBPRINT(sc, BNX_VERBOSE_RECV,
3805 "%s(): Passing received frame up.\n", __func__);
3806 (*ifp->if_input)(ifp, m);
3807 DBRUNIF(1, sc->rx_mbuf_alloc--);
3808
3809 }
3810
3811 sw_cons = NEXT_RX_BD(sw_cons);
3812
3813 /* Refresh hw_cons to see if there's new work */
3814 if (sw_cons == hw_cons) {
3815 hw_cons = sc->hw_rx_cons =
3816 sblk->status_rx_quick_consumer_index0;
3817 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
3818 USABLE_RX_BD_PER_PAGE)
3819 hw_cons++;
3820 }
3821
3822 /* Prevent speculative reads from getting ahead of
3823 * the status block.
3824 */
3825 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3826 BUS_SPACE_BARRIER_READ);
3827 }
3828
3829 for (i = 0; i < RX_PAGES; i++)
3830 bus_dmamap_sync(sc->bnx_dmatag,
3831 sc->rx_bd_chain_map[i], 0,
3832 sc->rx_bd_chain_map[i]->dm_mapsize,
3833 BUS_DMASYNC_PREWRITE);
3834
3835 sc->rx_cons = sw_cons;
3836 sc->rx_prod = sw_prod;
3837 sc->rx_prod_bseq = sw_prod_bseq;
3838
3839 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3840 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3841
3842 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
3843 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
3844 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
3845 }
3846
3847 /****************************************************************************/
3848 /* Handles transmit completion interrupt events. */
3849 /* */
3850 /* Returns: */
3851 /* Nothing. */
3852 /****************************************************************************/
3853 void
3854 bnx_tx_intr(struct bnx_softc *sc)
3855 {
3856 struct status_block *sblk = sc->status_block;
3857 struct ifnet *ifp = &sc->bnx_ec.ec_if;
3858 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3859
3860 DBRUNIF(1, sc->tx_interrupts++);
3861 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3862 BUS_DMASYNC_POSTREAD);
3863
3864 /* Get the hardware's view of the TX consumer index. */
3865 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3866
3867 /* Skip to the next entry if this is a chain page pointer. */
3868 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3869 hw_tx_cons++;
3870
3871 sw_tx_cons = sc->tx_cons;
3872
3873 /* Prevent speculative reads from getting ahead of the status block. */
3874 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3875 BUS_SPACE_BARRIER_READ);
3876
3877 /* Cycle through any completed TX chain page entries. */
3878 while (sw_tx_cons != hw_tx_cons) {
3879 #ifdef BNX_DEBUG
3880 struct tx_bd *txbd = NULL;
3881 #endif
3882 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
3883
3884 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
3885 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
3886 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
3887
3888 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
3889 aprint_error_dev(sc->bnx_dev,
3890 "TX chain consumer out of range! 0x%04X > 0x%04X\n",
3891 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));
3892
3893 DBRUNIF(1, txbd = &sc->tx_bd_chain
3894 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
3895
3896 DBRUNIF((txbd == NULL),
3897 aprint_error_dev(sc->bnx_dev,
3898 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
3899 bnx_breakpoint(sc));
3900
3901 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
3902 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
3903
3904 /*
3905 * Free the associated mbuf. Remember
3906 * that only the last tx_bd of a packet
3907 * has an mbuf pointer and DMA map.
3908 */
3909 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
3910 /* Validate that this is the last tx_bd. */
3911 DBRUNIF((!(txbd->tx_bd_vlan_tag_flags &
3912 TX_BD_FLAGS_END)),
3913 aprint_error_dev(sc->bnx_dev,
3914 "tx_bd END flag not set but txmbuf == NULL!\n");
3915 bnx_breakpoint(sc));
3916
3917 DBRUN(BNX_INFO_SEND,
3918 aprint_debug("%s: Unloading map/freeing mbuf "
3919 "from tx_bd[0x%04X]\n",
3920 __func__, sw_tx_chain_cons));
3921
3922 /* Unmap the mbuf. */
3923 bus_dmamap_unload(sc->bnx_dmatag,
3924 sc->tx_mbuf_map[sw_tx_chain_cons]);
3925
3926 /* Free the mbuf. */
3927 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
3928 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
3929 DBRUNIF(1, sc->tx_mbuf_alloc--);
3930
3931 ifp->if_opackets++;
3932 }
3933
3934 sc->used_tx_bd--;
3935 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
3936
3937 /* Refresh hw_cons to see if there's new work. */
3938 hw_tx_cons = sc->hw_tx_cons =
3939 sblk->status_tx_quick_consumer_index0;
3940 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
3941 USABLE_TX_BD_PER_PAGE)
3942 hw_tx_cons++;
3943
3944 /* Prevent speculative reads from getting ahead of
3945 * the status block.
3946 */
3947 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3948 BUS_SPACE_BARRIER_READ);
3949 }
3950
3951 /* Clear the TX timeout timer. */
3952 ifp->if_timer = 0;
3953
3954 /* Clear the tx hardware queue full flag. */
3955 if ((sc->used_tx_bd + BNX_TX_SLACK_SPACE) < USABLE_TX_BD) {
3956 DBRUNIF((ifp->if_flags & IFF_OACTIVE),
3957 aprint_debug_dev(sc->bnx_dev,
3958 "TX chain is open for business! Used tx_bd = %d\n",
3959 sc->used_tx_bd));
3960 ifp->if_flags &= ~IFF_OACTIVE;
3961 }
3962
3963 sc->tx_cons = sw_tx_cons;
3964 }
3965
3966 /****************************************************************************/
3967 /* Disables interrupt generation. */
3968 /* */
3969 /* Returns: */
3970 /* Nothing. */
3971 /****************************************************************************/
3972 void
3973 bnx_disable_intr(struct bnx_softc *sc)
3974 {
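/*
 * Note: the read-back below presumably flushes the posted PCI
 * write so that interrupts are really masked before we return.
 */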
3975 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3976 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
3977 }
3978
3979 /****************************************************************************/
3980 /* Enables interrupt generation. */
3981 /* */
3982 /* Returns: */
3983 /* Nothing. */
3984 /****************************************************************************/
3985 void
3986 bnx_enable_intr(struct bnx_softc *sc)
3987 {
3988 u_int32_t val;
3989
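/*
 * Note (editorial reading): the first write acknowledges events up
 * to last_status_idx with interrupts still masked, the second
 * unmasks them, and the COAL_NOW command below forces an immediate
 * status block update so that events which arrived while masked
 * still raise an interrupt.
 */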
3990 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
3991 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
3992
3993 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
3994 sc->last_status_idx);
3995
3996 val = REG_RD(sc, BNX_HC_COMMAND);
3997 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
3998 }
3999
4000 /****************************************************************************/
4001 /* Handles controller initialization. */
4002 /* */
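/* Returns: */
/* 0 for success, positive value for failure. */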
4003 /****************************************************************************/
4004 int
4005 bnx_init(struct ifnet *ifp)
4006 {
4007 struct bnx_softc *sc = ifp->if_softc;
4008 u_int32_t ether_mtu;
4009 int s, error = 0;
4010
4011 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4012
4013 s = splnet();
4014
4015 bnx_stop(ifp, 0);
4016
4017 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4018 aprint_error("bnx: Controller reset failed!\n");
4019 goto bnx_init_exit;
4020 }
4021
4022 if ((error = bnx_chipinit(sc)) != 0) {
4023 aprint_error("bnx: Controller initialization failed!\n");
4024 goto bnx_init_exit;
4025 }
4026
4027 if ((error = bnx_blockinit(sc)) != 0) {
4028 aprint_error("bnx: Block initialization failed!\n");
4029 goto bnx_init_exit;
4030 }
4031
4032 /* Calculate and program the Ethernet MRU size. */
4033 if (ifp->if_mtu <= ETHERMTU) {
4034 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4035 sc->mbuf_alloc_size = MCLBYTES;
4036 } else {
4037 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4038 sc->mbuf_alloc_size = BNX_MAX_MRU;
4039 }
4040
4042 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4043 __func__, ether_mtu);
4044
4045 /*
4046 * Program the MRU and enable Jumbo frame
4047 * support.
4048 */
4049 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4050 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4051
4052 /* Calculate the RX Ethernet frame size for rx_bd's. */
4053 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4054
4055 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4056 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4057 sc->mbuf_alloc_size, sc->max_frame_size);
4058
4059 /* Program appropriate promiscuous/multicast filtering. */
4060 bnx_set_rx_mode(sc);
4061
4062 /* Init RX buffer descriptor chain. */
4063 bnx_init_rx_chain(sc);
4064
4065 /* Init TX buffer descriptor chain. */
4066 bnx_init_tx_chain(sc);
4067
4068 /* Enable host interrupts. */
4069 bnx_enable_intr(sc);
4070
4071 if ((error = ether_mediachange(ifp)) != 0)
4072 goto bnx_init_exit;
4073
4074 ifp->if_flags |= IFF_RUNNING;
4075 ifp->if_flags &= ~IFF_OACTIVE;
4076
4077 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4078
4079 bnx_init_exit:
4080 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4081
4082 splx(s);
4083
4084 return(error);
4085 }
4086
4087 /****************************************************************************/
4088 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4089 /* memory visible to the controller. */
4090 /* */
4091 /* Returns: */
4092 /* 0 for success, positive value for failure. */
4093 /****************************************************************************/
4094 int
4095 bnx_tx_encap(struct bnx_softc *sc, struct mbuf **m_head)
4096 {
4097 bus_dmamap_t map;
4098 struct tx_bd *txbd = NULL;
4099 struct mbuf *m0;
4100 u_int16_t vlan_tag = 0, flags = 0;
4101 u_int16_t chain_prod, prod;
4102 #ifdef BNX_DEBUG
4103 u_int16_t debug_prod;
4104 #endif
4105 u_int32_t addr, prod_bseq;
4106 int i, error, rc = 0;
4107 struct m_tag *mtag;
4108
4109 m0 = *m_head;
4110
4111 /* Transfer any checksum offload flags to the bd. */
4112 if (m0->m_pkthdr.csum_flags) {
4113 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
4114 flags |= TX_BD_FLAGS_IP_CKSUM;
4115 if (m0->m_pkthdr.csum_flags &
4116 (M_CSUM_TCPv4 | M_CSUM_UDPv4))
4117 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4118 }
4119
4120 /* Transfer any VLAN tags to the bd. */
4121 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m0);
4122 if (mtag != NULL) {
4123 flags |= TX_BD_FLAGS_VLAN_TAG;
4124 vlan_tag = VLAN_TAG_VALUE(mtag);
4125 }
4126
4127 /* Map the mbuf into DMAable memory. */
4128 prod = sc->tx_prod;
4129 chain_prod = TX_CHAIN_IDX(prod);
4130 map = sc->tx_mbuf_map[chain_prod];
4131
4132 /* Map the mbuf into our DMA address space. */
4133 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
4134 if (error != 0) {
4135 aprint_error_dev(sc->bnx_dev,
4136 "Error mapping mbuf into TX chain!\n");
4137 m_freem(m0);
4138 *m_head = NULL;
4139 return (error);
4140 }
4141 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
4142 BUS_DMASYNC_PREWRITE);
4143 /*
4144 * The chip seems to require that at least 16 descriptors be kept
4145 * empty at all times. Make sure we honor that.
4146 * XXX Would it be faster to assume worst case scenario for
4147 * map->dm_nsegs and do this calculation higher up?
4148 */
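/*
 * For example (hypothetical numbers): with USABLE_TX_BD = 512,
 * sc->used_tx_bd = 420 and BNX_TX_SLACK_SPACE = 16, any mbuf that
 * mapped to more than 76 segments would be rejected here with
 * ENOBUFS.
 */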
4149 if (map->dm_nsegs > (USABLE_TX_BD - sc->used_tx_bd - BNX_TX_SLACK_SPACE)) {
4150 bus_dmamap_unload(sc->bnx_dmatag, map);
4151 return (ENOBUFS);
4152 }
4153
4154 /* prod points to an empty tx_bd at this point. */
4155 prod_bseq = sc->tx_prod_bseq;
4156 #ifdef BNX_DEBUG
4157 debug_prod = chain_prod;
4158 #endif
4159 DBPRINT(sc, BNX_INFO_SEND,
4160 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4161 "prod_bseq = 0x%08X\n",
4162 __func__, prod, chain_prod, prod_bseq);
4163
4164 /*
4165 * Cycle through each mbuf segment that makes up
4166 * the outgoing frame, gathering the mapping info
4167 * for that segment and creating a tx_bd for the
4168 * mbuf.
4169 */
4170 for (i = 0; i < map->dm_nsegs ; i++) {
4171 chain_prod = TX_CHAIN_IDX(prod);
4172 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4173
4174 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
4175 txbd->tx_bd_haddr_lo = htole32(addr);
4176 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4177 txbd->tx_bd_haddr_hi = htole32(addr);
4178 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
4179 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4180 txbd->tx_bd_flags = htole16(flags);
4181 prod_bseq += map->dm_segs[i].ds_len;
4182 if (i == 0)
4183 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4184 prod = NEXT_TX_BD(prod);
4185 }
4186 /* Set the END flag on the last TX buffer descriptor. */
4187 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
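/*
 * A single-segment packet therefore carries both TX_BD_FLAGS_START
 * and TX_BD_FLAGS_END on the same descriptor.
 */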
4188
4189 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));
4190
4191 DBPRINT(sc, BNX_INFO_SEND,
4192 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
4193 "prod_bseq = 0x%08X\n",
4194 __func__, prod, chain_prod, prod_bseq);
4195
4196 /*
4197 * Ensure that the mbuf pointer for this
4198 * transmission is placed at the array
4199 * index of the last descriptor in this
4200 * chain. This is done because a single
4201 * map is used for all segments of the mbuf
4202 * and we don't want to unload the map before
4203 * all of the segments have been freed.
4204 */
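/*
 * For example (hypothetical indices): a frame mapped to three
 * segments in descriptors 10, 11 and 12 stores its mbuf pointer at
 * tx_mbuf_ptr[12]; bnx_tx_intr() unloads the map and frees the mbuf
 * only when it reaches that final descriptor.
 */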
4205 sc->tx_mbuf_ptr[chain_prod] = m0;
4206 sc->used_tx_bd += map->dm_nsegs;
4207
4208 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4209 sc->tx_hi_watermark = sc->used_tx_bd);
4210
4211 DBRUNIF(1, sc->tx_mbuf_alloc++);
4212
4213 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4214 map->dm_nsegs));
4215
4216 /* prod points to the next free tx_bd at this point. */
4217 sc->tx_prod = prod;
4218 sc->tx_prod_bseq = prod_bseq;
4219
4220 return (rc);
4221 }
4222
4223 /****************************************************************************/
4224 /* Main transmit routine. */
4225 /* */
4226 /* Returns: */
4227 /* Nothing. */
4228 /****************************************************************************/
4229 void
4230 bnx_start(struct ifnet *ifp)
4231 {
4232 struct bnx_softc *sc = ifp->if_softc;
4233 struct mbuf *m_head = NULL;
4234 int count = 0;
4235 u_int16_t tx_prod, tx_chain_prod;
4236
4237 /* If there's no link or the transmit queue is empty then just exit. */
4238 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) {
4239 DBPRINT(sc, BNX_INFO_SEND,
4240 "%s(): output active or device not running.\n", __func__);
4241 goto bnx_start_exit;
4242 }
4243
4244 /* prod points to the next free tx_bd. */
4245 tx_prod = sc->tx_prod;
4246 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4247
4248 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4249 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4250 __func__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4251
4252 /*
4253 * Keep adding entries while there is space in the ring. We keep
4254 * BNX_TX_SLACK_SPACE entries unused at all times.
4255 */
4256 while (sc->used_tx_bd < USABLE_TX_BD - BNX_TX_SLACK_SPACE) {
4257 /* Check for any frames to send. */
4258 IFQ_POLL(&ifp->if_snd, m_head);
4259 if (m_head == NULL)
4260 break;
4261
4262 /*
4263 * Pack the data into the transmit ring. If we
4264 * don't have room, set the OACTIVE flag to wait
4265 * for the NIC to drain the chain.
4266 */
4267 if (bnx_tx_encap(sc, &m_head)) {
4268 ifp->if_flags |= IFF_OACTIVE;
4269 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4270 "business! Total tx_bd used = %d\n",
4271 sc->used_tx_bd);
4272 break;
4273 }
4274
4275 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4276 count++;
4277
4278 #if NBPFILTER > 0
4279 /* Send a copy of the frame to any BPF listeners. */
4280 if (ifp->if_bpf)
4281 bpf_mtap(ifp->if_bpf, m_head);
4282 #endif
4283 }
4284
4285 if (count == 0) {
4286 /* no packets were dequeued */
4287 DBPRINT(sc, BNX_VERBOSE_SEND,
4288 "%s(): No packets were dequeued\n", __func__);
4289 goto bnx_start_exit;
4290 }
4291
4292 /* Update the driver's counters. */
4293 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4294
4295 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4296 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, tx_prod,
4297 tx_chain_prod, sc->tx_prod_bseq);
4298
4299 /* Start the transmit. */
4300 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4301 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4302
4303 /* Set the tx timeout. */
4304 ifp->if_timer = BNX_TX_TIMEOUT;
4305
4306 bnx_start_exit:
4307 return;
4308 }
4309
4310 /****************************************************************************/
4311 /* Handles any IOCTL calls from the operating system. */
4312 /* */
4313 /* Returns: */
4314 /* 0 for success, positive value for failure. */
4315 /****************************************************************************/
4316 int
4317 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
4318 {
4319 struct bnx_softc *sc = ifp->if_softc;
4320 struct ifreq *ifr = (struct ifreq *) data;
4321 struct mii_data *mii = &sc->bnx_mii;
4322 int s, error = 0;
4323
4324 s = splnet();
4325
4326 switch (command) {
4327 case SIOCSIFFLAGS:
4328 if (ifp->if_flags & IFF_UP) {
4329 if ((ifp->if_flags & IFF_RUNNING) &&
4330 ((ifp->if_flags ^ sc->bnx_if_flags) &
4331 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
4332 bnx_set_rx_mode(sc);
4333 } else if (!(ifp->if_flags & IFF_RUNNING))
4334 bnx_init(ifp);
4335
4336 } else if (ifp->if_flags & IFF_RUNNING)
4337 bnx_stop(ifp, 1);
4338
4339 sc->bnx_if_flags = ifp->if_flags;
4340 break;
4341
4342 case SIOCSIFMEDIA:
4343 case SIOCGIFMEDIA:
4344 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4345 sc->bnx_phy_flags);
4346
4347 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4348 break;
4349
4350 default:
4351 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
4352 break;
4353
4354 error = 0;
4355
4356 if (command != SIOCADDMULTI && command != SIOCDELMULTI)
4357 ;
4358 else if (ifp->if_flags & IFF_RUNNING) {
4359 /* reload packet filter if running */
4360 bnx_set_rx_mode(sc);
4361 }
4362 break;
4363 }
4364
4365 splx(s);
4366
4367 return (error);
4368 }
4369
4370 /****************************************************************************/
4371 /* Transmit timeout handler. */
4372 /* */
4373 /* Returns: */
4374 /* Nothing. */
4375 /****************************************************************************/
4376 void
4377 bnx_watchdog(struct ifnet *ifp)
4378 {
4379 struct bnx_softc *sc = ifp->if_softc;
4380
4381 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
4382 bnx_dump_status_block(sc));
4383
4384 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
4385
4386 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
4387
4388 bnx_init(ifp);
4389
4390 ifp->if_oerrors++;
4391 }
4392
4396 /****************************************************************************/
4397 /* Main interrupt entry point. Verifies that the controller generated the */
4398 /* interrupt and then calls a separate routine to handle the various */
4399 /* interrupt causes (PHY, TX, RX). */
4400 /* */
4401 /* Returns: */
4402 /* 0 for success, positive value for failure. */
4403 /****************************************************************************/
4404 int
4405 bnx_intr(void *xsc)
4406 {
4407 struct bnx_softc *sc;
4408 struct ifnet *ifp;
4409 u_int32_t status_attn_bits;
4410 const struct status_block *sblk;
4411
4412 sc = xsc;
4413 if (!device_is_active(sc->bnx_dev))
4414 return 0;
4415
4416 ifp = &sc->bnx_ec.ec_if;
4417
4418 DBRUNIF(1, sc->interrupts_generated++);
4419
4420 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4421 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4422
4423 /*
4424 * If the hardware status block index
4425 * matches the last value read by the
4426 * driver and we haven't asserted our
4427 * interrupt then there's nothing to do.
4428 */
4429 if ((sc->status_block->status_idx == sc->last_status_idx) &&
4430 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
4431 BNX_PCICFG_MISC_STATUS_INTA_VALUE))
4432 return (0);
4433
4434 /* Ack the interrupt and stop others from occurring. */
4435 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4436 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4437 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4438
4439 /* Keep processing data as long as there is work to do. */
4440 for (;;) {
4441 sblk = sc->status_block;
4442 status_attn_bits = sblk->status_attn_bits;
4443
4444 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
4445 aprint_debug("Simulating unexpected status attention bit set.");
4446 status_attn_bits = status_attn_bits |
4447 STATUS_ATTN_BITS_PARITY_ERROR);
4448
4449 /* Was it a link change interrupt? */
4450 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4451 (sblk->status_attn_bits_ack &
4452 STATUS_ATTN_BITS_LINK_STATE))
4453 bnx_phy_intr(sc);
4454
4455 /* If any other attention is asserted then the chip is toast. */
4456 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4457 (sblk->status_attn_bits_ack &
4458 ~STATUS_ATTN_BITS_LINK_STATE))) {
4459 DBRUN(1, sc->unexpected_attentions++);
4460
4461 aprint_error_dev(sc->bnx_dev,
4462 "Fatal attention detected: 0x%08X\n",
4463 sblk->status_attn_bits);
4464
4465 DBRUN(BNX_FATAL,
4466 if (bnx_debug_unexpected_attention == 0)
4467 bnx_breakpoint(sc));
4468
4469 bnx_init(ifp);
4470 return (1);
4471 }
4472
4473 /* Check for any completed RX frames. */
4474 if (sblk->status_rx_quick_consumer_index0 !=
4475 sc->hw_rx_cons)
4476 bnx_rx_intr(sc);
4477
4478 /* Check for any completed TX frames. */
4479 if (sblk->status_tx_quick_consumer_index0 !=
4480 sc->hw_tx_cons)
4481 bnx_tx_intr(sc);
4482
4483 /* Save the status block index value for use during the
4484 * next interrupt.
4485 */
4486 sc->last_status_idx = sblk->status_idx;
4487
4488 /* Prevent speculative reads from getting ahead of the
4489 * status block.
4490 */
4491 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4492 BUS_SPACE_BARRIER_READ);
4493
4494 /* If there's no work left then exit the isr. */
4495 if ((sblk->status_rx_quick_consumer_index0 ==
4496 sc->hw_rx_cons) &&
4497 (sblk->status_tx_quick_consumer_index0 ==
4498 sc->hw_tx_cons))
4499 break;
4500 }
4501
4502 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4503 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
4504
4505 /* Re-enable interrupts. */
4506 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4507 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4508 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4509 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4510 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4511
4512 /* Handle any frames that arrived while handling the interrupt. */
4513 if (!IFQ_IS_EMPTY(&ifp->if_snd))
4514 bnx_start(ifp);
4515
4516 return (1);
4517 }
4518
4519 /****************************************************************************/
4520 /* Programs the various packet receive modes (broadcast and multicast). */
4521 /* */
4522 /* Returns: */
4523 /* Nothing. */
4524 /****************************************************************************/
4525 void
4526 bnx_set_rx_mode(struct bnx_softc *sc)
4527 {
4528 struct ethercom *ec = &sc->bnx_ec;
4529 struct ifnet *ifp = &ec->ec_if;
4530 struct ether_multi *enm;
4531 struct ether_multistep step;
4532 u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4533 u_int32_t rx_mode, sort_mode;
4534 int h, i;
4535
4536 /* Initialize receive mode default settings. */
4537 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
4538 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
4539 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
4540
4541 /*
4542 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4543 * be enabled.
4544 */
4545 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
4546 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
4547
4548 /*
4549 * Check for promiscuous, all multicast, or selected
4550 * multicast address filtering.
4551 */
4552 if (ifp->if_flags & IFF_PROMISC) {
4553 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
4554
4555 /* Enable promiscuous mode. */
4556 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
4557 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
4558 } else if (ifp->if_flags & IFF_ALLMULTI) {
4559 allmulti:
4560 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
4561
4562 /* Enable all multicast addresses. */
4563 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4564 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4565 0xffffffff);
4566 sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
4567 } else {
4568 /* Accept one or more multicast(s). */
4569 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
4570
4571 ETHER_FIRST_MULTI(step, ec, enm);
4572 while (enm != NULL) {
4573 if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
4574 ETHER_ADDR_LEN)) {
4575 ifp->if_flags |= IFF_ALLMULTI;
4576 goto allmulti;
4577 }
4578 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
4579 0xFF;
4580 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4581 ETHER_NEXT_MULTI(step, enm);
4582 }
4583
4584 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4585 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4586 hashes[i]);
4587
4588 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
4589 }
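#if 0
/*
 * Sketch (not compiled): how one hypothetical multicast address maps
 * onto the eight 32-bit hash registers programmed above.
 */
{
u_int8_t mc[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
int hash, reg, bit;

hash = ether_crc32_le(mc, ETHER_ADDR_LEN) & 0xFF;
reg = (hash & 0xE0) >> 5; /* selects one of the 8 hash registers */
bit = hash & 0x1F; /* selects one of 32 bits in that register */
hashes[reg] |= 1 << bit;
}
#endif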
4590
4591 /* Only make changes if the receive mode has actually changed. */
4592 if (rx_mode != sc->rx_mode) {
4593 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4594 rx_mode);
4595
4596 sc->rx_mode = rx_mode;
4597 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
4598 }
4599
4600 /* Disable and clear the existing sort before enabling a new sort. */
4601 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
4602 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
4603 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
4604 }
4605
4606 /****************************************************************************/
4607 /* Called periodically to update statistics from the controller's */
4608 /* statistics block. */
4609 /* */
4610 /* Returns: */
4611 /* Nothing. */
4612 /****************************************************************************/
4613 void
4614 bnx_stats_update(struct bnx_softc *sc)
4615 {
4616 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4617 struct statistics_block *stats;
4618
4619 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
4620 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4621 BUS_DMASYNC_POSTREAD);
4622
4623 stats = (struct statistics_block *)sc->stats_block;
4624
4625 /*
4626 * Update the interface statistics from the
4627 * hardware statistics.
4628 */
4629 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
4630
4631 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
4632 (u_long)stats->stat_EtherStatsOverrsizePkts +
4633 (u_long)stats->stat_IfInMBUFDiscards +
4634 (u_long)stats->stat_Dot3StatsAlignmentErrors +
4635 (u_long)stats->stat_Dot3StatsFCSErrors;
4636
4637 ifp->if_oerrors = (u_long)
4638 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4639 (u_long)stats->stat_Dot3StatsExcessiveCollisions +
4640 (u_long)stats->stat_Dot3StatsLateCollisions;
4641
4642 /*
4643 * Certain controllers don't report
4644 * carrier sense errors correctly.
4645 * See errata E11_5708CA0_1165.
4646 */
4647 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
4648 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
4649 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
4650
4651 /*
4652 * Update the sysctl statistics from the
4653 * hardware statistics.
4654 */
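/*
 * Each 64-bit counter arrives as two 32-bit halves; for instance
 * hi = 0x00000001 and lo = 0x00000010 combine below to
 * 0x0000000100000010. (Illustrative values only.)
 */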
4655 sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
4656 (u_int64_t) stats->stat_IfHCInOctets_lo;
4657
4658 sc->stat_IfHCInBadOctets =
4659 ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
4660 (u_int64_t) stats->stat_IfHCInBadOctets_lo;
4661
4662 sc->stat_IfHCOutOctets =
4663 ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
4664 (u_int64_t) stats->stat_IfHCOutOctets_lo;
4665
4666 sc->stat_IfHCOutBadOctets =
4667 ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
4668 (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
4669
4670 sc->stat_IfHCInUcastPkts =
4671 ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
4672 (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
4673
4674 sc->stat_IfHCInMulticastPkts =
4675 ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
4676 (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
4677
4678 sc->stat_IfHCInBroadcastPkts =
4679 ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
4680 (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
4681
4682 sc->stat_IfHCOutUcastPkts =
4683 ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
4684 (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
4685
4686 sc->stat_IfHCOutMulticastPkts =
4687 ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
4688 (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
4689
4690 sc->stat_IfHCOutBroadcastPkts =
4691 ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
4692 (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
4693
4694 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
4695 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
4696
4697 sc->stat_Dot3StatsCarrierSenseErrors =
4698 stats->stat_Dot3StatsCarrierSenseErrors;
4699
4700 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
4701
4702 sc->stat_Dot3StatsAlignmentErrors =
4703 stats->stat_Dot3StatsAlignmentErrors;
4704
4705 sc->stat_Dot3StatsSingleCollisionFrames =
4706 stats->stat_Dot3StatsSingleCollisionFrames;
4707
4708 sc->stat_Dot3StatsMultipleCollisionFrames =
4709 stats->stat_Dot3StatsMultipleCollisionFrames;
4710
4711 sc->stat_Dot3StatsDeferredTransmissions =
4712 stats->stat_Dot3StatsDeferredTransmissions;
4713
4714 sc->stat_Dot3StatsExcessiveCollisions =
4715 stats->stat_Dot3StatsExcessiveCollisions;
4716
4717 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
4718
4719 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
4720
4721 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
4722
4723 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
4724
4725 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
4726
4727 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
4728
4729 sc->stat_EtherStatsPktsRx64Octets =
4730 stats->stat_EtherStatsPktsRx64Octets;
4731
4732 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
4733 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
4734
4735 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
4736 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
4737
4738 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
4739 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
4740
4741 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
4742 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
4743
4744 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
4745 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
4746
4747 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
4748 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
4749
4750 sc->stat_EtherStatsPktsTx64Octets =
4751 stats->stat_EtherStatsPktsTx64Octets;
4752
4753 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
4754 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
4755
4756 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
4757 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
4758
4759 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
4760 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
4761
4762 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
4763 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
4764
4765 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
4766 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
4767
4768 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
4769 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
4770
4771 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
4772
4773 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
4774
4775 sc->stat_OutXonSent = stats->stat_OutXonSent;
4776
4777 sc->stat_OutXoffSent = stats->stat_OutXoffSent;
4778
4779 sc->stat_FlowControlDone = stats->stat_FlowControlDone;
4780
4781 sc->stat_MacControlFramesReceived =
4782 stats->stat_MacControlFramesReceived;
4783
4784 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
4785
4786 sc->stat_IfInFramesL2FilterDiscards =
4787 stats->stat_IfInFramesL2FilterDiscards;
4788
4789 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
4790
4791 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
4792
4793 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
4794
4795 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
4796
4797 sc->stat_CatchupInRuleCheckerDiscards =
4798 stats->stat_CatchupInRuleCheckerDiscards;
4799
4800 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
4801
4802 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
4803
4804 sc->stat_CatchupInRuleCheckerP4Hit =
4805 stats->stat_CatchupInRuleCheckerP4Hit;
4806
4807 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
4808 }
4809
4810 void
4811 bnx_tick(void *xsc)
4812 {
4813 struct bnx_softc *sc = xsc;
4814 struct mii_data *mii;
4815 u_int32_t msg;
4816 u_int16_t prod, chain_prod;
4817 u_int32_t prod_bseq;
4818 int s = splnet();
4819
4820 /* Tell the firmware that the driver is still running. */
4821 #ifdef BNX_DEBUG
4822 msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
4823 #else
4824 msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
4825 #endif
4826 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
4827
4828 /* Update the statistics from the hardware statistics block. */
4829 bnx_stats_update(sc);
4830
4831 /* Schedule the next tick. */
4832 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4833
4834 mii = &sc->bnx_mii;
4835 mii_tick(mii);
4836
4837 /* try to get more RX buffers, just in case */
4838 prod = sc->rx_prod;
4839 prod_bseq = sc->rx_prod_bseq;
4840 chain_prod = RX_CHAIN_IDX(prod);
4841 bnx_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq);
4842 sc->rx_prod = prod;
4843 sc->rx_prod_bseq = prod_bseq;
4844 splx(s);
4845 return;
4846 }
4847
4848 /****************************************************************************/
4849 /* BNX Debug Routines */
4850 /****************************************************************************/
4851 #ifdef BNX_DEBUG
4852
4853 /****************************************************************************/
4854 /* Prints out information about an mbuf. */
4855 /* */
4856 /* Returns: */
4857 /* Nothing. */
4858 /****************************************************************************/
4859 void
4860 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
4861 {
4862 struct mbuf *mp = m;
4863
4864 if (m == NULL) {
4865 /* Nothing to dump. */
4866 aprint_error("mbuf ptr is null!\n");
4867 return;
4868 }
4869
4870 while (mp) {
4871 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
4872 mp, mp->m_len);
4873
4874 if (mp->m_flags & M_EXT)
4875 aprint_debug("M_EXT ");
4876 if (mp->m_flags & M_PKTHDR)
4877 aprint_debug("M_PKTHDR ");
4878 aprint_debug("\n");
4879
4880 if (mp->m_flags & M_EXT)
4881 aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n",
4882 mp, mp->m_ext.ext_size);
4883
4884 mp = mp->m_next;
4885 }
4886 }
4887
4888 /****************************************************************************/
4889 /* Prints out the mbufs in the TX mbuf chain. */
4890 /* */
4891 /* Returns: */
4892 /* Nothing. */
4893 /****************************************************************************/
4894 void
4895 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4896 {
4897 struct mbuf *m;
4898 int i;
4899
4900 BNX_PRINTF(sc,
4901 "----------------------------"
4902 " tx mbuf data "
4903 "----------------------------\n");
4904
4905 for (i = 0; i < count; i++) {
4906 m = sc->tx_mbuf_ptr[chain_prod];
4907 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
4908 bnx_dump_mbuf(sc, m);
4909 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
4910 }
4911
4912 BNX_PRINTF(sc,
4913 "--------------------------------------------"
4914 "----------------------------\n");
4915 }
4916
4917 /*
4918 * This routine prints the RX mbuf chain.
4919 */
4920 void
4921 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4922 {
4923 struct mbuf *m;
4924 int i;
4925
4926 BNX_PRINTF(sc,
4927 "----------------------------"
4928 " rx mbuf data "
4929 "----------------------------\n");
4930
4931 for (i = 0; i < count; i++) {
4932 m = sc->rx_mbuf_ptr[chain_prod];
4933 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
4934 bnx_dump_mbuf(sc, m);
4935 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
4936 }
4937
4939 BNX_PRINTF(sc,
4940 "--------------------------------------------"
4941 "----------------------------\n");
4942 }
4943
4944 void
4945 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
4946 {
4947 if (idx > MAX_TX_BD)
4948 /* Index out of range. */
4949 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
4950 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4951 /* TX Chain page pointer. */
4952 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
4953 "page pointer\n", idx, txbd->tx_bd_haddr_hi,
4954 txbd->tx_bd_haddr_lo);
4955 else
4956 /* Normal tx_bd entry. */
4957 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
4958 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
4959 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
4960 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
4961 txbd->tx_bd_flags);
4962 }
4963
4964 void
4965 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
4966 {
4967 if (idx > MAX_RX_BD)
4968 /* Index out of range. */
4969 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
4970 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4971 /* RX Chain page pointer. */
4972 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
4973 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
4974 rxbd->rx_bd_haddr_lo);
4975 else
4976 /* Normal rx_bd entry. */
4977 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
4978 "0x%08X, flags = 0x%08X\n", idx,
4979 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
4980 rxbd->rx_bd_len, rxbd->rx_bd_flags);
4981 }
4982
4983 void
4984 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
4985 {
4986 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
4987 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
4988 "tcp_udp_xsum = 0x%04X\n", idx,
4989 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
4990 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
4991 l2fhdr->l2_fhdr_tcp_udp_xsum);
4992 }
4993
4994 /*
4995 * This routine prints the TX chain.
4996 */
4997 void
4998 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
4999 {
5000 struct tx_bd *txbd;
5001 int i;
5002
5003 /* First some info about the tx_bd chain structure. */
5004 BNX_PRINTF(sc,
5005 "----------------------------"
5006 " tx_bd chain "
5007 "----------------------------\n");
5008
5009 BNX_PRINTF(sc,
5010 "page size = 0x%08X, tx chain pages = 0x%08X\n",
5011 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5012
5013 BNX_PRINTF(sc,
5014 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5015 (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5016
5017 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5018
5019 BNX_PRINTF(sc, ""
5020 "-----------------------------"
5021 " tx_bd data "
5022 "-----------------------------\n");
5023
5024 /* Now print out the tx_bd's themselves. */
5025 for (i = 0; i < count; i++) {
5026 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5027 bnx_dump_txbd(sc, tx_prod, txbd);
5028 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5029 }
5030
5031 BNX_PRINTF(sc,
5032 "-----------------------------"
5033 "--------------"
5034 "-----------------------------\n");
5035 }
5036
5037 /*
5038 * This routine prints the RX chain.
5039 */
5040 void
5041 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5042 {
5043 struct rx_bd *rxbd;
5044 int i;
5045
5046 /* First some info about the rx_bd chain structure. */
5047 BNX_PRINTF(sc,
5048 "----------------------------"
5049 " rx_bd chain "
5050 "----------------------------\n");
5051
5052 BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5053
5054 BNX_PRINTF(sc,
5055 "page size = 0x%08X, rx chain pages = 0x%08X\n",
5056 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5057
5058 BNX_PRINTF(sc,
5059 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5060 (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5061
5062 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5063
5064 BNX_PRINTF(sc,
5065 "----------------------------"
5066 " rx_bd data "
5067 "----------------------------\n");
5068
5069 /* Now print out the rx_bd's themselves. */
5070 for (i = 0; i < count; i++) {
5071 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5072 bnx_dump_rxbd(sc, rx_prod, rxbd);
5073 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5074 }
5075
5076 BNX_PRINTF(sc,
5077 "----------------------------"
5078 "--------------"
5079 "----------------------------\n");
5080 }
5081
5082 /*
5083 * This routine prints the status block.
5084 */
5085 void
5086 bnx_dump_status_block(struct bnx_softc *sc)
5087 {
5088 struct status_block *sblk;
5089 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5090 BUS_DMASYNC_POSTREAD);
5091
5092 sblk = sc->status_block;
5093
5094 BNX_PRINTF(sc, "----------------------------- Status Block "
5095 "-----------------------------\n");
5096
5097 BNX_PRINTF(sc,
5098 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5099 sblk->status_attn_bits, sblk->status_attn_bits_ack,
5100 sblk->status_idx);
5101
5102 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
5103 sblk->status_rx_quick_consumer_index0,
5104 sblk->status_tx_quick_consumer_index0);
5105
5106 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5107
5108 /* These indices are not used for normal L2 drivers. */
5109 if (sblk->status_rx_quick_consumer_index1 ||
5110 sblk->status_tx_quick_consumer_index1)
5111 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
5112 sblk->status_rx_quick_consumer_index1,
5113 sblk->status_tx_quick_consumer_index1);
5114
5115 if (sblk->status_rx_quick_consumer_index2 ||
5116 sblk->status_tx_quick_consumer_index2)
5117 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
5118 sblk->status_rx_quick_consumer_index2,
5119 sblk->status_tx_quick_consumer_index2);
5120
5121 if (sblk->status_rx_quick_consumer_index3 ||
5122 sblk->status_tx_quick_consumer_index3)
5123 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
5124 sblk->status_rx_quick_consumer_index3,
5125 sblk->status_tx_quick_consumer_index3);
5126
5127 if (sblk->status_rx_quick_consumer_index4 ||
5128 sblk->status_rx_quick_consumer_index5)
5129 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
5130 sblk->status_rx_quick_consumer_index4,
5131 sblk->status_rx_quick_consumer_index5);
5132
5133 if (sblk->status_rx_quick_consumer_index6 ||
5134 sblk->status_rx_quick_consumer_index7)
5135 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
5136 sblk->status_rx_quick_consumer_index6,
5137 sblk->status_rx_quick_consumer_index7);
5138
5139 if (sblk->status_rx_quick_consumer_index8 ||
5140 sblk->status_rx_quick_consumer_index9)
5141 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
5142 sblk->status_rx_quick_consumer_index8,
5143 sblk->status_rx_quick_consumer_index9);
5144
5145 if (sblk->status_rx_quick_consumer_index10 ||
5146 sblk->status_rx_quick_consumer_index11)
5147 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
5148 sblk->status_rx_quick_consumer_index10,
5149 sblk->status_rx_quick_consumer_index11);
5150
5151 if (sblk->status_rx_quick_consumer_index12 ||
5152 sblk->status_rx_quick_consumer_index13)
5153 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
5154 sblk->status_rx_quick_consumer_index12,
5155 sblk->status_rx_quick_consumer_index13);
5156
5157 if (sblk->status_rx_quick_consumer_index14 ||
5158 sblk->status_rx_quick_consumer_index15)
5159 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
5160 sblk->status_rx_quick_consumer_index14,
5161 sblk->status_rx_quick_consumer_index15);
5162
5163 if (sblk->status_completion_producer_index ||
5164 sblk->status_cmd_consumer_index)
5165 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
5166 sblk->status_completion_producer_index,
5167 sblk->status_cmd_consumer_index);
5168
5169 BNX_PRINTF(sc, "-------------------------------------------"
5170 "-----------------------------\n");
5171 }
5172
5173 /*
5174 * This routine prints the statistics block.
5175 */
5176 void
5177 bnx_dump_stats_block(struct bnx_softc *sc)
5178 {
5179 struct statistics_block *sblk;
5180 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5181 BUS_DMASYNC_POSTREAD);
5182
5183 sblk = sc->stats_block;
5184
5185 BNX_PRINTF(sc, ""
5186 "-----------------------------"
5187 " Stats Block "
5188 "-----------------------------\n");
5189
5190 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
5191 "IfHcInBadOctets = 0x%08X:%08X\n",
5192 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5193 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5194
5195 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
5196 "IfHcOutBadOctets = 0x%08X:%08X\n",
5197 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5198 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5199
5200 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
5201 "IfHcInMulticastPkts = 0x%08X:%08X\n",
5202 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5203 sblk->stat_IfHCInMulticastPkts_hi,
5204 sblk->stat_IfHCInMulticastPkts_lo);
5205
5206 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
5207 "IfHcOutUcastPkts = 0x%08X:%08X\n",
5208 sblk->stat_IfHCInBroadcastPkts_hi,
5209 sblk->stat_IfHCInBroadcastPkts_lo,
5210 sblk->stat_IfHCOutUcastPkts_hi,
5211 sblk->stat_IfHCOutUcastPkts_lo);
5212
5213 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5214 "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5215 sblk->stat_IfHCOutMulticastPkts_hi,
5216 sblk->stat_IfHCOutMulticastPkts_lo,
5217 sblk->stat_IfHCOutBroadcastPkts_hi,
5218 sblk->stat_IfHCOutBroadcastPkts_lo);
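
	/*
	 * The IfHC* counters above are 64-bit hardware counters kept as
	 * split hi/lo 32-bit words in the statistics block; the sketch
	 * after this function shows how a caller might fold a pair back
	 * into a single 64-bit value.
	 */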
5219
5220 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5221 BNX_PRINTF(sc, "0x%08X : "
5222 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5223 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5224
5225 if (sblk->stat_Dot3StatsCarrierSenseErrors)
5226 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5227 sblk->stat_Dot3StatsCarrierSenseErrors);
5228
5229 if (sblk->stat_Dot3StatsFCSErrors)
5230 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5231 sblk->stat_Dot3StatsFCSErrors);
5232
5233 if (sblk->stat_Dot3StatsAlignmentErrors)
5234 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5235 sblk->stat_Dot3StatsAlignmentErrors);
5236
5237 if (sblk->stat_Dot3StatsSingleCollisionFrames)
5238 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5239 sblk->stat_Dot3StatsSingleCollisionFrames);
5240
5241 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5242 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5243 sblk->stat_Dot3StatsMultipleCollisionFrames);
5244
5245 if (sblk->stat_Dot3StatsDeferredTransmissions)
5246 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5247 sblk->stat_Dot3StatsDeferredTransmissions);
5248
5249 if (sblk->stat_Dot3StatsExcessiveCollisions)
5250 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5251 sblk->stat_Dot3StatsExcessiveCollisions);
5252
5253 if (sblk->stat_Dot3StatsLateCollisions)
5254 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5255 sblk->stat_Dot3StatsLateCollisions);
5256
5257 if (sblk->stat_EtherStatsCollisions)
5258 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5259 sblk->stat_EtherStatsCollisions);
5260
5261 if (sblk->stat_EtherStatsFragments)
5262 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5263 sblk->stat_EtherStatsFragments);
5264
5265 if (sblk->stat_EtherStatsJabbers)
5266 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5267 sblk->stat_EtherStatsJabbers);
5268
5269 if (sblk->stat_EtherStatsUndersizePkts)
5270 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5271 sblk->stat_EtherStatsUndersizePkts);
5272
5273 if (sblk->stat_EtherStatsOverrsizePkts)
5274 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5275 sblk->stat_EtherStatsOverrsizePkts);
5276
5277 if (sblk->stat_EtherStatsPktsRx64Octets)
5278 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5279 sblk->stat_EtherStatsPktsRx64Octets);
5280
5281 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5282 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5283 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5284
5285 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5286 BNX_PRINTF(sc, "0x%08X : "
5287 "EtherStatsPktsRx128Octetsto255Octets\n",
5288 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5289
5290 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5291 BNX_PRINTF(sc, "0x%08X : "
5292 "EtherStatsPktsRx256Octetsto511Octets\n",
5293 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5294
5295 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5296 BNX_PRINTF(sc, "0x%08X : "
5297 "EtherStatsPktsRx512Octetsto1023Octets\n",
5298 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5299
5300 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5301 BNX_PRINTF(sc, "0x%08X : "
5302 "EtherStatsPktsRx1024Octetsto1522Octets\n",
5303 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5304
5305 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5306 BNX_PRINTF(sc, "0x%08X : "
5307 "EtherStatsPktsRx1523Octetsto9022Octets\n",
5308 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5309
5310 if (sblk->stat_EtherStatsPktsTx64Octets)
5311 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5312 sblk->stat_EtherStatsPktsTx64Octets);
5313
5314 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5315 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5316 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
5317
5318 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
5319 BNX_PRINTF(sc, "0x%08X : "
5320 "EtherStatsPktsTx128Octetsto255Octets\n",
5321 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
5322
5323 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
5324 BNX_PRINTF(sc, "0x%08X : "
5325 "EtherStatsPktsTx256Octetsto511Octets\n",
5326 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
5327
5328 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
5329 BNX_PRINTF(sc, "0x%08X : "
5330 "EtherStatsPktsTx512Octetsto1023Octets\n",
5331 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
5332
5333 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
5334 BNX_PRINTF(sc, "0x%08X : "
5335 "EtherStatsPktsTx1024Octetsto1522Octets\n",
5336 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
5337
5338 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
5339 BNX_PRINTF(sc, "0x%08X : "
5340 "EtherStatsPktsTx1523Octetsto9022Octets\n",
5341 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
5342
5343 if (sblk->stat_XonPauseFramesReceived)
5344 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
5345 sblk->stat_XonPauseFramesReceived);
5346
5347 if (sblk->stat_XoffPauseFramesReceived)
5348 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
5349 sblk->stat_XoffPauseFramesReceived);
5350
5351 if (sblk->stat_OutXonSent)
5352 BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
5353 sblk->stat_OutXonSent);
5354
5355 if (sblk->stat_OutXoffSent)
5356 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
5357 sblk->stat_OutXoffSent);
5358
5359 if (sblk->stat_FlowControlDone)
5360 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
5361 sblk->stat_FlowControlDone);
5362
5363 if (sblk->stat_MacControlFramesReceived)
5364 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
5365 sblk->stat_MacControlFramesReceived);
5366
5367 if (sblk->stat_XoffStateEntered)
5368 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
5369 sblk->stat_XoffStateEntered);
5370
5371 if (sblk->stat_IfInFramesL2FilterDiscards)
5372 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
5373 sblk->stat_IfInFramesL2FilterDiscards);
5374
5375 if (sblk->stat_IfInRuleCheckerDiscards)
5376 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
5377 sblk->stat_IfInRuleCheckerDiscards);
5378
5379 if (sblk->stat_IfInFTQDiscards)
5380 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
5381 sblk->stat_IfInFTQDiscards);
5382
5383 if (sblk->stat_IfInMBUFDiscards)
5384 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
5385 sblk->stat_IfInMBUFDiscards);
5386
5387 if (sblk->stat_IfInRuleCheckerP4Hit)
5388 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
5389 sblk->stat_IfInRuleCheckerP4Hit);
5390
5391 if (sblk->stat_CatchupInRuleCheckerDiscards)
5392 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
5393 sblk->stat_CatchupInRuleCheckerDiscards);
5394
5395 if (sblk->stat_CatchupInFTQDiscards)
5396 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
5397 sblk->stat_CatchupInFTQDiscards);
5398
5399 if (sblk->stat_CatchupInMBUFDiscards)
5400 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
5401 sblk->stat_CatchupInMBUFDiscards);
5402
5403 if (sblk->stat_CatchupInRuleCheckerP4Hit)
5404 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
5405 sblk->stat_CatchupInRuleCheckerP4Hit);
5406
5407 BNX_PRINTF(sc,
5408 "-----------------------------"
5409 "--------------"
5410 "-----------------------------\n");
5411 }
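
/*
 * Illustrative sketch only (bnx_stat64 is a hypothetical helper, not part
 * of this driver): folding one of the split hi/lo statistics counters
 * dumped above into a single 64-bit value.
 */
#if 0
static u_int64_t
bnx_stat64(u_int32_t hi, u_int32_t lo)
{
	/* The chip maintains the upper and lower words separately. */
	return (((u_int64_t)hi << 32) | (u_int64_t)lo);
}
#endif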
5412
5413 void
5414 bnx_dump_driver_state(struct bnx_softc *sc)
5415 {
5416 BNX_PRINTF(sc,
5417 "-----------------------------"
5418 " Driver State "
5419 "-----------------------------\n");
5420
5421 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
5422 "address\n", sc);
5423
5424 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
5425 sc->status_block);
5426
5427 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
5428 "address\n", sc->stats_block);
5429
5430 	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
5431 	    "address\n", sc->tx_bd_chain);
5432
5433 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
5434 sc->rx_bd_chain);
5435
5436 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
5437 sc->tx_mbuf_ptr);
5438
5439 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
5440 sc->rx_mbuf_ptr);
5441
5442 BNX_PRINTF(sc,
5443 " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
5444 sc->interrupts_generated);
5445
5446 BNX_PRINTF(sc,
5447 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
5448 sc->rx_interrupts);
5449
5450 BNX_PRINTF(sc,
5451 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
5452 sc->tx_interrupts);
5453
5454 BNX_PRINTF(sc,
5455 " 0x%08X - (sc->last_status_idx) status block index\n",
5456 sc->last_status_idx);
5457
5458 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
5459 sc->tx_prod);
5460
5461 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
5462 sc->tx_cons);
5463
5464 BNX_PRINTF(sc,
5465 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
5466 sc->tx_prod_bseq);
5467
5468 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
5469 sc->rx_prod);
5470
5471 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
5472 sc->rx_cons);
5473
5474 BNX_PRINTF(sc,
5475 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
5476 sc->rx_prod_bseq);
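
	/*
	 * The *_bseq values above are running byte-sequence counts that the
	 * driver posts to the chip's mailbox registers alongside the
	 * corresponding producer indices.
	 */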
5477
5478 BNX_PRINTF(sc,
5479 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5480 sc->rx_mbuf_alloc);
5481
5482 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
5483 sc->free_rx_bd);
5484
5485 BNX_PRINTF(sc,
5486 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
5487 sc->rx_low_watermark, (u_int32_t) USABLE_RX_BD);
5488
5489 	BNX_PRINTF(sc,
5490 	    " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
5491 	    sc->tx_mbuf_alloc);
5492
5497 BNX_PRINTF(sc, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
5498 sc->used_tx_bd);
5499
5500 BNX_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
5501 sc->tx_hi_watermark, (u_int32_t) USABLE_TX_BD);
5502
5503 BNX_PRINTF(sc,
5504 " 0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
5505 sc->mbuf_alloc_failed);
5506
5507 BNX_PRINTF(sc, "-------------------------------------------"
5508 "-----------------------------\n");
5509 }
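
/*
 * Illustrative sketch only (bnx_ring_pct is a hypothetical helper, not part
 * of this driver): the watermark lines above pair a recorded extreme against
 * the usable ring size, which could equally be reported as a percentage.
 */
#if 0
static u_int32_t
bnx_ring_pct(u_int32_t mark, u_int32_t total)
{
	/* Guard against a zero-sized ring before dividing. */
	return (total ? (mark * 100) / total : 0);
}
#endif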
5510
5511 void
5512 bnx_dump_hw_state(struct bnx_softc *sc)
5513 {
5514 u_int32_t val1;
5515 int i;
5516
5517 BNX_PRINTF(sc,
5518 "----------------------------"
5519 " Hardware State "
5520 "----------------------------\n");
5521
5522 BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
5523
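	/* Each line below pairs the register contents with its offset. */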
5524 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
5525 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
5526 val1, BNX_MISC_ENABLE_STATUS_BITS);
5527
5528 val1 = REG_RD(sc, BNX_DMA_STATUS);
5529 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
5530
5531 val1 = REG_RD(sc, BNX_CTX_STATUS);
5532 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
5533
5534 val1 = REG_RD(sc, BNX_EMAC_STATUS);
5535 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
5536 BNX_EMAC_STATUS);
5537
5538 val1 = REG_RD(sc, BNX_RPM_STATUS);
5539 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
5540
5541 val1 = REG_RD(sc, BNX_TBDR_STATUS);
5542 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
5543 BNX_TBDR_STATUS);
5544
5545 val1 = REG_RD(sc, BNX_TDMA_STATUS);
5546 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
5547 BNX_TDMA_STATUS);
5548
5549 val1 = REG_RD(sc, BNX_HC_STATUS);
5550 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
5551
5552 BNX_PRINTF(sc,
5553 "----------------------------"
5554 "----------------"
5555 "----------------------------\n");
5556
5557 BNX_PRINTF(sc,
5558 "----------------------------"
5559 " Register Dump "
5560 "----------------------------\n");
5561
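	/* Dump registers 0x400 through 0x7ffc, four 32-bit words per row. */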
5562 for (i = 0x400; i < 0x8000; i += 0x10)
5563 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
5564 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
5565 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
5566
5567 BNX_PRINTF(sc,
5568 "----------------------------"
5569 "----------------"
5570 "----------------------------\n");
5571 }
5572
5573 void
5574 bnx_breakpoint(struct bnx_softc *sc)
5575 {
5576 /* Unreachable code to shut the compiler up about unused functions. */
5577 if (0) {
5578 bnx_dump_txbd(sc, 0, NULL);
5579 bnx_dump_rxbd(sc, 0, NULL);
5580 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
5581 bnx_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
5582 bnx_dump_l2fhdr(sc, 0, NULL);
5583 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
5584 bnx_dump_rx_chain(sc, 0, USABLE_RX_BD);
5585 bnx_dump_status_block(sc);
5586 bnx_dump_stats_block(sc);
5587 bnx_dump_driver_state(sc);
5588 bnx_dump_hw_state(sc);
5589 }
5590
5591 bnx_dump_driver_state(sc);
5592 /* Print the important status block fields. */
5593 bnx_dump_status_block(sc);
5594
5595 #if 0
5596 /* Call the debugger. */
5597 breakpoint();
5598 #endif
5599
5600 return;
5601 }
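
/*
 * Usage note (illustrative): fault checks elsewhere in this driver reach
 * this routine through debug macros from if_bnxreg.h; a call site might
 * look like the following, where the condition is an example and not a
 * quote from the code.
 *
 *	DBRUNIF((sc->free_rx_bd == 0), bnx_breakpoint(sc));
 */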
5602 #endif
5603