/*	$NetBSD: if_bnx.c,v 1.23 2009/03/18 16:00:19 cegger Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.43 2007/01/30 03:21:10 krw Exp $	*/

/*-
 * Copyright (c) 2006 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.23 2009/03/18 16:00:19 cegger Exp $");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5708C B1, B2
 *
 * The following controllers are not supported by this driver:
 * (These are not "Production" versions of the controller.)
 *
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1, A2, A3
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0, B1
 */

#include <sys/callout.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
const char bnx_driver_version[] = "v0.9.6";

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
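
/*
 * A note on the table above: each setting is compared against a 31-bit
 * range, so the failure probability works out to value / 2^31 (for
 * example, 256 / 2^31 = 1 in 8,388,608 and 1073741824 / 2^31 = 1 in 2).
 * Exactly how the random draw is made is left to the debug macros; the
 * table itself only fixes the ratios.
 */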

/* Controls how often the l2_fhdr frame error check will fail. */
int bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
	  "HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
	  "HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  0, 0,
	  "Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
	  "HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
	  0, 0,
	  "Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/****************************************************************************/
/* NetBSD device entry points.                                              */
/****************************************************************************/
static int bnx_probe(device_t, cfdata_t, void *);
void bnx_attach(device_t, device_t, void *);
int bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void bnx_dump_tx_chain(struct bnx_softc *, int, int);
void bnx_dump_rx_chain(struct bnx_softc *, int, int);
void bnx_dump_status_block(struct bnx_softc *);
void bnx_dump_stats_block(struct bnx_softc *);
void bnx_dump_driver_state(struct bnx_softc *);
void bnx_dump_hw_state(struct bnx_softc *);
void bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
u_int32_t bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
void bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
void bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
int bnx_miibus_read_reg(device_t, int, int);
void bnx_miibus_write_reg(device_t, int, int, int);
void bnx_miibus_statchg(device_t);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int bnx_acquire_nvram_lock(struct bnx_softc *);
int bnx_release_nvram_lock(struct bnx_softc *);
void bnx_enable_nvram_access(struct bnx_softc *);
void bnx_disable_nvram_access(struct bnx_softc *);
int bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
    u_int32_t);
int bnx_init_nvram(struct bnx_softc *);
int bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
int bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int bnx_enable_nvram_write(struct bnx_softc *);
void bnx_disable_nvram_write(struct bnx_softc *);
int bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
int bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
    u_int32_t);
int bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
int bnx_dma_alloc(struct bnx_softc *);
void bnx_dma_free(struct bnx_softc *);
void bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int bnx_fw_sync(struct bnx_softc *, u_int32_t);
void bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
    u_int32_t);
void bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
    struct fw_info *);
void bnx_init_cpus(struct bnx_softc *);

void bnx_stop(struct ifnet *, int);
int bnx_reset(struct bnx_softc *, u_int32_t);
int bnx_chipinit(struct bnx_softc *);
int bnx_blockinit(struct bnx_softc *);
static int bnx_add_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
    u_int16_t *, u_int32_t *);
int bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);

int bnx_init_tx_chain(struct bnx_softc *);
int bnx_init_rx_chain(struct bnx_softc *);
void bnx_free_rx_chain(struct bnx_softc *);
void bnx_free_tx_chain(struct bnx_softc *);

int bnx_tx_encap(struct bnx_softc *, struct mbuf **);
void bnx_start(struct ifnet *);
int bnx_ioctl(struct ifnet *, u_long, void *);
void bnx_watchdog(struct ifnet *);
int bnx_init(struct ifnet *);

void bnx_init_context(struct bnx_softc *);
void bnx_get_mac_addr(struct bnx_softc *);
void bnx_set_mac_addr(struct bnx_softc *);
void bnx_phy_intr(struct bnx_softc *);
void bnx_rx_intr(struct bnx_softc *);
void bnx_tx_intr(struct bnx_softc *);
void bnx_disable_intr(struct bnx_softc *);
void bnx_enable_intr(struct bnx_softc *);

int bnx_intr(void *);
void bnx_set_rx_mode(struct bnx_softc *);
void bnx_stats_update(struct bnx_softc *);
void bnx_tick(void *);

/****************************************************************************/
/* NetBSD device dispatch table.                                            */
/****************************************************************************/
CFATTACH_DECL_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 if the device is supported, 0 otherwise.                             */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return (1);

	return (0);
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int32_t command;
	struct ifnet *ifp;
	u_int32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BNX_PCI_BAR0,
		    memtype, 0, &sc->bnx_btag, &sc->bnx_bhandle,
		    NULL, &sc->bnx_size) == 0)
			break;
	default:
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BNX_CHIP_ID(sc)) {
	case BNX_CHIP_ID_5706_A0:
	case BNX_CHIP_ID_5706_A1:
	case BNX_CHIP_ID_5708_A0:
	case BNX_CHIP_ID_5708_B0:
		aprint_error_dev(sc->bnx_dev,
		    "unsupported controller revision (%c%d)!\n",
		    ((PCI_REVISION(pa->pa_class) & 0xf0) >> 4) + 'A',
		    PCI_REVISION(pa->pa_class) & 0x0f);
		goto bnx_attach_fail;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt, while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.
	 * Set the default values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;
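	/*
	 * Note: 1000000 & 0xffff00 evaluates to 999936, so the update
	 * interval is just under one second; masking off the low byte
	 * presumably keeps the value aligned to what the statistics
	 * ticks field accepts (an assumption based on the mask alone).
	 */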

	/*
	 * The copper based NetXtreme II controllers that support 2.5Gb
	 * operation (currently the 5708S) use a PHY at address 2;
	 * otherwise the PHY is present at address 1.
	 */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
		sc->bnx_flags |= BNX_NO_WOL_FLAG;
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G)
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
		}
	}

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hook up the IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (0);
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
u_int32_t
bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		u_int32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return (val);
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset,
    u_int32_t val)
{

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
	    "val = 0x%08X\n", __func__, cid_addr, offset, val);

	offset += cid_addr;
	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
	REG_WR(sc, BNX_CTX_DATA, val);
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_private(dev);
	u_int32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return (0);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_private(dev);
	u_int32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by firmware and lock 2 is used by the driver; the        */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	u_int32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 1 is used by firmware and lock 2 is used by the driver; the        */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish nvram interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}
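
/*
 * A minimal usage sketch for the two routines above (hypothetical, but it
 * mirrors the acquire/enable ... disable/release pattern that
 * bnx_nvram_read() follows later in this file):
 */
#if 0
	if (bnx_acquire_nvram_lock(sc) == 0) {
		bnx_enable_nvram_access(sc);
		rc = bnx_nvram_read_dword(sc, 0, buf,
		    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST);
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);
	}
#endif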

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.                                               */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!sc->bnx_flash_info->buffered) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bnx_flash_info->buffered)
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
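	/*
	 * Worked example (assuming the AT45DB011B entry in flash_table,
	 * i.e. 264-byte pages with BUFFERED_FLASH_PAGE_BITS == 9): a
	 * linear offset of 300 becomes ((300 / 264) << 9) + (300 % 264)
	 * = 512 + 36 = 548.
	 */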

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return (rc);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	u_int32_t val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u_int32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return (rc);

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
	}

	/* Write the flash config data to the shared memory interface. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.             */
/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
    int buf_size)
{
	int rc = 0;
	u_int32_t cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return (0);

	/* Request access to the flash interface. */
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);

	/* Enable access to flash interface */
	bnx_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u_int8_t buf[4];
		u_int32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
		} else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		if (rc)
			return (rc);

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}
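	/*
	 * Worked example: a 7-byte read at offset 2 reaches this point
	 * with len32 == 5, so extra becomes 3 and len32 rounds up to 8;
	 * the final dword below is read in full but only 4 - extra == 1
	 * byte of it is copied out.
	 */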

	if (len32 == 4) {
		u_int8_t buf[4];

		if (cmd_flags)
			cmd_flags = BNX_NVM_COMMAND_LAST;
		else
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		u_int8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return (rc);

		cmd_flags = BNX_NVM_COMMAND_LAST;
		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface and release the lock. */
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);

	return (rc);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
/*                                                                          */
/* Prepares the NVRAM interface for write access and writes the requested  */
/* data from the supplied buffer.  The caller is responsible for           */
/* calculating any appropriate CRCs.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
    int buf_size)
{
	u_int32_t written, offset32, len32;
	u_int8_t *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return (rc);
	}

	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				return (rc);
			}
		}
	}

	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return (ENOMEM);

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u_int32_t page_start, page_end, data_start, data_end;
		u_int32_t addr, cmd_flags;
		int i;
		u_int8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;
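		/*
		 * Worked example (dword-aligned case): a 12-byte write at
		 * offset 248 with 256-byte pages takes two passes through
		 * this loop: first page_start = 0, data_start = 248,
		 * data_end = 256, then page_start = 256, data_start = 256,
		 * data_end = 260.
		 */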
1681
1682 /* Request access to the flash interface. */
1683 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1684 goto nvram_write_end;
1685
1686 /* Enable access to flash interface */
1687 bnx_enable_nvram_access(sc);
1688
1689 cmd_flags = BNX_NVM_COMMAND_FIRST;
1690 if (sc->bnx_flash_info->buffered == 0) {
1691 int j;
1692
1693 /* Read the whole page into the buffer
1694 * (non-buffer flash only) */
1695 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1696 if (j == (sc->bnx_flash_info->page_size - 4))
1697 cmd_flags |= BNX_NVM_COMMAND_LAST;
1698
1699 rc = bnx_nvram_read_dword(sc,
1700 page_start + j,
1701 &flash_buffer[j],
1702 cmd_flags);
1703
1704 if (rc)
1705 goto nvram_write_end;
1706
1707 cmd_flags = 0;
1708 }
1709 }
1710
1711 /* Enable writes to flash interface (unlock write-protect) */
1712 if ((rc = bnx_enable_nvram_write(sc)) != 0)
1713 goto nvram_write_end;
1714
1715 /* Erase the page */
1716 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1717 goto nvram_write_end;
1718
1719 /* Re-enable the write again for the actual write */
1720 bnx_enable_nvram_write(sc);
1721
1722 /* Loop to write back the buffer data from page_start to
1723 * data_start */
1724 i = 0;
1725 if (sc->bnx_flash_info->buffered == 0) {
1726 for (addr = page_start; addr < data_start;
1727 addr += 4, i += 4) {
1728
1729 rc = bnx_nvram_write_dword(sc, addr,
1730 &flash_buffer[i], cmd_flags);
1731
1732 if (rc != 0)
1733 goto nvram_write_end;
1734
1735 cmd_flags = 0;
1736 }
1737 }
1738
1739 /* Loop to write the new data from data_start to data_end */
1740 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1741 if ((addr == page_end - 4) ||
1742 ((sc->bnx_flash_info->buffered) &&
1743 (addr == data_end - 4))) {
1744
1745 cmd_flags |= BNX_NVM_COMMAND_LAST;
1746 }
1747
1748 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
1749
1750 if (rc != 0)
1751 goto nvram_write_end;
1752
1753 cmd_flags = 0;
1754 buf += 4;
1755 }
1756
1757 /* Loop to write back the buffer data from data_end
1758 * to page_end */
1759 if (sc->bnx_flash_info->buffered == 0) {
1760 for (addr = data_end; addr < page_end;
1761 addr += 4, i += 4) {
1762
1763 if (addr == page_end-4)
1764 cmd_flags = BNX_NVM_COMMAND_LAST;
1765
1766 rc = bnx_nvram_write_dword(sc, addr,
1767 &flash_buffer[i], cmd_flags);
1768
1769 if (rc != 0)
1770 goto nvram_write_end;
1771
1772 cmd_flags = 0;
1773 }
1774 }
1775
1776 /* Disable writes to flash interface (lock write-protect) */
1777 bnx_disable_nvram_write(sc);
1778
1779 /* Disable access to flash interface */
1780 bnx_disable_nvram_access(sc);
1781 bnx_release_nvram_lock(sc);
1782
1783 /* Increment written */
1784 written += data_end - data_start;
1785 }
1786
1787 nvram_write_end:
1788 if (align_start || align_end)
1789 free(buf, M_DEVBUF);
1790
1791 return (rc);
1792 }
1793 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1794
1795 /****************************************************************************/
1796 /* Verifies that NVRAM is accessible and contains valid data. */
1797 /* */
1798 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1799 /* correct. */
1800 /* */
1801 /* Returns: */
1802 /* 0 on success, positive value on failure. */
1803 /****************************************************************************/
1804 int
1805 bnx_nvram_test(struct bnx_softc *sc)
1806 {
1807 u_int32_t buf[BNX_NVRAM_SIZE / 4];
1808 u_int8_t *data = (u_int8_t *) buf;
1809 int rc = 0;
1810 u_int32_t magic, csum;
1811
1812 /*
1813 * Check that the device NVRAM is valid by reading
1814 * the magic value at offset 0.
1815 */
1816 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
1817 goto bnx_nvram_test_done;
1818
1819 magic = bnx_be32toh(buf[0]);
1820 if (magic != BNX_NVRAM_MAGIC) {
1821 rc = ENODEV;
1822 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
1823 "Expected: 0x%08X, Found: 0x%08X\n",
1824 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
1825 goto bnx_nvram_test_done;
1826 }
1827
1828 /*
1829 * Verify that the device NVRAM includes valid
1830 * configuration data.
1831 */
1832 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
1833 goto bnx_nvram_test_done;
1834
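	/*
	 * Each 0x100-byte configuration region carries its own CRC32.
	 * Running CRC32 over a region together with its stored CRC leaves
	 * a fixed residual value whenever the data is intact.
	 */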
1835 csum = ether_crc32_le(data, 0x100);
1836 if (csum != BNX_CRC32_RESIDUAL) {
1837 rc = ENODEV;
1838 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
1839 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1840 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1841 goto bnx_nvram_test_done;
1842 }
1843
1844 csum = ether_crc32_le(data + 0x100, 0x100);
1845 if (csum != BNX_CRC32_RESIDUAL) {
1846 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
1847 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1848 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1849 rc = ENODEV;
1850 }
1851
1852 bnx_nvram_test_done:
1853 return (rc);
1854 }
1855
1856 /****************************************************************************/
1857 /* Free any DMA memory owned by the driver. */
1858 /* */
1859 /* Scans through each data structure that requires DMA memory and frees */
1860 /* the memory if allocated. */
1861 /* */
1862 /* Returns: */
1863 /* Nothing. */
1864 /****************************************************************************/
1865 void
1866 bnx_dma_free(struct bnx_softc *sc)
1867 {
1868 int i;
1869
1870 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1871
1872 /* Destroy the status block. */
1873 if (sc->status_block != NULL && sc->status_map != NULL) {
1874 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
1875 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
1876 BNX_STATUS_BLK_SZ);
1877 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
1878 sc->status_rseg);
1879 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
1880 sc->status_block = NULL;
1881 sc->status_map = NULL;
1882 }
1883
1884 /* Destroy the statistics block. */
1885 if (sc->stats_block != NULL && sc->stats_map != NULL) {
1886 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
1887 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
1888 BNX_STATS_BLK_SZ);
1889 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
1890 sc->stats_rseg);
1891 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
1892 sc->stats_block = NULL;
1893 sc->stats_map = NULL;
1894 }
1895
1896 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
1897 for (i = 0; i < TX_PAGES; i++ ) {
1898 if (sc->tx_bd_chain[i] != NULL &&
1899 sc->tx_bd_chain_map[i] != NULL) {
1900 bus_dmamap_unload(sc->bnx_dmatag,
1901 sc->tx_bd_chain_map[i]);
1902 bus_dmamem_unmap(sc->bnx_dmatag,
1903 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
1904 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
1905 sc->tx_bd_chain_rseg[i]);
1906 bus_dmamap_destroy(sc->bnx_dmatag,
1907 sc->tx_bd_chain_map[i]);
1908 sc->tx_bd_chain[i] = NULL;
1909 sc->tx_bd_chain_map[i] = NULL;
1910 }
1911 }
1912
1913 /* Unload and destroy the TX mbuf maps. */
1914 for (i = 0; i < TOTAL_TX_BD; i++) {
1915 if (sc->tx_mbuf_map[i] != NULL) {
1916 bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
1917 bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
1918 }
1919 }
1920
1921 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
1922 for (i = 0; i < RX_PAGES; i++ ) {
1923 if (sc->rx_bd_chain[i] != NULL &&
1924 sc->rx_bd_chain_map[i] != NULL) {
1925 bus_dmamap_unload(sc->bnx_dmatag,
1926 sc->rx_bd_chain_map[i]);
1927 bus_dmamem_unmap(sc->bnx_dmatag,
1928 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
1929 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
1930 sc->rx_bd_chain_rseg[i]);
1931
1932 bus_dmamap_destroy(sc->bnx_dmatag,
1933 sc->rx_bd_chain_map[i]);
1934 sc->rx_bd_chain[i] = NULL;
1935 sc->rx_bd_chain_map[i] = NULL;
1936 }
1937 }
1938
1939 /* Unload and destroy the RX mbuf maps. */
1940 for (i = 0; i < TOTAL_RX_BD; i++) {
1941 if (sc->rx_mbuf_map[i] != NULL) {
1942 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
1943 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
1944 }
1945 }
1946
1947 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
1948 }
1949
1950 /****************************************************************************/
1951 /* Allocate any DMA memory needed by the driver. */
1952 /* */
1953 /* Allocates DMA memory needed for the various global structures needed by */
1954 /* hardware. */
1955 /* */
1956 /* Returns: */
1957 /* 0 for success, positive value for failure. */
1958 /****************************************************************************/
1959 int
1960 bnx_dma_alloc(struct bnx_softc *sc)
1961 {
1962 int i, rc = 0;
1963
1964 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1965
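	/*
	 * Every DMA area below follows the same four-step bus_dma(9)
	 * sequence: bus_dmamap_create() builds a map, bus_dmamem_alloc()
	 * reserves raw segments, bus_dmamem_map() maps them into kernel
	 * virtual address space, and bus_dmamap_load() yields the
	 * device-visible bus address.
	 */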
1966 /*
1967 * Allocate DMA memory for the status block, map the memory into DMA
1968 * space, and fetch the physical address of the block.
1969 */
1970 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
1971 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
1972 aprint_error_dev(sc->bnx_dev,
1973 "Could not create status block DMA map!\n");
1974 rc = ENOMEM;
1975 goto bnx_dma_alloc_exit;
1976 }
1977
1978 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
1979 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
1980 &sc->status_rseg, BUS_DMA_NOWAIT)) {
1981 aprint_error_dev(sc->bnx_dev,
1982 "Could not allocate status block DMA memory!\n");
1983 rc = ENOMEM;
1984 goto bnx_dma_alloc_exit;
1985 }
1986
1987 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
1988 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
1989 aprint_error_dev(sc->bnx_dev,
1990 "Could not map status block DMA memory!\n");
1991 rc = ENOMEM;
1992 goto bnx_dma_alloc_exit;
1993 }
1994
1995 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
1996 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
1997 aprint_error_dev(sc->bnx_dev,
1998 "Could not load status block DMA memory!\n");
1999 rc = ENOMEM;
2000 goto bnx_dma_alloc_exit;
2001 }
2002
2003 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2004 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);
2005
2006 /* DRC - Fix for 64 bit addresses. */
2007 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2008 (u_int32_t) sc->status_block_paddr);
2009
2010 /*
2011 * Allocate DMA memory for the statistics block, map the memory into
2012 * DMA space, and fetch the physical address of the block.
2013 */
2014 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2015 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2016 aprint_error_dev(sc->bnx_dev,
2017 "Could not create stats block DMA map!\n");
2018 rc = ENOMEM;
2019 goto bnx_dma_alloc_exit;
2020 }
2021
2022 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2023 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2024 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2025 aprint_error_dev(sc->bnx_dev,
2026 "Could not allocate stats block DMA memory!\n");
2027 rc = ENOMEM;
2028 goto bnx_dma_alloc_exit;
2029 }
2030
2031 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2032 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2033 aprint_error_dev(sc->bnx_dev,
2034 "Could not map stats block DMA memory!\n");
2035 rc = ENOMEM;
2036 goto bnx_dma_alloc_exit;
2037 }
2038
2039 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2040 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2041 aprint_error_dev(sc->bnx_dev,
2042 		    "Could not load stats block DMA memory!\n");
2043 rc = ENOMEM;
2044 goto bnx_dma_alloc_exit;
2045 }
2046
2047 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2048 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2049
2050 /* DRC - Fix for 64 bit address. */
2051 	DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2052 (u_int32_t) sc->stats_block_paddr);
2053
2054 /*
2055 * Allocate DMA memory for the TX buffer descriptor chain,
2056 * and fetch the physical address of the block.
2057 */
2058 for (i = 0; i < TX_PAGES; i++) {
2059 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2060 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2061 &sc->tx_bd_chain_map[i])) {
2062 aprint_error_dev(sc->bnx_dev,
2063 "Could not create Tx desc %d DMA map!\n", i);
2064 rc = ENOMEM;
2065 goto bnx_dma_alloc_exit;
2066 }
2067
2068 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2069 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2070 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2071 aprint_error_dev(sc->bnx_dev,
2072 "Could not allocate TX desc %d DMA memory!\n",
2073 i);
2074 rc = ENOMEM;
2075 goto bnx_dma_alloc_exit;
2076 }
2077
2078 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2079 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2080 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2081 aprint_error_dev(sc->bnx_dev,
2082 "Could not map TX desc %d DMA memory!\n", i);
2083 rc = ENOMEM;
2084 goto bnx_dma_alloc_exit;
2085 }
2086
2087 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2088 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2089 BUS_DMA_NOWAIT)) {
2090 aprint_error_dev(sc->bnx_dev,
2091 "Could not load TX desc %d DMA memory!\n", i);
2092 rc = ENOMEM;
2093 goto bnx_dma_alloc_exit;
2094 }
2095
2096 sc->tx_bd_chain_paddr[i] =
2097 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2098
2099 /* DRC - Fix for 64 bit systems. */
2100 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2101 i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2102 }
2103
2104 /*
2105 * Create DMA maps for the TX buffer mbufs.
2106 */
2107 for (i = 0; i < TOTAL_TX_BD; i++) {
2108 if (bus_dmamap_create(sc->bnx_dmatag,
2109 MCLBYTES * BNX_MAX_SEGMENTS,
2110 USABLE_TX_BD - BNX_TX_SLACK_SPACE,
2111 MCLBYTES, 0, BUS_DMA_NOWAIT,
2112 &sc->tx_mbuf_map[i])) {
2113 aprint_error_dev(sc->bnx_dev,
2114 "Could not create Tx mbuf %d DMA map!\n", i);
2115 rc = ENOMEM;
2116 goto bnx_dma_alloc_exit;
2117 }
2118 }
2119
2120 /*
2121 * Allocate DMA memory for the Rx buffer descriptor chain,
2122 * and fetch the physical address of the block.
2123 */
2124 for (i = 0; i < RX_PAGES; i++) {
2125 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2126 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2127 &sc->rx_bd_chain_map[i])) {
2128 aprint_error_dev(sc->bnx_dev,
2129 "Could not create Rx desc %d DMA map!\n", i);
2130 rc = ENOMEM;
2131 goto bnx_dma_alloc_exit;
2132 }
2133
2134 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2135 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2136 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2137 aprint_error_dev(sc->bnx_dev,
2138 "Could not allocate Rx desc %d DMA memory!\n", i);
2139 rc = ENOMEM;
2140 goto bnx_dma_alloc_exit;
2141 }
2142
2143 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2144 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2145 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2146 aprint_error_dev(sc->bnx_dev,
2147 "Could not map Rx desc %d DMA memory!\n", i);
2148 rc = ENOMEM;
2149 goto bnx_dma_alloc_exit;
2150 }
2151
2152 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2153 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2154 BUS_DMA_NOWAIT)) {
2155 aprint_error_dev(sc->bnx_dev,
2156 "Could not load Rx desc %d DMA memory!\n", i);
2157 rc = ENOMEM;
2158 goto bnx_dma_alloc_exit;
2159 }
2160
2161 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
2162 sc->rx_bd_chain_paddr[i] =
2163 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2164
2165 /* DRC - Fix for 64 bit systems. */
2166 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2167 i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2168 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2169 0, BNX_RX_CHAIN_PAGE_SZ,
2170 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2171 }
2172
2173 /*
2174 * Create DMA maps for the Rx buffer mbufs.
2175 */
2176 for (i = 0; i < TOTAL_RX_BD; i++) {
2177 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2178 BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2179 &sc->rx_mbuf_map[i])) {
2180 aprint_error_dev(sc->bnx_dev,
2181 "Could not create Rx mbuf %d DMA map!\n", i);
2182 rc = ENOMEM;
2183 goto bnx_dma_alloc_exit;
2184 }
2185 }
2186
2187 bnx_dma_alloc_exit:
2188 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2189
2190 return(rc);
2191 }
2192
2193 /****************************************************************************/
2194 /* Release all resources used by the driver. */
2195 /* */
2196 /* Releases all resources acquired by the driver including interrupts, */
2197 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2198 /* */
2199 /* Returns: */
2200 /* Nothing. */
2201 /****************************************************************************/
2202 void
2203 bnx_release_resources(struct bnx_softc *sc)
2204 {
2205 int i;
2206 struct pci_attach_args *pa = &(sc->bnx_pa);
2207
2208 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2209
2210 bnx_dma_free(sc);
2211
2212 if (sc->bnx_intrhand != NULL)
2213 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2214
2215 if (sc->bnx_size)
2216 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2217
2218 for (i = 0; i < TOTAL_RX_BD; i++)
2219 if (sc->rx_mbuf_map[i])
2220 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2221
2222 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2223 }
2224
2225 /****************************************************************************/
2226 /* Firmware synchronization. */
2227 /* */
2228 /* Before performing certain events such as a chip reset, synchronize with */
2229 /* the firmware first. */
2230 /* */
2231 /* Returns: */
2232 /* 0 for success, positive value for failure. */
2233 /****************************************************************************/
2234 int
2235 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2236 {
2237 int i, rc = 0;
2238 u_int32_t val;
2239
2240 /* Don't waste any time if we've timed out before. */
2241 if (sc->bnx_fw_timed_out) {
2242 rc = EBUSY;
2243 goto bnx_fw_sync_exit;
2244 }
2245
2246 /* Increment the message sequence number. */
2247 sc->bnx_fw_wr_seq++;
2248 msg_data |= sc->bnx_fw_wr_seq;
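	/*
	 * The bootcode echoes this sequence number back in its own
	 * mailbox, which is how the acknowledgement is matched to this
	 * particular request below.
	 */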
2249
2250 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2251 msg_data);
2252
2253 /* Send the message to the bootcode driver mailbox. */
2254 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2255
2256 /* Wait for the bootcode to acknowledge the message. */
2257 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2258 /* Check for a response in the bootcode firmware mailbox. */
2259 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2260 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2261 break;
2262 DELAY(1000);
2263 }
2264
2265 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2266 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2267 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2268 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2269 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2270
2271 msg_data &= ~BNX_DRV_MSG_CODE;
2272 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2273
2274 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2275
2276 sc->bnx_fw_timed_out = 1;
2277 rc = EBUSY;
2278 }
2279
2280 bnx_fw_sync_exit:
2281 return (rc);
2282 }
2283
2284 /****************************************************************************/
2285 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2286 /* */
2287 /* Returns: */
2288 /* Nothing. */
2289 /****************************************************************************/
2290 void
2291 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2292 u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2293 {
2294 int i;
2295 u_int32_t val;
2296
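	/*
	 * Each RV2P instruction is 64 bits wide and is loaded as a
	 * high/low dword pair, so the loop advances 8 bytes at a time
	 * and (i / 8) is the instruction index.
	 */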
2297 for (i = 0; i < rv2p_code_len; i += 8) {
2298 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2299 rv2p_code++;
2300 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2301 rv2p_code++;
2302
2303 if (rv2p_proc == RV2P_PROC1) {
2304 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2305 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2306 		} else {
2308 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2309 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2310 }
2311 }
2312
2313 /* Reset the processor, un-stall is done later. */
2314 if (rv2p_proc == RV2P_PROC1)
2315 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2316 else
2317 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2318 }
2319
2320 /****************************************************************************/
2321 /* Load RISC processor firmware. */
2322 /* */
2323 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */
2324 /* associated with a particular processor. */
2325 /* */
2326 /* Returns: */
2327 /* Nothing. */
2328 /****************************************************************************/
2329 void
2330 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2331 struct fw_info *fw)
2332 {
2333 u_int32_t offset;
2334 u_int32_t val;
2335
2336 /* Halt the CPU. */
2337 val = REG_RD_IND(sc, cpu_reg->mode);
2338 val |= cpu_reg->mode_value_halt;
2339 REG_WR_IND(sc, cpu_reg->mode, val);
2340 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2341
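	/*
	 * Firmware sections below are linked at MIPS virtual addresses;
	 * subtracting mips_view_base and adding the scratchpad base
	 * translates them into indirect-register scratchpad offsets.
	 */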
2342 /* Load the Text area. */
2343 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2344 if (fw->text) {
2345 int j;
2346
2347 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2348 REG_WR_IND(sc, offset, fw->text[j]);
2349 }
2350
2351 /* Load the Data area. */
2352 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2353 if (fw->data) {
2354 int j;
2355
2356 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2357 REG_WR_IND(sc, offset, fw->data[j]);
2358 }
2359
2360 /* Load the SBSS area. */
2361 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2362 if (fw->sbss) {
2363 int j;
2364
2365 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2366 REG_WR_IND(sc, offset, fw->sbss[j]);
2367 }
2368
2369 /* Load the BSS area. */
2370 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2371 if (fw->bss) {
2372 int j;
2373
2374 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2375 REG_WR_IND(sc, offset, fw->bss[j]);
2376 }
2377
2378 /* Load the Read-Only area. */
2379 offset = cpu_reg->spad_base +
2380 (fw->rodata_addr - cpu_reg->mips_view_base);
2381 if (fw->rodata) {
2382 int j;
2383
2384 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2385 REG_WR_IND(sc, offset, fw->rodata[j]);
2386 }
2387
2388 /* Clear the pre-fetch instruction. */
2389 REG_WR_IND(sc, cpu_reg->inst, 0);
2390 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2391
2392 /* Start the CPU. */
2393 val = REG_RD_IND(sc, cpu_reg->mode);
2394 val &= ~cpu_reg->mode_value_halt;
2395 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2396 REG_WR_IND(sc, cpu_reg->mode, val);
2397 }
2398
2399 /****************************************************************************/
2400 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2401 /* */
2402 /* Loads the firmware for each CPU and starts the CPU. */
2403 /* */
2404 /* Returns: */
2405 /* Nothing. */
2406 /****************************************************************************/
2407 void
2408 bnx_init_cpus(struct bnx_softc *sc)
2409 {
2410 struct cpu_reg cpu_reg;
2411 struct fw_info fw;
2412
2413 /* Initialize the RV2P processor. */
2414 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
2415 RV2P_PROC1);
2416 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
2417 RV2P_PROC2);
2418
2419 /* Initialize the RX Processor. */
2420 cpu_reg.mode = BNX_RXP_CPU_MODE;
2421 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2422 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2423 cpu_reg.state = BNX_RXP_CPU_STATE;
2424 cpu_reg.state_value_clear = 0xffffff;
2425 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2426 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2427 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2428 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2429 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2430 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2431 cpu_reg.mips_view_base = 0x8000000;
2432
2433 fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2434 fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2435 fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2436 fw.start_addr = bnx_RXP_b06FwStartAddr;
2437
2438 fw.text_addr = bnx_RXP_b06FwTextAddr;
2439 fw.text_len = bnx_RXP_b06FwTextLen;
2440 fw.text_index = 0;
2441 fw.text = bnx_RXP_b06FwText;
2442
2443 fw.data_addr = bnx_RXP_b06FwDataAddr;
2444 fw.data_len = bnx_RXP_b06FwDataLen;
2445 fw.data_index = 0;
2446 fw.data = bnx_RXP_b06FwData;
2447
2448 fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2449 fw.sbss_len = bnx_RXP_b06FwSbssLen;
2450 fw.sbss_index = 0;
2451 fw.sbss = bnx_RXP_b06FwSbss;
2452
2453 fw.bss_addr = bnx_RXP_b06FwBssAddr;
2454 fw.bss_len = bnx_RXP_b06FwBssLen;
2455 fw.bss_index = 0;
2456 fw.bss = bnx_RXP_b06FwBss;
2457
2458 fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2459 fw.rodata_len = bnx_RXP_b06FwRodataLen;
2460 fw.rodata_index = 0;
2461 fw.rodata = bnx_RXP_b06FwRodata;
2462
2463 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2464 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2465
2466 /* Initialize the TX Processor. */
2467 cpu_reg.mode = BNX_TXP_CPU_MODE;
2468 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2469 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2470 cpu_reg.state = BNX_TXP_CPU_STATE;
2471 cpu_reg.state_value_clear = 0xffffff;
2472 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2473 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2474 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2475 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2476 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2477 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2478 cpu_reg.mips_view_base = 0x8000000;
2479
2480 fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2481 fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2482 fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2483 fw.start_addr = bnx_TXP_b06FwStartAddr;
2484
2485 fw.text_addr = bnx_TXP_b06FwTextAddr;
2486 fw.text_len = bnx_TXP_b06FwTextLen;
2487 fw.text_index = 0;
2488 fw.text = bnx_TXP_b06FwText;
2489
2490 fw.data_addr = bnx_TXP_b06FwDataAddr;
2491 fw.data_len = bnx_TXP_b06FwDataLen;
2492 fw.data_index = 0;
2493 fw.data = bnx_TXP_b06FwData;
2494
2495 fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2496 fw.sbss_len = bnx_TXP_b06FwSbssLen;
2497 fw.sbss_index = 0;
2498 fw.sbss = bnx_TXP_b06FwSbss;
2499
2500 fw.bss_addr = bnx_TXP_b06FwBssAddr;
2501 fw.bss_len = bnx_TXP_b06FwBssLen;
2502 fw.bss_index = 0;
2503 fw.bss = bnx_TXP_b06FwBss;
2504
2505 fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2506 fw.rodata_len = bnx_TXP_b06FwRodataLen;
2507 fw.rodata_index = 0;
2508 fw.rodata = bnx_TXP_b06FwRodata;
2509
2510 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2511 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2512
2513 /* Initialize the TX Patch-up Processor. */
2514 cpu_reg.mode = BNX_TPAT_CPU_MODE;
2515 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2516 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2517 cpu_reg.state = BNX_TPAT_CPU_STATE;
2518 cpu_reg.state_value_clear = 0xffffff;
2519 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2520 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2521 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2522 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2523 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2524 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2525 cpu_reg.mips_view_base = 0x8000000;
2526
2527 fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2528 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2529 fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2530 fw.start_addr = bnx_TPAT_b06FwStartAddr;
2531
2532 fw.text_addr = bnx_TPAT_b06FwTextAddr;
2533 fw.text_len = bnx_TPAT_b06FwTextLen;
2534 fw.text_index = 0;
2535 fw.text = bnx_TPAT_b06FwText;
2536
2537 fw.data_addr = bnx_TPAT_b06FwDataAddr;
2538 fw.data_len = bnx_TPAT_b06FwDataLen;
2539 fw.data_index = 0;
2540 fw.data = bnx_TPAT_b06FwData;
2541
2542 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2543 fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2544 fw.sbss_index = 0;
2545 fw.sbss = bnx_TPAT_b06FwSbss;
2546
2547 fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2548 fw.bss_len = bnx_TPAT_b06FwBssLen;
2549 fw.bss_index = 0;
2550 fw.bss = bnx_TPAT_b06FwBss;
2551
2552 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2553 fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2554 fw.rodata_index = 0;
2555 fw.rodata = bnx_TPAT_b06FwRodata;
2556
2557 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2558 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2559
2560 /* Initialize the Completion Processor. */
2561 cpu_reg.mode = BNX_COM_CPU_MODE;
2562 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2563 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2564 cpu_reg.state = BNX_COM_CPU_STATE;
2565 cpu_reg.state_value_clear = 0xffffff;
2566 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2567 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2568 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2569 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2570 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2571 cpu_reg.spad_base = BNX_COM_SCRATCH;
2572 cpu_reg.mips_view_base = 0x8000000;
2573
2574 fw.ver_major = bnx_COM_b06FwReleaseMajor;
2575 fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2576 fw.ver_fix = bnx_COM_b06FwReleaseFix;
2577 fw.start_addr = bnx_COM_b06FwStartAddr;
2578
2579 fw.text_addr = bnx_COM_b06FwTextAddr;
2580 fw.text_len = bnx_COM_b06FwTextLen;
2581 fw.text_index = 0;
2582 fw.text = bnx_COM_b06FwText;
2583
2584 fw.data_addr = bnx_COM_b06FwDataAddr;
2585 fw.data_len = bnx_COM_b06FwDataLen;
2586 fw.data_index = 0;
2587 fw.data = bnx_COM_b06FwData;
2588
2589 fw.sbss_addr = bnx_COM_b06FwSbssAddr;
2590 fw.sbss_len = bnx_COM_b06FwSbssLen;
2591 fw.sbss_index = 0;
2592 fw.sbss = bnx_COM_b06FwSbss;
2593
2594 fw.bss_addr = bnx_COM_b06FwBssAddr;
2595 fw.bss_len = bnx_COM_b06FwBssLen;
2596 fw.bss_index = 0;
2597 fw.bss = bnx_COM_b06FwBss;
2598
2599 fw.rodata_addr = bnx_COM_b06FwRodataAddr;
2600 fw.rodata_len = bnx_COM_b06FwRodataLen;
2601 fw.rodata_index = 0;
2602 fw.rodata = bnx_COM_b06FwRodata;
2603
2604 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2605 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2606 }
2607
2608 /****************************************************************************/
2609 /* Initialize context memory. */
2610 /* */
2611 /* Clears the memory associated with each Context ID (CID). */
2612 /* */
2613 /* Returns: */
2614 /* Nothing. */
2615 /****************************************************************************/
2616 void
2617 bnx_init_context(struct bnx_softc *sc)
2618 {
2619 u_int32_t vcid;
2620
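	/*
	 * Walk all 96 context IDs from the top down, mapping each CID
	 * into the context window and zeroing its PHY_CTX_SIZE bytes.
	 */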
2621 vcid = 96;
2622 while (vcid) {
2623 u_int32_t vcid_addr, pcid_addr, offset;
2624
2625 vcid--;
2626
2627 vcid_addr = GET_CID_ADDR(vcid);
2628 pcid_addr = vcid_addr;
2629
2630 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2631 REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2632
2633 /* Zero out the context. */
2634 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2635 CTX_WR(sc, 0x00, offset, 0);
2636
2637 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2638 REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2639 }
2640 }
2641
2642 /****************************************************************************/
2643 /* Fetch the permanent MAC address of the controller. */
2644 /* */
2645 /* Returns: */
2646 /* Nothing. */
2647 /****************************************************************************/
2648 void
2649 bnx_get_mac_addr(struct bnx_softc *sc)
2650 {
2651 u_int32_t mac_lo = 0, mac_hi = 0;
2652
2653 /*
2654 * The NetXtreme II bootcode populates various NIC
2655 * power-on and runtime configuration items in a
2656 * shared memory area. The factory configured MAC
2657 * address is available from both NVRAM and the
2658 * shared memory area so we'll read the value from
2659 * shared memory for speed.
2660 */
2661
2662 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
2663 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
2664
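	/*
	 * MAC_UPPER holds the two most significant address bytes and
	 * MAC_LOWER the remaining four, hence the shifts below.
	 */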
2665 if ((mac_lo == 0) && (mac_hi == 0)) {
2666 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2667 __FILE__, __LINE__);
2668 } else {
2669 sc->eaddr[0] = (u_char)(mac_hi >> 8);
2670 sc->eaddr[1] = (u_char)(mac_hi >> 0);
2671 sc->eaddr[2] = (u_char)(mac_lo >> 24);
2672 sc->eaddr[3] = (u_char)(mac_lo >> 16);
2673 sc->eaddr[4] = (u_char)(mac_lo >> 8);
2674 sc->eaddr[5] = (u_char)(mac_lo >> 0);
2675 }
2676
2677 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
2678 "%s\n", ether_sprintf(sc->eaddr));
2679 }
2680
2681 /****************************************************************************/
2682 /* Program the MAC address. */
2683 /* */
2684 /* Returns: */
2685 /* Nothing. */
2686 /****************************************************************************/
2687 void
2688 bnx_set_mac_addr(struct bnx_softc *sc)
2689 {
2690 u_int32_t val;
2691 const u_int8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);
2692
2693 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
2694 "%s\n", ether_sprintf(sc->eaddr));
2695
2696 val = (mac_addr[0] << 8) | mac_addr[1];
2697
2698 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2699
2700 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2701 (mac_addr[4] << 8) | mac_addr[5];
2702
2703 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2704 }
2705
2706 /****************************************************************************/
2707 /* Stop the controller. */
2708 /* */
2709 /* Returns: */
2710 /* Nothing. */
2711 /****************************************************************************/
2712 void
2713 bnx_stop(struct ifnet *ifp, int disable)
2714 {
2715 struct bnx_softc *sc = ifp->if_softc;
2716
2717 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2718
2719 if ((ifp->if_flags & IFF_RUNNING) == 0)
2720 return;
2721
2722 callout_stop(&sc->bnx_timeout);
2723
2724 mii_down(&sc->bnx_mii);
2725
2726 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2727
2728 /* Disable the transmit/receive blocks. */
2729 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2730 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2731 DELAY(20);
2732
2733 bnx_disable_intr(sc);
2734
2735 /* Tell firmware that the driver is going away. */
2736 if (disable)
2737 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
2738 else
2739 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
2740
2741 /* Free the RX lists. */
2742 bnx_free_rx_chain(sc);
2743
2744 /* Free TX buffers. */
2745 bnx_free_tx_chain(sc);
2746
2747 ifp->if_timer = 0;
2748
2749 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2751 }
2752
2753 int
2754 bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
2755 {
2756 u_int32_t val;
2757 int i, rc = 0;
2758
2759 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2760
2761 /* Wait for pending PCI transactions to complete. */
2762 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
2763 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2764 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2765 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2766 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2767 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2768 DELAY(5);
2769
2770 /* Assume bootcode is running. */
2771 sc->bnx_fw_timed_out = 0;
2772
2773 /* Give the firmware a chance to prepare for the reset. */
2774 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
2775 if (rc)
2776 goto bnx_reset_exit;
2777
2778 /* Set a firmware reminder that this is a soft reset. */
2779 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
2780 BNX_DRV_RESET_SIGNATURE_MAGIC);
2781
2782 /* Dummy read to force the chip to complete all current transactions. */
2783 val = REG_RD(sc, BNX_MISC_ID);
2784
2785 /* Chip reset. */
2786 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2787 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2788 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2789 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);
2790
2791 	/* Allow up to 100us (10 x 10us polls) for reset to complete. */
2792 for (i = 0; i < 10; i++) {
2793 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
2794 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2795 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
2796 break;
2797
2798 DELAY(10);
2799 }
2800
2801 /* Check that reset completed successfully. */
2802 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2803 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2804 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", __FILE__, __LINE__);
2805 rc = EBUSY;
2806 goto bnx_reset_exit;
2807 }
2808
2809 /* Make sure byte swapping is properly configured. */
2810 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
2811 if (val != 0x01020304) {
2812 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
2813 __FILE__, __LINE__);
2814 rc = ENODEV;
2815 goto bnx_reset_exit;
2816 }
2817
2818 /* Just completed a reset, assume that firmware is running again. */
2819 sc->bnx_fw_timed_out = 0;
2820
2821 /* Wait for the firmware to finish its initialization. */
2822 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
2823 if (rc)
2824 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
2825 "initialization!\n", __FILE__, __LINE__);
2826
2827 bnx_reset_exit:
2828 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2829
2830 return (rc);
2831 }
2832
2833 int
2834 bnx_chipinit(struct bnx_softc *sc)
2835 {
2836 struct pci_attach_args *pa = &(sc->bnx_pa);
2837 u_int32_t val;
2838 int rc = 0;
2839
2840 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2841
2842 /* Make sure the interrupt is not active. */
2843 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
2844
2845 /* Initialize DMA byte/word swapping, configure the number of DMA */
2846 /* channels and PCI clock compensation delay. */
2847 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
2848 BNX_DMA_CONFIG_DATA_WORD_SWAP |
2849 #if BYTE_ORDER == BIG_ENDIAN
2850 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
2851 #endif
2852 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
2853 DMA_READ_CHANS << 12 |
2854 DMA_WRITE_CHANS << 16;
2855
2856 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
2857
2858 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
2859 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
2860
2861 /*
2862 * This setting resolves a problem observed on certain Intel PCI
2863 * chipsets that cannot handle multiple outstanding DMA operations.
2864 * See errata E9_5706A1_65.
2865 */
2866 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
2867 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
2868 !(sc->bnx_flags & BNX_PCIX_FLAG))
2869 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
2870
2871 REG_WR(sc, BNX_DMA_CONFIG, val);
2872
2873 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
2874 if (sc->bnx_flags & BNX_PCIX_FLAG) {
2875 		u_int32_t nval;
2876
2877 nval = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
2878 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
2879 nval & ~0x20000);
2880 }
2881
2882 /* Enable the RX_V2P and Context state machines before access. */
2883 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
2884 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
2885 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
2886 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
2887
2888 /* Initialize context mapping and zero out the quick contexts. */
2889 bnx_init_context(sc);
2890
2891 	/* Initialize the on-board CPUs */
2892 bnx_init_cpus(sc);
2893
2894 /* Prepare NVRAM for access. */
2895 if (bnx_init_nvram(sc)) {
2896 rc = ENODEV;
2897 goto bnx_chipinit_exit;
2898 }
2899
2900 /* Set the kernel bypass block size */
2901 val = REG_RD(sc, BNX_MQ_CONFIG);
2902 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2903 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
2904 REG_WR(sc, BNX_MQ_CONFIG, val);
2905
2906 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
2907 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
2908 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
2909
2910 val = (BCM_PAGE_BITS - 8) << 24;
2911 REG_WR(sc, BNX_RV2P_CONFIG, val);
2912
2913 /* Configure page size. */
2914 val = REG_RD(sc, BNX_TBDR_CONFIG);
2915 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
2916 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
2917 REG_WR(sc, BNX_TBDR_CONFIG, val);
2918
2919 bnx_chipinit_exit:
2920 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2921
2922 return(rc);
2923 }
2924
2925 /****************************************************************************/
2926 /* Initialize the controller in preparation to send/receive traffic. */
2927 /* */
2928 /* Returns: */
2929 /* 0 for success, positive value for failure. */
2930 /****************************************************************************/
2931 int
2932 bnx_blockinit(struct bnx_softc *sc)
2933 {
2934 u_int32_t reg, val;
2935 int rc = 0;
2936
2937 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2938
2939 /* Load the hardware default MAC address. */
2940 bnx_set_mac_addr(sc);
2941
2942 /* Set the Ethernet backoff seed value */
2943 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
2944 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
2945 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
2946
2947 sc->last_status_idx = 0;
2948 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
2949
2950 /* Set up link change interrupt generation. */
2951 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
2952
2953 /* Program the physical address of the status block. */
2954 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
2955 REG_WR(sc, BNX_HC_STATUS_ADDR_H,
2956 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
2957
2958 /* Program the physical address of the statistics block. */
2959 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
2960 (u_int32_t)(sc->stats_block_paddr));
2961 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
2962 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
2963
2964 /* Program various host coalescing parameters. */
2965 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
2966 << 16) | sc->bnx_tx_quick_cons_trip);
2967 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
2968 << 16) | sc->bnx_rx_quick_cons_trip);
2969 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
2970 sc->bnx_comp_prod_trip);
2971 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
2972 sc->bnx_tx_ticks);
2973 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
2974 sc->bnx_rx_ticks);
2975 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
2976 sc->bnx_com_ticks);
2977 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
2978 sc->bnx_cmd_ticks);
2979 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
2980 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
2981 REG_WR(sc, BNX_HC_CONFIG,
2982 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
2983 BNX_HC_CONFIG_COLLECT_STATS));
2984
2985 /* Clear the internal statistics counters. */
2986 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
2987
2988 /* Verify that bootcode is running. */
2989 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
2990
2991 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
2992 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
2993 __FILE__, __LINE__); reg = 0);
2994
2995 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
2996 BNX_DEV_INFO_SIGNATURE_MAGIC) {
2997 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
2998 		    "Expected: 0x%08X\n", __FILE__, __LINE__,
2999 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3000 BNX_DEV_INFO_SIGNATURE_MAGIC);
3001 rc = ENODEV;
3002 goto bnx_blockinit_exit;
3003 }
3004
3005 /* Check if any management firmware is running. */
3006 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3007 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3008 BNX_PORT_FEATURE_IMD_ENABLED)) {
3009 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3010 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3011 }
3012
3013 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3014 BNX_DEV_INFO_BC_REV);
3015
3016 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3017
3018 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3019 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3020
3021 /* Enable link state change interrupt generation. */
3022 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3023
3024 /* Enable all remaining blocks in the MAC. */
3025 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3026 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3027 DELAY(20);
3028
3029 bnx_blockinit_exit:
3030 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3031
3032 return (rc);
3033 }
3034
3035 static int
3036 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod,
3037 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3038 {
3039 bus_dmamap_t map;
3040 struct rx_bd *rxbd;
3041 u_int32_t addr;
3042 int i;
3043 #ifdef BNX_DEBUG
3044 u_int16_t debug_chain_prod = *chain_prod;
3045 #endif
3046 u_int16_t first_chain_prod;
3047
3048 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3049
3050 /* Map the mbuf cluster into device memory. */
3051 map = sc->rx_mbuf_map[*chain_prod];
3052 first_chain_prod = *chain_prod;
3053 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3054 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3055 __FILE__, __LINE__);
3056
3057 m_freem(m_new);
3058
3059 DBRUNIF(1, sc->rx_mbuf_alloc--);
3060
3061 return ENOBUFS;
3062 }
3063 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3064 BUS_DMASYNC_PREREAD);
3065
3066 /* Watch for overflow. */
3067 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3068 aprint_error_dev(sc->bnx_dev,
3069 "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3070 sc->free_rx_bd, (u_int16_t)USABLE_RX_BD));
3071
3072 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3073 sc->rx_low_watermark = sc->free_rx_bd);
3074
3075 /*
3076 * Setup the rx_bd for the first segment
3077 */
3078 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3079
3080 addr = (u_int32_t)(map->dm_segs[0].ds_addr);
3081 rxbd->rx_bd_haddr_lo = htole32(addr);
3082 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3083 rxbd->rx_bd_haddr_hi = htole32(addr);
3084 rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
3085 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3086 *prod_bseq += map->dm_segs[0].ds_len;
3087 bus_dmamap_sync(sc->bnx_dmatag,
3088 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3089 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3090 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3091
3092 for (i = 1; i < map->dm_nsegs; i++) {
3093 *prod = NEXT_RX_BD(*prod);
3094 *chain_prod = RX_CHAIN_IDX(*prod);
3095
3096 rxbd =
3097 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3098
3099 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
3100 rxbd->rx_bd_haddr_lo = htole32(addr);
3101 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3102 rxbd->rx_bd_haddr_hi = htole32(addr);
3103 rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
3104 rxbd->rx_bd_flags = 0;
3105 *prod_bseq += map->dm_segs[i].ds_len;
3106 bus_dmamap_sync(sc->bnx_dmatag,
3107 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3108 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3109 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3110 }
3111
3112 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3113 bus_dmamap_sync(sc->bnx_dmatag,
3114 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3115 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3116 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3117
3118 	/*
3119 	 * Save the mbuf and adjust the map pointers (swap the maps for
3120 	 * the first and last rx_bd entries so that rx_mbuf_ptr and
3121 	 * rx_mbuf_map match) and update the free rx_bd counter.
3122 	 */
3123 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3124 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3125 sc->rx_mbuf_map[*chain_prod] = map;
3126 sc->free_rx_bd -= map->dm_nsegs;
3127
3128 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3129 map->dm_nsegs));
3130 *prod = NEXT_RX_BD(*prod);
3131 *chain_prod = RX_CHAIN_IDX(*prod);
3132
3133 return 0;
3134 }
3135
3136 /****************************************************************************/
3137 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3138 /* */
3139 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3140 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3141 /* necessary. */
3142 /* */
3143 /* Returns: */
3144 /* 0 for success, positive value for failure. */
3145 /****************************************************************************/
3146 int
3147 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3148 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3149 {
3150 struct mbuf *m_new = NULL;
3151 int rc = 0;
3152 u_int16_t min_free_bd;
3153
3154 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3155 __func__);
3156
3157 /* Make sure the inputs are valid. */
3158 DBRUNIF((*chain_prod > MAX_RX_BD),
3159 aprint_error_dev(sc->bnx_dev,
3160 "RX producer out of range: 0x%04X > 0x%04X\n",
3161 *chain_prod, (u_int16_t)MAX_RX_BD));
3162
3163 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3164 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
3165 *prod_bseq);
3166
3167 /* try to get in as many mbufs as possible */
3168 if (sc->mbuf_alloc_size == MCLBYTES)
3169 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
3170 else
3171 min_free_bd = (BNX_MAX_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
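	/*
	 * A cluster of mbuf_alloc_size bytes can load into up to one DMA
	 * segment (and thus one rx_bd) per PAGE_SIZE, so only keep
	 * filling while at least that many descriptors remain free.
	 */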
3172 while (sc->free_rx_bd >= min_free_bd) {
3173 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3174 BNX_PRINTF(sc, "Simulating mbuf allocation failure.\n");
3175
3176 sc->mbuf_alloc_failed++;
3177 rc = ENOBUFS;
3178 goto bnx_get_buf_exit);
3179
3180 /* This is a new mbuf allocation. */
3181 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3182 if (m_new == NULL) {
3183 DBPRINT(sc, BNX_WARN,
3184 "%s(%d): RX mbuf header allocation failed!\n",
3185 __FILE__, __LINE__);
3186
3187 DBRUNIF(1, sc->mbuf_alloc_failed++);
3188
3189 rc = ENOBUFS;
3190 goto bnx_get_buf_exit;
3191 }
3192
3193 DBRUNIF(1, sc->rx_mbuf_alloc++);
3194 if (sc->mbuf_alloc_size == MCLBYTES)
3195 MCLGET(m_new, M_DONTWAIT);
3196 else
3197 MEXTMALLOC(m_new, sc->mbuf_alloc_size,
3198 M_DONTWAIT);
3199 if (!(m_new->m_flags & M_EXT)) {
3200 DBPRINT(sc, BNX_WARN,
3201 "%s(%d): RX mbuf chain allocation failed!\n",
3202 __FILE__, __LINE__);
3203
3204 m_freem(m_new);
3205
3206 DBRUNIF(1, sc->rx_mbuf_alloc--);
3207 DBRUNIF(1, sc->mbuf_alloc_failed++);
3208
3209 rc = ENOBUFS;
3210 goto bnx_get_buf_exit;
3211 }
3212
3213 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq);
3214 if (rc != 0)
3215 goto bnx_get_buf_exit;
3216 }
3217
3218 bnx_get_buf_exit:
3219 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
3220 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
3221 *chain_prod, *prod_bseq);
3222
3223 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
3224 __func__);
3225
3226 return(rc);
3227 }
3228
3229 /****************************************************************************/
3230 /* Allocate memory and initialize the TX data structures. */
3231 /* */
3232 /* Returns: */
3233 /* 0 for success, positive value for failure. */
3234 /****************************************************************************/
3235 int
3236 bnx_init_tx_chain(struct bnx_softc *sc)
3237 {
3238 struct tx_bd *txbd;
3239 u_int32_t val, addr;
3240 int i, rc = 0;
3241
3242 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3243
3244 /* Set the initial TX producer/consumer indices. */
3245 sc->tx_prod = 0;
3246 sc->tx_cons = 0;
3247 sc->tx_prod_bseq = 0;
3248 sc->used_tx_bd = 0;
3249 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3250
3251 /*
3252 * The NetXtreme II supports a linked-list structure called
3253 * a Buffer Descriptor Chain (or BD chain). A BD chain
3254 * consists of a series of 1 or more chain pages, each of which
3255 * consists of a fixed number of BD entries.
3256 * The last BD entry on each page is a pointer to the next page
3257 * in the chain, and the last pointer in the BD chain
3258 * points back to the beginning of the chain.
3259 */
3260
3261 /* Set the TX next pointer chain entries. */
3262 for (i = 0; i < TX_PAGES; i++) {
3263 int j;
3264
3265 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3266
3267 /* Check if we've reached the last page. */
3268 if (i == (TX_PAGES - 1))
3269 j = 0;
3270 else
3271 j = i + 1;
3272
3273 addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
3274 txbd->tx_bd_haddr_lo = htole32(addr);
3275 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3276 txbd->tx_bd_haddr_hi = htole32(addr);
3277 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
3278 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
3279 }
3280
3281 /*
3282 * Initialize the context ID for an L2 TX chain.
3283 */
3284 val = BNX_L2CTX_TYPE_TYPE_L2;
3285 val |= BNX_L2CTX_TYPE_SIZE_L2;
3286 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3287
3288 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3289 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3290
3291 /* Point the hardware to the first page in the chain. */
3292 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3293 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3294 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3295 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3296
3297 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3298
3299 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3300
3301 return(rc);
3302 }
3303
3304 /****************************************************************************/
3305 /* Free memory and clear the TX data structures. */
3306 /* */
3307 /* Returns: */
3308 /* Nothing. */
3309 /****************************************************************************/
3310 void
3311 bnx_free_tx_chain(struct bnx_softc *sc)
3312 {
3313 int i;
3314
3315 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3316
3317 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3318 for (i = 0; i < TOTAL_TX_BD; i++) {
3319 if (sc->tx_mbuf_ptr[i] != NULL) {
3320 if (sc->tx_mbuf_map != NULL)
3321 bus_dmamap_sync(sc->bnx_dmatag,
3322 sc->tx_mbuf_map[i], 0,
3323 sc->tx_mbuf_map[i]->dm_mapsize,
3324 BUS_DMASYNC_POSTWRITE);
3325 m_freem(sc->tx_mbuf_ptr[i]);
3326 sc->tx_mbuf_ptr[i] = NULL;
3327 DBRUNIF(1, sc->tx_mbuf_alloc--);
3328 }
3329 }
3330
3331 /* Clear each TX chain page. */
3332 for (i = 0; i < TX_PAGES; i++) {
3333 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ);
3334 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
3335 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
3336 }
3337
3338 /* Check if we lost any mbufs in the process. */
3339 DBRUNIF((sc->tx_mbuf_alloc),
3340 aprint_error_dev(sc->bnx_dev,
3341 "Memory leak! Lost %d mbufs from tx chain!\n",
3342 sc->tx_mbuf_alloc));
3343
3344 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3345 }
3346
3347 /****************************************************************************/
3348 /* Allocate memory and initialize the RX data structures. */
3349 /* */
3350 /* Returns: */
3351 /* 0 for success, positive value for failure. */
3352 /****************************************************************************/
3353 int
3354 bnx_init_rx_chain(struct bnx_softc *sc)
3355 {
3356 struct rx_bd *rxbd;
3357 int i, rc = 0;
3358 u_int16_t prod, chain_prod;
3359 u_int32_t prod_bseq, val, addr;
3360
3361 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3362
3363 /* Initialize the RX producer and consumer indices. */
3364 sc->rx_prod = 0;
3365 sc->rx_cons = 0;
3366 sc->rx_prod_bseq = 0;
3367 sc->free_rx_bd = BNX_RX_SLACK_SPACE;
3368 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3369
3370 /* Initialize the RX next pointer chain entries. */
3371 for (i = 0; i < RX_PAGES; i++) {
3372 int j;
3373
3374 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3375
3376 /* Check if we've reached the last page. */
3377 if (i == (RX_PAGES - 1))
3378 j = 0;
3379 else
3380 j = i + 1;
3381
3382 /* Setup the chain page pointers. */
3383 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
3384 rxbd->rx_bd_haddr_hi = htole32(addr);
3385 addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
3386 rxbd->rx_bd_haddr_lo = htole32(addr);
3387 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
3388 0, BNX_RX_CHAIN_PAGE_SZ,
3389 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3390 }
3391
3392 /* Initialize the context ID for an L2 RX chain. */
3393 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3394 val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
3395 val |= 0x02 << 8;
3396 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3397
3398 /* Point the hardware to the first page in the chain. */
3399 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3400 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3401 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3402 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3403
3404 /* Allocate mbuf clusters for the rx_bd chain. */
3405 prod = prod_bseq = 0;
3406 chain_prod = RX_CHAIN_IDX(prod);
3407 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
3408 BNX_PRINTF(sc,
3409 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
3410 }
3411
3412 /* Save the RX chain producer index. */
3413 sc->rx_prod = prod;
3414 sc->rx_prod_bseq = prod_bseq;
3415
3416 for (i = 0; i < RX_PAGES; i++)
3417 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
3418 sc->rx_bd_chain_map[i]->dm_mapsize,
3419 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3420
3421 /* Tell the chip about the waiting rx_bd's. */
3422 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3423 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3424
3425 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3426
3427 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3428
3429 return(rc);
3430 }
3431
3432 /****************************************************************************/
3433 /* Free memory and clear the RX data structures. */
3434 /* */
3435 /* Returns: */
3436 /* Nothing. */
3437 /****************************************************************************/
3438 void
3439 bnx_free_rx_chain(struct bnx_softc *sc)
3440 {
3441 int i;
3442
3443 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3444
3445 /* Free any mbufs still in the RX mbuf chain. */
3446 for (i = 0; i < TOTAL_RX_BD; i++) {
3447 if (sc->rx_mbuf_ptr[i] != NULL) {
3448 if (sc->rx_mbuf_map[i] != NULL)
3449 bus_dmamap_sync(sc->bnx_dmatag,
3450 sc->rx_mbuf_map[i], 0,
3451 sc->rx_mbuf_map[i]->dm_mapsize,
3452 BUS_DMASYNC_POSTREAD);
3453 m_freem(sc->rx_mbuf_ptr[i]);
3454 sc->rx_mbuf_ptr[i] = NULL;
3455 DBRUNIF(1, sc->rx_mbuf_alloc--);
3456 }
3457 }
3458
3459 /* Clear each RX chain page. */
3460 for (i = 0; i < RX_PAGES; i++)
3461 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
3462
3463 /* Check if we lost any mbufs in the process. */
3464 DBRUNIF((sc->rx_mbuf_alloc),
3465 aprint_error_dev(sc->bnx_dev,
3466 "Memory leak! Lost %d mbufs from rx chain!\n",
3467 sc->rx_mbuf_alloc));
3468
3469 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3470 }
3471
3472 /****************************************************************************/
3473 /* Handles PHY generated interrupt events. */
3474 /* */
3475 /* Returns: */
3476 /* Nothing. */
3477 /****************************************************************************/
3478 void
3479 bnx_phy_intr(struct bnx_softc *sc)
3480 {
3481 u_int32_t new_link_state, old_link_state;
3482
3483 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3484 BUS_DMASYNC_POSTREAD);
3485 new_link_state = sc->status_block->status_attn_bits &
3486 STATUS_ATTN_BITS_LINK_STATE;
3487 old_link_state = sc->status_block->status_attn_bits_ack &
3488 STATUS_ATTN_BITS_LINK_STATE;
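/*
 * Note: the chip keeps two copies of the attention bits.
 * status_attn_bits reflects the current state and
 * status_attn_bits_ack the state last acknowledged by the driver,
 * so comparing the LINK_STATE bit in each yields the new vs. old
 * link state; the SET/CLEAR_CMD writes below update the ack copy.
 */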
3489
3490 /* Handle any changes if the link state has changed. */
3491 if (new_link_state != old_link_state) {
3492 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
3493
3494 callout_stop(&sc->bnx_timeout);
3495 bnx_tick(sc);
3496
3497 /* Update the status_attn_bits_ack field in the status block. */
3498 if (new_link_state) {
3499 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
3500 STATUS_ATTN_BITS_LINK_STATE);
3501 DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
3502 } else {
3503 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
3504 STATUS_ATTN_BITS_LINK_STATE);
3505 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
3506 }
3507 }
3508
3509 /* Acknowledge the link change interrupt. */
3510 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
3511 }
3512
3513 /****************************************************************************/
3514 /* Handles received frame interrupt events. */
3515 /* */
3516 /* Returns: */
3517 /* Nothing. */
3518 /****************************************************************************/
3519 void
3520 bnx_rx_intr(struct bnx_softc *sc)
3521 {
3522 struct status_block *sblk = sc->status_block;
3523 struct ifnet *ifp = &sc->bnx_ec.ec_if;
3524 u_int16_t hw_cons, sw_cons, sw_chain_cons;
3525 u_int16_t sw_prod, sw_chain_prod;
3526 u_int32_t sw_prod_bseq;
3527 struct l2_fhdr *l2fhdr;
3528 int i;
3529
3530 DBRUNIF(1, sc->rx_interrupts++);
3531 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3532 BUS_DMASYNC_POSTREAD);
3533
3534 /* Prepare the RX chain pages to be accessed by the host CPU. */
3535 for (i = 0; i < RX_PAGES; i++)
3536 bus_dmamap_sync(sc->bnx_dmatag,
3537 sc->rx_bd_chain_map[i], 0,
3538 sc->rx_bd_chain_map[i]->dm_mapsize,
3539 BUS_DMASYNC_POSTWRITE);
3540
3541 /* Get the hardware's view of the RX consumer index. */
3542 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3543 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3544 hw_cons++;
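/*
 * Note: the hardware consumer index counts every rx_bd, including the
 * chain-page pointer reserved at the end of each page.  When the low
 * bits equal USABLE_RX_BD_PER_PAGE the index sits on such a pointer
 * slot, which carries no frame, so it is advanced by one to the first
 * real descriptor of the next page before being compared against the
 * software consumer.
 */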
3545
3546 /* Get working copies of the driver's view of the RX indices. */
3547 sw_cons = sc->rx_cons;
3548 sw_prod = sc->rx_prod;
3549 sw_prod_bseq = sc->rx_prod_bseq;
3550
3551 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3552 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3553 __func__, sw_prod, sw_cons, sw_prod_bseq);
3554
3555 /* Prevent speculative reads from getting ahead of the status block. */
3556 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3557 BUS_SPACE_BARRIER_READ);
3558
3559 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3560 sc->rx_low_watermark = sc->free_rx_bd);
3561
3562 /*
3563 * Scan through the receive chain as long
3564 * as there is work to do.
3565 */
3566 while (sw_cons != hw_cons) {
3567 struct mbuf *m;
3568 struct rx_bd *rxbd;
3569 unsigned int len;
3570 u_int32_t status;
3571
3572 /* Convert the producer/consumer indices to an actual
3573 * rx_bd index.
3574 */
3575 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3576 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3577
3578 /* Get the used rx_bd. */
3579 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
3580 sc->free_rx_bd++;
3581
3582 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
3583 bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
3584
3585 /* The mbuf is stored with the last rx_bd entry of a packet. */
3586 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3587 #ifdef DIAGNOSTIC
3588 /* Validate that this is the last rx_bd. */
3589 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
3590 printf("%s: Unexpected mbuf found in "
3591 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
3592 sw_chain_cons);
3593 }
3594 #endif
3595
3596 /* DRC - ToDo: If the received packet is small, say less
3597 * than 128 bytes, allocate a new mbuf here,
3598 * copy the data to that mbuf, and recycle
3599 * the mapped jumbo frame.
3600 */
3601
3602 /* Unmap the mbuf from DMA space. */
3603 #ifdef DIAGNOSTIC
3604 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
3605 printf("invalid map sw_cons 0x%x "
3606 "sw_prod 0x%x "
3607 "sw_chain_cons 0x%x "
3608 "sw_chain_prod 0x%x "
3609 "hw_cons 0x%x "
3610 "TOTAL_RX_BD_PER_PAGE 0x%x "
3611 "TOTAL_RX_BD 0x%x\n",
3612 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
3613 hw_cons,
3614 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
3615 }
3616 #endif
3617 bus_dmamap_sync(sc->bnx_dmatag,
3618 sc->rx_mbuf_map[sw_chain_cons], 0,
3619 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
3620 BUS_DMASYNC_POSTREAD);
3621 bus_dmamap_unload(sc->bnx_dmatag,
3622 sc->rx_mbuf_map[sw_chain_cons]);
3623
3624 /* Remove the mbuf from the driver's chain. */
3625 m = sc->rx_mbuf_ptr[sw_chain_cons];
3626 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3627
3628 /*
3629 				 * Frames received on the NetXtreme II are prepended
3630 * with the l2_fhdr structure which provides status
3631 * information about the received frame (including
3632 * VLAN tags and checksum info) and are also
3633 * automatically adjusted to align the IP header
3634 * (i.e. two null bytes are inserted before the
3635 * Ethernet header).
3636 */
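/*
 * Illustrative buffer layout, as implied by the comment above and by
 * the later m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN):
 *
 *	+------------------+  offset 0
 *	| struct l2_fhdr   |  status, pkt_len, VLAN tag, checksums
 *	+------------------+
 *	| 2 pad bytes      |  aligns the IP header (ETHER_ALIGN)
 *	+------------------+
 *	| Ethernet frame   |  pkt_len includes the FCS, which is
 *	| ...              |  trimmed below via len -= ETHER_CRC_LEN
 *	+------------------+
 */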
3637 l2fhdr = mtod(m, struct l2_fhdr *);
3638
3639 len = l2fhdr->l2_fhdr_pkt_len;
3640 status = l2fhdr->l2_fhdr_status;
3641
3642 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
3643 aprint_error("Simulating l2_fhdr status error.\n");
3644 status = status | L2_FHDR_ERRORS_PHY_DECODE);
3645
3646 /* Watch for unusual sized frames. */
3647 DBRUNIF(((len < BNX_MIN_MTU) ||
3648 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
3649 aprint_error_dev(sc->bnx_dev,
3650 "Unusual frame size found. "
3651 "Min(%d), Actual(%d), Max(%d)\n",
3652 (int)BNX_MIN_MTU, len,
3653 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
3654
3655 bnx_dump_mbuf(sc, m);
3656 bnx_breakpoint(sc));
3657
3658 len -= ETHER_CRC_LEN;
3659
3660 /* Check the received frame for errors. */
3661 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
3662 L2_FHDR_ERRORS_PHY_DECODE |
3663 L2_FHDR_ERRORS_ALIGNMENT |
3664 L2_FHDR_ERRORS_TOO_SHORT |
3665 L2_FHDR_ERRORS_GIANT_FRAME)) ||
3666 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
3667 len >
3668 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
3669 ifp->if_ierrors++;
3670 DBRUNIF(1, sc->l2fhdr_status_errors++);
3671
3672 /* Reuse the mbuf for a new frame. */
3673 if (bnx_add_buf(sc, m, &sw_prod,
3674 &sw_chain_prod, &sw_prod_bseq)) {
3675 DBRUNIF(1, bnx_breakpoint(sc));
3676 panic("%s: Can't reuse RX mbuf!\n",
3677 device_xname(sc->bnx_dev));
3678 }
3679 continue;
3680 }
3681
3682 /*
3683 * Get a new mbuf for the rx_bd. If no new
3684 * mbufs are available then reuse the current mbuf,
3685 * log an ierror on the interface, and generate
3686 * an error in the system log.
3687 */
3688 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
3689 &sw_prod_bseq)) {
3690 DBRUN(BNX_WARN, BNX_PRINTF(sc, "Failed to allocate "
3691 "new mbuf, incoming frame dropped!\n"));
3692
3693 ifp->if_ierrors++;
3694
3695 				/* Try to reuse the existing mbuf. */
3696 if (bnx_add_buf(sc, m, &sw_prod,
3697 &sw_chain_prod, &sw_prod_bseq)) {
3698 DBRUNIF(1, bnx_breakpoint(sc));
3699 panic("%s: Double mbuf allocation "
3700 "failure!",
3701 device_xname(sc->bnx_dev));
3702 }
3703 continue;
3704 }
3705
3706 /* Skip over the l2_fhdr when passing the data up
3707 * the stack.
3708 */
3709 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3710
3711 			/* Adjust the packet length to match the received data. */
3712 m->m_pkthdr.len = m->m_len = len;
3713
3714 /* Send the packet to the appropriate interface. */
3715 m->m_pkthdr.rcvif = ifp;
3716
3717 DBRUN(BNX_VERBOSE_RECV,
3718 struct ether_header *eh;
3719 eh = mtod(m, struct ether_header *);
3720 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
3721 __func__, ether_sprintf(eh->ether_dhost),
3722 ether_sprintf(eh->ether_shost),
3723 htons(eh->ether_type)));
3724
3725 /* Validate the checksum. */
3726
3727 /* Check for an IP datagram. */
3728 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3729 /* Check if the IP checksum is valid. */
3730 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
3731 == 0)
3732 m->m_pkthdr.csum_flags |=
3733 M_CSUM_IPv4;
3734 #ifdef BNX_DEBUG
3735 else
3736 DBPRINT(sc, BNX_WARN_SEND,
3737 "%s(): Invalid IP checksum "
3738 "= 0x%04X!\n",
3739 __func__,
3740 l2fhdr->l2_fhdr_ip_xsum
3741 );
3742 #endif
3743 }
3744
3745 /* Check for a valid TCP/UDP frame. */
3746 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3747 L2_FHDR_STATUS_UDP_DATAGRAM)) {
3748 /* Check for a good TCP/UDP checksum. */
3749 if ((status &
3750 (L2_FHDR_ERRORS_TCP_XSUM |
3751 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3752 m->m_pkthdr.csum_flags |=
3753 M_CSUM_TCPv4 |
3754 M_CSUM_UDPv4;
3755 } else {
3756 DBPRINT(sc, BNX_WARN_SEND,
3757 "%s(): Invalid TCP/UDP "
3758 "checksum = 0x%04X!\n",
3759 __func__,
3760 l2fhdr->l2_fhdr_tcp_udp_xsum);
3761 }
3762 }
3763
3764 /*
3765 * If we received a packet with a vlan tag,
3766 * attach that information to the packet.
3767 */
3768 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
3769 #if 0
3770 struct ether_vlan_header vh;
3771
3772 DBPRINT(sc, BNX_VERBOSE_SEND,
3773 "%s(): VLAN tag = 0x%04X\n",
3774 __func__,
3775 l2fhdr->l2_fhdr_vlan_tag);
3776
3777 if (m->m_pkthdr.len < ETHER_HDR_LEN) {
3778 m_freem(m);
3779 continue;
3780 }
3781 m_copydata(m, 0, ETHER_HDR_LEN, (void *)&vh);
3782 vh.evl_proto = vh.evl_encap_proto;
3783 vh.evl_tag = l2fhdr->l2_fhdr_vlan_tag;
3784 vh.evl_encap_proto = htons(ETHERTYPE_VLAN);
3785 m_adj(m, ETHER_HDR_LEN);
3786 if ((m = m_prepend(m, sizeof(vh), M_DONTWAIT)) == NULL)
3787 continue;
3788 m->m_pkthdr.len += sizeof(vh);
3789 if (m->m_len < sizeof(vh) &&
3790 (m = m_pullup(m, sizeof(vh))) == NULL)
3791 goto bnx_rx_int_next_rx;
3792 m_copyback(m, 0, sizeof(vh), &vh);
3793 #else
3794 VLAN_INPUT_TAG(ifp, m,
3795 l2fhdr->l2_fhdr_vlan_tag,
3796 continue);
3797 #endif
3798 }
3799
3800 #if NBPFILTER > 0
3801 /*
3802 * Handle BPF listeners. Let the BPF
3803 * user see the packet.
3804 */
3805 if (ifp->if_bpf)
3806 bpf_mtap(ifp->if_bpf, m);
3807 #endif
3808
3809 /* Pass the mbuf off to the upper layers. */
3810 ifp->if_ipackets++;
3811 DBPRINT(sc, BNX_VERBOSE_RECV,
3812 "%s(): Passing received frame up.\n", __func__);
3813 (*ifp->if_input)(ifp, m);
3814 DBRUNIF(1, sc->rx_mbuf_alloc--);
3815
3816 }
3817
3818 sw_cons = NEXT_RX_BD(sw_cons);
3819
3820 /* Refresh hw_cons to see if there's new work */
3821 if (sw_cons == hw_cons) {
3822 hw_cons = sc->hw_rx_cons =
3823 sblk->status_rx_quick_consumer_index0;
3824 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
3825 USABLE_RX_BD_PER_PAGE)
3826 hw_cons++;
3827 }
3828
3829 /* Prevent speculative reads from getting ahead of
3830 * the status block.
3831 */
3832 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3833 BUS_SPACE_BARRIER_READ);
3834 }
3835
3836 for (i = 0; i < RX_PAGES; i++)
3837 bus_dmamap_sync(sc->bnx_dmatag,
3838 sc->rx_bd_chain_map[i], 0,
3839 sc->rx_bd_chain_map[i]->dm_mapsize,
3840 BUS_DMASYNC_PREWRITE);
3841
3842 sc->rx_cons = sw_cons;
3843 sc->rx_prod = sw_prod;
3844 sc->rx_prod_bseq = sw_prod_bseq;
3845
3846 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3847 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3848
3849 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
3850 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
3851 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
3852 }
3853
3854 /****************************************************************************/
3855 /* Handles transmit completion interrupt events. */
3856 /* */
3857 /* Returns: */
3858 /* Nothing. */
3859 /****************************************************************************/
3860 void
3861 bnx_tx_intr(struct bnx_softc *sc)
3862 {
3863 struct status_block *sblk = sc->status_block;
3864 struct ifnet *ifp = &sc->bnx_ec.ec_if;
3865 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3866
3867 DBRUNIF(1, sc->tx_interrupts++);
3868 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3869 BUS_DMASYNC_POSTREAD);
3870
3871 /* Get the hardware's view of the TX consumer index. */
3872 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3873
3874 /* Skip to the next entry if this is a chain page pointer. */
3875 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3876 hw_tx_cons++;
3877
3878 sw_tx_cons = sc->tx_cons;
3879
3880 /* Prevent speculative reads from getting ahead of the status block. */
3881 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3882 BUS_SPACE_BARRIER_READ);
3883
3884 /* Cycle through any completed TX chain page entries. */
3885 while (sw_tx_cons != hw_tx_cons) {
3886 #ifdef BNX_DEBUG
3887 struct tx_bd *txbd = NULL;
3888 #endif
3889 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
3890
3891 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
3892 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
3893 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
3894
3895 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
3896 aprint_error_dev(sc->bnx_dev,
3897 "TX chain consumer out of range! 0x%04X > 0x%04X\n",
3898 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));
3899
3900 DBRUNIF(1, txbd = &sc->tx_bd_chain
3901 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
3902
3903 DBRUNIF((txbd == NULL),
3904 aprint_error_dev(sc->bnx_dev,
3905 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
3906 bnx_breakpoint(sc));
3907
3908 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
3909 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
3910
3911 /*
3912 * Free the associated mbuf. Remember
3913 * that only the last tx_bd of a packet
3914 * has an mbuf pointer and DMA map.
3915 */
3916 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
3917 /* Validate that this is the last tx_bd. */
3918 DBRUNIF((!(txbd->tx_bd_vlan_tag_flags &
3919 TX_BD_FLAGS_END)),
3920 aprint_error_dev(sc->bnx_dev,
3921 "tx_bd END flag not set but txmbuf == NULL!\n");
3922 bnx_breakpoint(sc));
3923
3924 DBRUN(BNX_INFO_SEND,
3925 aprint_debug("%s: Unloading map/freeing mbuf "
3926 "from tx_bd[0x%04X]\n",
3927 __func__, sw_tx_chain_cons));
3928
3929 /* Unmap the mbuf. */
3930 bus_dmamap_unload(sc->bnx_dmatag,
3931 sc->tx_mbuf_map[sw_tx_chain_cons]);
3932
3933 /* Free the mbuf. */
3934 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
3935 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
3936 DBRUNIF(1, sc->tx_mbuf_alloc--);
3937
3938 ifp->if_opackets++;
3939 }
3940
3941 sc->used_tx_bd--;
3942 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
3943
3944 /* Refresh hw_cons to see if there's new work. */
3945 hw_tx_cons = sc->hw_tx_cons =
3946 sblk->status_tx_quick_consumer_index0;
3947 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
3948 USABLE_TX_BD_PER_PAGE)
3949 hw_tx_cons++;
3950
3951 /* Prevent speculative reads from getting ahead of
3952 * the status block.
3953 */
3954 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3955 BUS_SPACE_BARRIER_READ);
3956 }
3957
3958 /* Clear the TX timeout timer. */
3959 ifp->if_timer = 0;
3960
3961 /* Clear the tx hardware queue full flag. */
3962 if ((sc->used_tx_bd + BNX_TX_SLACK_SPACE) < USABLE_TX_BD) {
3963 DBRUNIF((ifp->if_flags & IFF_OACTIVE),
3964 aprint_debug_dev(sc->bnx_dev,
3965 "TX chain is open for business! Used tx_bd = %d\n",
3966 sc->used_tx_bd));
3967 ifp->if_flags &= ~IFF_OACTIVE;
3968 }
3969
3970 sc->tx_cons = sw_tx_cons;
3971 }
3972
3973 /****************************************************************************/
3974 /* Disables interrupt generation. */
3975 /* */
3976 /* Returns: */
3977 /* Nothing. */
3978 /****************************************************************************/
3979 void
3980 bnx_disable_intr(struct bnx_softc *sc)
3981 {
3982 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3983 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
3984 }
3985
3986 /****************************************************************************/
3987 /* Enables interrupt generation. */
3988 /* */
3989 /* Returns: */
3990 /* Nothing. */
3991 /****************************************************************************/
3992 void
3993 bnx_enable_intr(struct bnx_softc *sc)
3994 {
3995 u_int32_t val;
3996
3997 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
3998 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
3999
4000 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4001 sc->last_status_idx);
4002
4003 val = REG_RD(sc, BNX_HC_COMMAND);
4004 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4005 }
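/*
 * Note: a hedged reading of the sequence above.  The first
 * INT_ACK_CMD write updates the status index with the line still
 * masked (MASK_INT set); the second repeats the index with the mask
 * bit clear, actually re-enabling interrupts.  The final HC_COMMAND
 * write with COAL_NOW asks the host coalescing block for an immediate
 * status block update, so any event that arrived while masked is not
 * left unserviced.
 */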
4006
4007 /****************************************************************************/
4008 /* Handles controller initialization. */
4009 /* */
4010 /****************************************************************************/
4011 int
4012 bnx_init(struct ifnet *ifp)
4013 {
4014 struct bnx_softc *sc = ifp->if_softc;
4015 u_int32_t ether_mtu;
4016 int s, error = 0;
4017
4018 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4019
4020 s = splnet();
4021
4022 bnx_stop(ifp, 0);
4023
4024 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4025 aprint_error("bnx: Controller reset failed!\n");
4026 goto bnx_init_exit;
4027 }
4028
4029 if ((error = bnx_chipinit(sc)) != 0) {
4030 aprint_error("bnx: Controller initialization failed!\n");
4031 goto bnx_init_exit;
4032 }
4033
4034 if ((error = bnx_blockinit(sc)) != 0) {
4035 aprint_error("bnx: Block initialization failed!\n");
4036 goto bnx_init_exit;
4037 }
4038
4039 /* Calculate and program the Ethernet MRU size. */
4040 if (ifp->if_mtu <= ETHERMTU) {
4041 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4042 sc->mbuf_alloc_size = MCLBYTES;
4043 } else {
4044 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4045 sc->mbuf_alloc_size = BNX_MAX_MRU;
4046 }
4047
4049 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4050 __func__, ether_mtu);
4051
4052 /*
4053 * Program the MRU and enable Jumbo frame
4054 * support.
4055 */
4056 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4057 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4058
4059 /* Calculate the RX Ethernet frame size for rx_bd's. */
4060 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
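	/*
	 * A hedged breakdown of the arithmetic above: the receive buffer
	 * must hold the l2_fhdr the chip prepends, the two alignment pad
	 * bytes, and the largest frame for the configured MRU; the
	 * trailing 8 bytes are presumably slack for the FCS and hardware
	 * rounding.
	 *
	 *	max_frame_size = sizeof(struct l2_fhdr) // chip-added header
	 *	    + 2					 // IP alignment pad
	 *	    + ether_mtu				 // frame incl. VLAN
	 *	    + 8;				 // FCS + slack (assumed)
	 */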
4061
4062 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4063 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4064 sc->mbuf_alloc_size, sc->max_frame_size);
4065
4066 /* Program appropriate promiscuous/multicast filtering. */
4067 bnx_set_rx_mode(sc);
4068
4069 /* Init RX buffer descriptor chain. */
4070 bnx_init_rx_chain(sc);
4071
4072 /* Init TX buffer descriptor chain. */
4073 bnx_init_tx_chain(sc);
4074
4075 /* Enable host interrupts. */
4076 bnx_enable_intr(sc);
4077
4078 if ((error = ether_mediachange(ifp)) != 0)
4079 goto bnx_init_exit;
4080
4081 ifp->if_flags |= IFF_RUNNING;
4082 ifp->if_flags &= ~IFF_OACTIVE;
4083
4084 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4085
4086 bnx_init_exit:
4087 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4088
4089 splx(s);
4090
4091 return(error);
4092 }
4093
4094 /****************************************************************************/
4095 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4096 /* memory visible to the controller. */
4097 /* */
4098 /* Returns: */
4099 /* 0 for success, positive value for failure. */
4100 /****************************************************************************/
4101 int
4102 bnx_tx_encap(struct bnx_softc *sc, struct mbuf **m_head)
4103 {
4104 bus_dmamap_t map;
4105 struct tx_bd *txbd = NULL;
4106 struct mbuf *m0;
4107 u_int16_t vlan_tag = 0, flags = 0;
4108 u_int16_t chain_prod, prod;
4109 #ifdef BNX_DEBUG
4110 u_int16_t debug_prod;
4111 #endif
4112 u_int32_t addr, prod_bseq;
4113 int i, error, rc = 0;
4114 struct m_tag *mtag;
4115
4116 m0 = *m_head;
4117
4118 /* Transfer any checksum offload flags to the bd. */
4119 if (m0->m_pkthdr.csum_flags) {
4120 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
4121 flags |= TX_BD_FLAGS_IP_CKSUM;
4122 if (m0->m_pkthdr.csum_flags &
4123 (M_CSUM_TCPv4 | M_CSUM_UDPv4))
4124 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4125 }
4126
4127 /* Transfer any VLAN tags to the bd. */
4128 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m0);
4129 if (mtag != NULL) {
4130 flags |= TX_BD_FLAGS_VLAN_TAG;
4131 vlan_tag = VLAN_TAG_VALUE(mtag);
4132 }
4133
4134 /* Map the mbuf into DMAable memory. */
4135 prod = sc->tx_prod;
4136 chain_prod = TX_CHAIN_IDX(prod);
4137 map = sc->tx_mbuf_map[chain_prod];
4138
4139 /* Map the mbuf into our DMA address space. */
4140 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
4141 if (error != 0) {
4142 aprint_error_dev(sc->bnx_dev,
4143 "Error mapping mbuf into TX chain!\n");
4144 m_freem(m0);
4145 *m_head = NULL;
4146 return (error);
4147 }
4148 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
4149 BUS_DMASYNC_PREWRITE);
4150 /*
4151 * The chip seems to require that at least 16 descriptors be kept
4152 * empty at all times. Make sure we honor that.
4153 * XXX Would it be faster to assume worst case scenario for
4154 * map->dm_nsegs and do this calculation higher up?
4155 */
4156 if (map->dm_nsegs > (USABLE_TX_BD - sc->used_tx_bd - BNX_TX_SLACK_SPACE)) {
4157 bus_dmamap_unload(sc->bnx_dmatag, map);
4158 return (ENOBUFS);
4159 }
4160
4161 /* prod points to an empty tx_bd at this point. */
4162 prod_bseq = sc->tx_prod_bseq;
4163 #ifdef BNX_DEBUG
4164 debug_prod = chain_prod;
4165 #endif
4166 DBPRINT(sc, BNX_INFO_SEND,
4167 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4168 "prod_bseq = 0x%08X\n",
4169 	    __func__, prod, chain_prod, prod_bseq);
4170
4171 /*
4172 * Cycle through each mbuf segment that makes up
4173 * the outgoing frame, gathering the mapping info
4174 * for that segment and creating a tx_bd for the
4175 * mbuf.
4176 */
4177 for (i = 0; i < map->dm_nsegs ; i++) {
4178 chain_prod = TX_CHAIN_IDX(prod);
4179 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4180
4181 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
4182 txbd->tx_bd_haddr_lo = htole32(addr);
4183 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4184 txbd->tx_bd_haddr_hi = htole32(addr);
4185 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
4186 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4187 txbd->tx_bd_flags = htole16(flags);
4188 prod_bseq += map->dm_segs[i].ds_len;
4189 if (i == 0)
4190 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4191 prod = NEXT_TX_BD(prod);
4192 }
4193 /* Set the END flag on the last TX buffer descriptor. */
4194 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
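	/*
	 * Illustrative flag placement for a frame that mapped to three
	 * DMA segments (map->dm_nsegs == 3):
	 *
	 *	tx_bd[0]: TX_BD_FLAGS_START	(set in the loop, i == 0)
	 *	tx_bd[1]: neither flag
	 *	tx_bd[2]: TX_BD_FLAGS_END	(set just above)
	 *
	 * A single-segment frame carries both flags on the same tx_bd.
	 */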
4195
4196 	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));
4197
4198 DBPRINT(sc, BNX_INFO_SEND,
4199 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
4200 "prod_bseq = 0x%08X\n",
4201 __func__, prod, chain_prod, prod_bseq);
4202
4203 /*
4204 * Ensure that the mbuf pointer for this
4205 * transmission is placed at the array
4206 * index of the last descriptor in this
4207 * chain. This is done because a single
4208 * map is used for all segments of the mbuf
4209 * and we don't want to unload the map before
4210 * all of the segments have been freed.
4211 */
4212 sc->tx_mbuf_ptr[chain_prod] = m0;
4213 sc->used_tx_bd += map->dm_nsegs;
4214
4215 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4216 sc->tx_hi_watermark = sc->used_tx_bd);
4217
4218 DBRUNIF(1, sc->tx_mbuf_alloc++);
4219
4220 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4221 	    map->dm_nsegs));
4222
4223 /* prod points to the next free tx_bd at this point. */
4224 sc->tx_prod = prod;
4225 sc->tx_prod_bseq = prod_bseq;
4226
4227 return (rc);
4228 }
4229
4230 /****************************************************************************/
4231 /* Main transmit routine. */
4232 /* */
4233 /* Returns: */
4234 /* Nothing. */
4235 /****************************************************************************/
4236 void
4237 bnx_start(struct ifnet *ifp)
4238 {
4239 struct bnx_softc *sc = ifp->if_softc;
4240 struct mbuf *m_head = NULL;
4241 int count = 0;
4242 u_int16_t tx_prod, tx_chain_prod;
4243
4244 /* If there's no link or the transmit queue is empty then just exit. */
4245 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) {
4246 DBPRINT(sc, BNX_INFO_SEND,
4247 "%s(): output active or device not running.\n", __func__);
4248 goto bnx_start_exit;
4249 }
4250
4251 /* prod points to the next free tx_bd. */
4252 tx_prod = sc->tx_prod;
4253 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4254
4255 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4256 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4257 __func__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4258
4259 /*
4260 * Keep adding entries while there is space in the ring. We keep
4261 * BNX_TX_SLACK_SPACE entries unused at all times.
4262 */
4263 while (sc->used_tx_bd < USABLE_TX_BD - BNX_TX_SLACK_SPACE) {
4264 /* Check for any frames to send. */
4265 IFQ_POLL(&ifp->if_snd, m_head);
4266 if (m_head == NULL)
4267 break;
4268
4269 /*
4270 * Pack the data into the transmit ring. If we
4271 * don't have room, set the OACTIVE flag to wait
4272 * for the NIC to drain the chain.
4273 */
4274 if (bnx_tx_encap(sc, &m_head)) {
4275 ifp->if_flags |= IFF_OACTIVE;
4276 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4277 "business! Total tx_bd used = %d\n",
4278 sc->used_tx_bd);
4279 break;
4280 }
4281
4282 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4283 count++;
4284
4285 #if NBPFILTER > 0
4286 /* Send a copy of the frame to any BPF listeners. */
4287 if (ifp->if_bpf)
4288 bpf_mtap(ifp->if_bpf, m_head);
4289 #endif
4290 }
4291
4292 if (count == 0) {
4293 /* no packets were dequeued */
4294 DBPRINT(sc, BNX_VERBOSE_SEND,
4295 "%s(): No packets were dequeued\n", __func__);
4296 goto bnx_start_exit;
4297 }
4298
4299 /* Update the driver's counters. */
4300 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4301
4302 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4303 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, tx_prod,
4304 tx_chain_prod, sc->tx_prod_bseq);
4305
4306 /* Start the transmit. */
4307 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4308 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4309
4310 /* Set the tx timeout. */
4311 ifp->if_timer = BNX_TX_TIMEOUT;
4312
4313 bnx_start_exit:
4314 return;
4315 }
4316
4317 /****************************************************************************/
4318 /* Handles any IOCTL calls from the operating system. */
4319 /* */
4320 /* Returns: */
4321 /* 0 for success, positive value for failure. */
4322 /****************************************************************************/
4323 int
4324 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
4325 {
4326 struct bnx_softc *sc = ifp->if_softc;
4327 struct ifreq *ifr = (struct ifreq *) data;
4328 struct mii_data *mii = &sc->bnx_mii;
4329 int s, error = 0;
4330
4331 s = splnet();
4332
4333 switch (command) {
4334 case SIOCSIFFLAGS:
4335 if (ifp->if_flags & IFF_UP) {
4336 if ((ifp->if_flags & IFF_RUNNING) &&
4337 ((ifp->if_flags ^ sc->bnx_if_flags) &
4338 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
4339 bnx_set_rx_mode(sc);
4340 } else if (!(ifp->if_flags & IFF_RUNNING))
4341 bnx_init(ifp);
4342
4343 } else if (ifp->if_flags & IFF_RUNNING)
4344 bnx_stop(ifp, 1);
4345
4346 sc->bnx_if_flags = ifp->if_flags;
4347 break;
4348
4349 case SIOCSIFMEDIA:
4350 case SIOCGIFMEDIA:
4351 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4352 sc->bnx_phy_flags);
4353
4354 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4355 break;
4356
4357 default:
4358 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
4359 break;
4360
4361 error = 0;
4362
4363 		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
4364 ;
4365 else if (ifp->if_flags & IFF_RUNNING) {
4366 /* reload packet filter if running */
4367 bnx_set_rx_mode(sc);
4368 }
4369 break;
4370 }
4371
4372 splx(s);
4373
4374 return (error);
4375 }
4376
4377 /****************************************************************************/
4378 /* Transmit timeout handler. */
4379 /* */
4380 /* Returns: */
4381 /* Nothing. */
4382 /****************************************************************************/
4383 void
4384 bnx_watchdog(struct ifnet *ifp)
4385 {
4386 struct bnx_softc *sc = ifp->if_softc;
4387
4388 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
4389 bnx_dump_status_block(sc));
4390
4391 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
4392
4393 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
4394
4395 bnx_init(ifp);
4396
4397 ifp->if_oerrors++;
4398 }
4399
4403 /****************************************************************************/
4404 /* Main interrupt entry point. Verifies that the controller generated the */
4405 /* interrupt and then calls a separate routine to handle the various          */
4406 /* interrupt causes (PHY, TX, RX). */
4407 /* */
4408 /* Returns: */
4409 /* 0 for success, positive value for failure. */
4410 /****************************************************************************/
4411 int
4412 bnx_intr(void *xsc)
4413 {
4414 struct bnx_softc *sc;
4415 struct ifnet *ifp;
4416 u_int32_t status_attn_bits;
4417 const struct status_block *sblk;
4418
4419 sc = xsc;
4420 if (!device_is_active(sc->bnx_dev))
4421 return 0;
4422
4423 ifp = &sc->bnx_ec.ec_if;
4424
4425 DBRUNIF(1, sc->interrupts_generated++);
4426
4427 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4428 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4429
4430 /*
4431 * If the hardware status block index
4432 * matches the last value read by the
4433 * driver and we haven't asserted our
4434 * interrupt then there's nothing to do.
4435 */
4436 if ((sc->status_block->status_idx == sc->last_status_idx) &&
4437 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
4438 BNX_PCICFG_MISC_STATUS_INTA_VALUE))
4439 return (0);
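	/*
	 * Note: the test above is the shared-interrupt filter.  If the
	 * status block index has not moved since the last pass and the
	 * INTA_VALUE bit still reads 1 (INTA# is active-low, so 1
	 * presumably means the pin is deasserted), the interrupt belongs
	 * to another device sharing the line and the handler claims
	 * nothing.
	 */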
4440
4441 	/* Ack the interrupt and stop others from occurring. */
4442 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4443 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4444 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4445
4446 /* Keep processing data as long as there is work to do. */
4447 for (;;) {
4448 sblk = sc->status_block;
4449 status_attn_bits = sblk->status_attn_bits;
4450
4451 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
4452 aprint_debug("Simulating unexpected status attention bit set.");
4453 status_attn_bits = status_attn_bits |
4454 STATUS_ATTN_BITS_PARITY_ERROR);
4455
4456 /* Was it a link change interrupt? */
4457 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4458 (sblk->status_attn_bits_ack &
4459 STATUS_ATTN_BITS_LINK_STATE))
4460 bnx_phy_intr(sc);
4461
4462 /* If any other attention is asserted then the chip is toast. */
4463 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4464 (sblk->status_attn_bits_ack &
4465 ~STATUS_ATTN_BITS_LINK_STATE))) {
4466 			DBRUNIF(1, sc->unexpected_attentions++);
4467
4468 aprint_error_dev(sc->bnx_dev,
4469 "Fatal attention detected: 0x%08X\n",
4470 sblk->status_attn_bits);
4471
4472 DBRUN(BNX_FATAL,
4473 if (bnx_debug_unexpected_attention == 0)
4474 bnx_breakpoint(sc));
4475
4476 bnx_init(ifp);
4477 return (1);
4478 }
4479
4480 /* Check for any completed RX frames. */
4481 if (sblk->status_rx_quick_consumer_index0 !=
4482 sc->hw_rx_cons)
4483 bnx_rx_intr(sc);
4484
4485 /* Check for any completed TX frames. */
4486 if (sblk->status_tx_quick_consumer_index0 !=
4487 sc->hw_tx_cons)
4488 bnx_tx_intr(sc);
4489
4490 /* Save the status block index value for use during the
4491 * next interrupt.
4492 */
4493 sc->last_status_idx = sblk->status_idx;
4494
4495 /* Prevent speculative reads from getting ahead of the
4496 * status block.
4497 */
4498 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4499 BUS_SPACE_BARRIER_READ);
4500
4501 /* If there's no work left then exit the isr. */
4502 if ((sblk->status_rx_quick_consumer_index0 ==
4503 sc->hw_rx_cons) &&
4504 (sblk->status_tx_quick_consumer_index0 ==
4505 sc->hw_tx_cons))
4506 break;
4507 }
4508
4509 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4510 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
4511
4512 /* Re-enable interrupts. */
4513 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4514 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4515 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4516 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4517 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4518
4519 /* Handle any frames that arrived while handling the interrupt. */
4520 if (!IFQ_IS_EMPTY(&ifp->if_snd))
4521 bnx_start(ifp);
4522
4523 return (1);
4524 }
4525
4526 /****************************************************************************/
4527 /* Programs the various packet receive modes (broadcast and multicast). */
4528 /* */
4529 /* Returns: */
4530 /* Nothing. */
4531 /****************************************************************************/
4532 void
4533 bnx_set_rx_mode(struct bnx_softc *sc)
4534 {
4535 struct ethercom *ec = &sc->bnx_ec;
4536 struct ifnet *ifp = &ec->ec_if;
4537 struct ether_multi *enm;
4538 struct ether_multistep step;
4539 u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4540 u_int32_t rx_mode, sort_mode;
4541 int h, i;
4542
4543 /* Initialize receive mode default settings. */
4544 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
4545 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
4546 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
4547
4548 /*
4549 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4550 	 * be enabled.
4551 */
4552 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
4553 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
4554
4555 /*
4556 * Check for promiscuous, all multicast, or selected
4557 * multicast address filtering.
4558 */
4559 if (ifp->if_flags & IFF_PROMISC) {
4560 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
4561
4562 /* Enable promiscuous mode. */
4563 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
4564 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
4565 } else if (ifp->if_flags & IFF_ALLMULTI) {
4566 allmulti:
4567 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
4568
4569 /* Enable all multicast addresses. */
4570 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4571 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4572 0xffffffff);
4573 sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
4574 } else {
4575 /* Accept one or more multicast(s). */
4576 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
4577
4578 ETHER_FIRST_MULTI(step, ec, enm);
4579 while (enm != NULL) {
4580 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4581 ETHER_ADDR_LEN)) {
4582 ifp->if_flags |= IFF_ALLMULTI;
4583 goto allmulti;
4584 }
4585 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
4586 0xFF;
4587 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4588 ETHER_NEXT_MULTI(step, enm);
4589 }
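		/*
		 * Worked example of the hash above: h is the low byte of
		 * the little-endian CRC32 of the address; bits 7:5 select
		 * one of the eight hash registers and bits 4:0 the bit
		 * within it.  For h == 0xA3: register (0xA3 & 0xE0) >> 5
		 * == 5, bit 0xA3 & 0x1F == 3, i.e. hashes[5] |= 1 << 3.
		 */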
4590
4591 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4592 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4593 hashes[i]);
4594
4595 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
4596 }
4597
4598 	/* Only make changes if the receive mode has actually changed. */
4599 if (rx_mode != sc->rx_mode) {
4600 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4601 rx_mode);
4602
4603 sc->rx_mode = rx_mode;
4604 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
4605 }
4606
4607 	/* Disable and clear the existing sort before enabling a new sort. */
4608 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
4609 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
4610 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
4611 }
4612
4613 /****************************************************************************/
4614 /* Called periodically to update statistics from the controller's           */
4615 /* statistics block. */
4616 /* */
4617 /* Returns: */
4618 /* Nothing. */
4619 /****************************************************************************/
4620 void
4621 bnx_stats_update(struct bnx_softc *sc)
4622 {
4623 struct ifnet *ifp = &sc->bnx_ec.ec_if;
4624 struct statistics_block *stats;
4625
4626 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
4627 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4628 BUS_DMASYNC_POSTREAD);
4629
4630 stats = (struct statistics_block *)sc->stats_block;
4631
4632 /*
4633 * Update the interface statistics from the
4634 * hardware statistics.
4635 */
4636 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
4637
4638 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
4639 (u_long)stats->stat_EtherStatsOverrsizePkts +
4640 (u_long)stats->stat_IfInMBUFDiscards +
4641 (u_long)stats->stat_Dot3StatsAlignmentErrors +
4642 (u_long)stats->stat_Dot3StatsFCSErrors;
4643
4644 ifp->if_oerrors = (u_long)
4645 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4646 (u_long)stats->stat_Dot3StatsExcessiveCollisions +
4647 (u_long)stats->stat_Dot3StatsLateCollisions;
4648
4649 /*
4650 * Certain controllers don't report
4651 * carrier sense errors correctly.
4652 * See errata E11_5708CA0_1165.
4653 */
4654 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
4655 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
4656 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
4657
4658 /*
4659 * Update the sysctl statistics from the
4660 * hardware statistics.
4661 */
4662 sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
4663 (u_int64_t) stats->stat_IfHCInOctets_lo;
4664
4665 sc->stat_IfHCInBadOctets =
4666 ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
4667 (u_int64_t) stats->stat_IfHCInBadOctets_lo;
4668
4669 sc->stat_IfHCOutOctets =
4670 ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
4671 (u_int64_t) stats->stat_IfHCOutOctets_lo;
4672
4673 sc->stat_IfHCOutBadOctets =
4674 ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
4675 (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
4676
4677 sc->stat_IfHCInUcastPkts =
4678 ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
4679 (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
4680
4681 sc->stat_IfHCInMulticastPkts =
4682 ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
4683 (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
4684
4685 sc->stat_IfHCInBroadcastPkts =
4686 ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
4687 (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
4688
4689 sc->stat_IfHCOutUcastPkts =
4690 ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
4691 (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
4692
4693 sc->stat_IfHCOutMulticastPkts =
4694 ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
4695 (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
4696
4697 sc->stat_IfHCOutBroadcastPkts =
4698 ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
4699 (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
4700
4701 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
4702 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
4703
4704 sc->stat_Dot3StatsCarrierSenseErrors =
4705 stats->stat_Dot3StatsCarrierSenseErrors;
4706
4707 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
4708
4709 sc->stat_Dot3StatsAlignmentErrors =
4710 stats->stat_Dot3StatsAlignmentErrors;
4711
4712 sc->stat_Dot3StatsSingleCollisionFrames =
4713 stats->stat_Dot3StatsSingleCollisionFrames;
4714
4715 sc->stat_Dot3StatsMultipleCollisionFrames =
4716 stats->stat_Dot3StatsMultipleCollisionFrames;
4717
4718 sc->stat_Dot3StatsDeferredTransmissions =
4719 stats->stat_Dot3StatsDeferredTransmissions;
4720
4721 sc->stat_Dot3StatsExcessiveCollisions =
4722 stats->stat_Dot3StatsExcessiveCollisions;
4723
4724 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
4725
4726 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
4727
4728 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
4729
4730 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
4731
4732 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
4733
4734 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
4735
4736 sc->stat_EtherStatsPktsRx64Octets =
4737 stats->stat_EtherStatsPktsRx64Octets;
4738
4739 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
4740 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
4741
4742 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
4743 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
4744
4745 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
4746 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
4747
4748 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
4749 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
4750
4751 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
4752 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
4753
4754 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
4755 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
4756
4757 sc->stat_EtherStatsPktsTx64Octets =
4758 stats->stat_EtherStatsPktsTx64Octets;
4759
4760 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
4761 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
4762
4763 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
4764 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
4765
4766 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
4767 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
4768
4769 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
4770 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
4771
4772 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
4773 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
4774
4775 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
4776 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
4777
4778 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
4779
4780 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
4781
4782 sc->stat_OutXonSent = stats->stat_OutXonSent;
4783
4784 sc->stat_OutXoffSent = stats->stat_OutXoffSent;
4785
4786 sc->stat_FlowControlDone = stats->stat_FlowControlDone;
4787
4788 sc->stat_MacControlFramesReceived =
4789 stats->stat_MacControlFramesReceived;
4790
4791 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
4792
4793 sc->stat_IfInFramesL2FilterDiscards =
4794 stats->stat_IfInFramesL2FilterDiscards;
4795
4796 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
4797
4798 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
4799
4800 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
4801
4802 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
4803
4804 sc->stat_CatchupInRuleCheckerDiscards =
4805 stats->stat_CatchupInRuleCheckerDiscards;
4806
4807 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
4808
4809 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
4810
4811 sc->stat_CatchupInRuleCheckerP4Hit =
4812 stats->stat_CatchupInRuleCheckerP4Hit;
4813
4814 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
4815 }
4816
4817 void
4818 bnx_tick(void *xsc)
4819 {
4820 struct bnx_softc *sc = xsc;
4821 struct mii_data *mii;
4822 u_int32_t msg;
4823 u_int16_t prod, chain_prod;
4824 u_int32_t prod_bseq;
4825 int s = splnet();
4826
4827 /* Tell the firmware that the driver is still running. */
4828 #ifdef BNX_DEBUG
4829 msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
4830 #else
4831 msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
4832 #endif
4833 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
4834
4835 /* Update the statistics from the hardware statistics block. */
4836 bnx_stats_update(sc);
4837
4838 /* Schedule the next tick. */
4839 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4840
4841 mii = &sc->bnx_mii;
4842 mii_tick(mii);
4843
4844 /* try to get more RX buffers, just in case */
4845 prod = sc->rx_prod;
4846 prod_bseq = sc->rx_prod_bseq;
4847 chain_prod = RX_CHAIN_IDX(prod);
4848 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
4849 sc->rx_prod = prod;
4850 sc->rx_prod_bseq = prod_bseq;
4851 splx(s);
4852 return;
4853 }
4854
4855 /****************************************************************************/
4856 /* BNX Debug Routines */
4857 /****************************************************************************/
4858 #ifdef BNX_DEBUG
4859
4860 /****************************************************************************/
4861 /* Prints out information about an mbuf. */
4862 /* */
4863 /* Returns: */
4864 /* Nothing. */
4865 /****************************************************************************/
4866 void
4867 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
4868 {
4869 struct mbuf *mp = m;
4870
4871 if (m == NULL) {
4872 		/* Nothing to dump. */
4873 aprint_error("mbuf ptr is null!\n");
4874 return;
4875 }
4876
4877 while (mp) {
4878 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
4879 mp, mp->m_len);
4880
4881 if (mp->m_flags & M_EXT)
4882 aprint_debug("M_EXT ");
4883 if (mp->m_flags & M_PKTHDR)
4884 aprint_debug("M_PKTHDR ");
4885 aprint_debug("\n");
4886
4887 if (mp->m_flags & M_EXT)
4888 aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n",
4889 mp, mp->m_ext.ext_size);
4890
4891 mp = mp->m_next;
4892 }
4893 }
4894
4895 /****************************************************************************/
4896 /* Prints out the mbufs in the TX mbuf chain. */
4897 /* */
4898 /* Returns: */
4899 /* Nothing. */
4900 /****************************************************************************/
4901 void
4902 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4903 {
4904 struct mbuf *m;
4905 int i;
4906
4907 BNX_PRINTF(sc,
4908 "----------------------------"
4909 " tx mbuf data "
4910 "----------------------------\n");
4911
4912 for (i = 0; i < count; i++) {
4913 m = sc->tx_mbuf_ptr[chain_prod];
4914 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
4915 bnx_dump_mbuf(sc, m);
4916 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
4917 }
4918
4919 BNX_PRINTF(sc,
4920 "--------------------------------------------"
4921 "----------------------------\n");
4922 }
4923
4924 /*
4925 * This routine prints the RX mbuf chain.
4926 */
4927 void
4928 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4929 {
4930 struct mbuf *m;
4931 int i;
4932
4933 BNX_PRINTF(sc,
4934 "----------------------------"
4935 " rx mbuf data "
4936 "----------------------------\n");
4937
4938 for (i = 0; i < count; i++) {
4939 m = sc->rx_mbuf_ptr[chain_prod];
4940 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
4941 bnx_dump_mbuf(sc, m);
4942 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
4943 }
4944
4945
4946 BNX_PRINTF(sc,
4947 "--------------------------------------------"
4948 "----------------------------\n");
4949 }
4950
4951 void
4952 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
4953 {
4954 if (idx > MAX_TX_BD)
4955 /* Index out of range. */
4956 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
4957 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4958 /* TX Chain page pointer. */
4959 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
4960 "page pointer\n", idx, txbd->tx_bd_haddr_hi,
4961 txbd->tx_bd_haddr_lo);
4962 else
4963 /* Normal tx_bd entry. */
4964 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
4965 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
4966 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
4967 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
4968 txbd->tx_bd_flags);
4969 }
4970
4971 void
4972 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
4973 {
4974 if (idx > MAX_RX_BD)
4975 /* Index out of range. */
4976 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
4977 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4978 		/* RX Chain page pointer. */
4979 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
4980 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
4981 rxbd->rx_bd_haddr_lo);
4982 else
4983 		/* Normal rx_bd entry. */
4984 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
4985 "0x%08X, flags = 0x%08X\n", idx,
4986 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
4987 rxbd->rx_bd_len, rxbd->rx_bd_flags);
4988 }
4989
4990 void
4991 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
4992 {
4993 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
4994 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
4995 "tcp_udp_xsum = 0x%04X\n", idx,
4996 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
4997 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
4998 l2fhdr->l2_fhdr_tcp_udp_xsum);
4999 }
5000
5001 /*
5002 * This routine prints the TX chain.
5003 */
5004 void
5005 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5006 {
5007 struct tx_bd *txbd;
5008 int i;
5009
5010 /* First some info about the tx_bd chain structure. */
5011 BNX_PRINTF(sc,
5012 "----------------------------"
5013 " tx_bd chain "
5014 "----------------------------\n");
5015
5016 BNX_PRINTF(sc,
5017 "page size = 0x%08X, tx chain pages = 0x%08X\n",
5018 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5019
5020 BNX_PRINTF(sc,
5021 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5022 (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5023
5024 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5025
5026 BNX_PRINTF(sc, ""
5027 "-----------------------------"
5028 " tx_bd data "
5029 "-----------------------------\n");
5030
5031 /* Now print out the tx_bd's themselves. */
5032 for (i = 0; i < count; i++) {
5033 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5034 bnx_dump_txbd(sc, tx_prod, txbd);
5035 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5036 }
5037
5038 BNX_PRINTF(sc,
5039 "-----------------------------"
5040 "--------------"
5041 "-----------------------------\n");
5042 }
5043
5044 /*
5045 * This routine prints the RX chain.
5046 */
5047 void
5048 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5049 {
5050 struct rx_bd *rxbd;
5051 int i;
5052
5053 	/* First some info about the rx_bd chain structure. */
5054 BNX_PRINTF(sc,
5055 "----------------------------"
5056 " rx_bd chain "
5057 "----------------------------\n");
5058
5059 BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5060
5061 BNX_PRINTF(sc,
5062 "page size = 0x%08X, rx chain pages = 0x%08X\n",
5063 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5064
5065 BNX_PRINTF(sc,
5066 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5067 (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5068
5069 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5070
5071 BNX_PRINTF(sc,
5072 "----------------------------"
5073 " rx_bd data "
5074 "----------------------------\n");
5075
5076 /* Now print out the rx_bd's themselves. */
5077 for (i = 0; i < count; i++) {
5078 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5079 bnx_dump_rxbd(sc, rx_prod, rxbd);
5080 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5081 }
5082
5083 BNX_PRINTF(sc,
5084 "----------------------------"
5085 "--------------"
5086 "----------------------------\n");
5087 }
5088
5089 /*
5090 * This routine prints the status block.
5091 */
5092 void
5093 bnx_dump_status_block(struct bnx_softc *sc)
5094 {
5095 struct status_block *sblk;
5096 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5097 BUS_DMASYNC_POSTREAD);
5098
5099 sblk = sc->status_block;
5100
5101 BNX_PRINTF(sc, "----------------------------- Status Block "
5102 "-----------------------------\n");
5103
5104 BNX_PRINTF(sc,
5105 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5106 sblk->status_attn_bits, sblk->status_attn_bits_ack,
5107 sblk->status_idx);
5108
5109 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
5110 sblk->status_rx_quick_consumer_index0,
5111 sblk->status_tx_quick_consumer_index0);
5112
5113 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5114
5115 	/* These indices are not used for normal L2 drivers. */
5116 if (sblk->status_rx_quick_consumer_index1 ||
5117 sblk->status_tx_quick_consumer_index1)
5118 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
5119 sblk->status_rx_quick_consumer_index1,
5120 sblk->status_tx_quick_consumer_index1);
5121
5122 if (sblk->status_rx_quick_consumer_index2 ||
5123 sblk->status_tx_quick_consumer_index2)
5124 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
5125 sblk->status_rx_quick_consumer_index2,
5126 sblk->status_tx_quick_consumer_index2);
5127
5128 if (sblk->status_rx_quick_consumer_index3 ||
5129 sblk->status_tx_quick_consumer_index3)
5130 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
5131 sblk->status_rx_quick_consumer_index3,
5132 sblk->status_tx_quick_consumer_index3);
5133
5134 if (sblk->status_rx_quick_consumer_index4 ||
5135 sblk->status_rx_quick_consumer_index5)
5136 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
5137 sblk->status_rx_quick_consumer_index4,
5138 sblk->status_rx_quick_consumer_index5);
5139
5140 if (sblk->status_rx_quick_consumer_index6 ||
5141 sblk->status_rx_quick_consumer_index7)
5142 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
5143 sblk->status_rx_quick_consumer_index6,
5144 sblk->status_rx_quick_consumer_index7);
5145
5146 if (sblk->status_rx_quick_consumer_index8 ||
5147 sblk->status_rx_quick_consumer_index9)
5148 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
5149 sblk->status_rx_quick_consumer_index8,
5150 sblk->status_rx_quick_consumer_index9);
5151
5152 if (sblk->status_rx_quick_consumer_index10 ||
5153 sblk->status_rx_quick_consumer_index11)
5154 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
5155 sblk->status_rx_quick_consumer_index10,
5156 sblk->status_rx_quick_consumer_index11);
5157
5158 if (sblk->status_rx_quick_consumer_index12 ||
5159 sblk->status_rx_quick_consumer_index13)
5160 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
5161 sblk->status_rx_quick_consumer_index12,
5162 sblk->status_rx_quick_consumer_index13);
5163
5164 if (sblk->status_rx_quick_consumer_index14 ||
5165 sblk->status_rx_quick_consumer_index15)
5166 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
5167 sblk->status_rx_quick_consumer_index14,
5168 sblk->status_rx_quick_consumer_index15);
5169
5170 if (sblk->status_completion_producer_index ||
5171 sblk->status_cmd_consumer_index)
5172 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
5173 sblk->status_completion_producer_index,
5174 sblk->status_cmd_consumer_index);
5175
5176 BNX_PRINTF(sc, "-------------------------------------------"
5177 "-----------------------------\n");
5178 }
5179
5180 /*
5181 * This routine prints the statistics block.
5182 */
5183 void
5184 bnx_dump_stats_block(struct bnx_softc *sc)
5185 {
5186 struct statistics_block *sblk;
5187 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5188 BUS_DMASYNC_POSTREAD);
5189
5190 sblk = sc->stats_block;
5191
	BNX_PRINTF(sc,
	    "-----------------------------"
	    " Stats Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
	    "IfHcInBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
	    "IfHcOutBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
	    "IfHcInMulticastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
	    "IfHcOutUcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	/* Note: "Overrsize" spelling matches the statistics_block field. */
	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

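#if 0
/*
 * Illustrative sketch only (not compiled): each stat_*_hi/stat_*_lo pair
 * dumped above holds the two halves of one of the controller's 64-bit
 * "high capacity" (IfHC*) counters.  A hypothetical helper, not part of
 * this driver, could recombine a pair like so:
 */
static u_int64_t
bnx_stat64(u_int32_t hi, u_int32_t lo)
{

	/* Shift the high word up and merge in the low word. */
	return (((u_int64_t)hi << 32) | (u_int64_t)lo);
}
#endif
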
void
bnx_dump_driver_state(struct bnx_softc *sc)
{
	BNX_PRINTF(sc,
	    "-----------------------------"
	    " Driver State "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
	    "address\n", sc);

	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
	    sc->status_block);

	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
	    "address\n", sc->stats_block);

	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
	    "address\n", sc->tx_bd_chain);

	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
	    sc->rx_bd_chain);

	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
	    sc->tx_mbuf_ptr);

	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
	    sc->rx_mbuf_ptr);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
	    sc->interrupts_generated);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
	    sc->rx_interrupts);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
	    sc->tx_interrupts);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->last_status_idx) status block index\n",
	    sc->last_status_idx);

	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
	    sc->tx_prod);

	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
	    sc->tx_cons);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
	    sc->tx_prod_bseq);

	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
	    sc->rx_prod);

	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
	    sc->rx_cons);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
	    sc->rx_prod_bseq);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
	    sc->rx_mbuf_alloc);

	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
	    sc->free_rx_bd);

	BNX_PRINTF(sc,
	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
	    sc->rx_low_watermark, (u_int32_t) USABLE_RX_BD);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
	    sc->tx_mbuf_alloc);

	BNX_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
	    sc->used_tx_bd);

	BNX_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
	    sc->tx_hi_watermark, (u_int32_t) USABLE_TX_BD);

	BNX_PRINTF(sc,
	    "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
	    sc->mbuf_alloc_failed);

	BNX_PRINTF(sc, "-------------------------------------------"
	    "-----------------------------\n");
}

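#if 0
/*
 * Illustrative only (not compiled): under BNX_DEBUG these dump routines
 * are typically reached through the DBRUNIF() macro from if_bnxreg.h
 * rather than called directly; the condition below is a hypothetical
 * example, not a real driver check.
 */
DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), bnx_dump_driver_state(sc));
#endif
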
void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	u_int32_t val1;
	int i;

	BNX_PRINTF(sc,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

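/*
 * For reference (a sketch, assuming the usual definitions in if_bnxreg.h):
 * each REG_RD() above is a 32-bit bus-space read of the controller's
 * memory-mapped register window, roughly
 *
 *	val = bus_space_read_4(sc->bnx_btag, sc->bnx_bhandle, offset);
 *
 * so the register dump loop in bnx_dump_hw_state() walks offsets
 * 0x400-0x7ffc and prints four registers (0x10 bytes) per line.
 */
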
void
bnx_breakpoint(struct bnx_softc *sc)
{
	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, USABLE_RX_BD);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	/* Print the driver state. */
	bnx_dump_driver_state(sc);

	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger. */
	breakpoint();
#endif

	return;
}
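
#if 0
/*
 * Illustrative only (not compiled): bnx_breakpoint() is meant to be wired
 * into datapath checks through DBRUNIF() from if_bnxreg.h, for example
 * trapping on any unexpected (non-link-state) attention bit (sketch,
 * assuming STATUS_ATTN_BITS_LINK_STATE from the register header):
 */
DBRUNIF((sblk->status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE),
    bnx_breakpoint(sc));
#endif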
#endif
