ixgbe.c revision 1.152 1 /* $NetBSD: ixgbe.c,v 1.152 2018/05/15 09:30:56 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 char ixgbe_driver_version[] = "4.0.1-k";
84
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static void ixgbe_watchdog(struct ifnet *);
188 static bool ixgbe_watchdog_txq(struct ifnet *, struct tx_ring *, bool *);
189 static int ixgbe_setup_interface(device_t, struct adapter *);
190 static void ixgbe_config_gpie(struct adapter *);
191 static void ixgbe_config_dmac(struct adapter *);
192 static void ixgbe_config_delay_values(struct adapter *);
193 static void ixgbe_config_link(struct adapter *);
194 static void ixgbe_check_wol_support(struct adapter *);
195 static int ixgbe_setup_low_power_mode(struct adapter *);
196 static void ixgbe_rearm_queues(struct adapter *, u64);
197
198 static void ixgbe_initialize_transmit_units(struct adapter *);
199 static void ixgbe_initialize_receive_units(struct adapter *);
200 static void ixgbe_enable_rx_drop(struct adapter *);
201 static void ixgbe_disable_rx_drop(struct adapter *);
202 static void ixgbe_initialize_rss_mapping(struct adapter *);
203
204 static void ixgbe_enable_intr(struct adapter *);
205 static void ixgbe_disable_intr(struct adapter *);
206 static void ixgbe_update_stats_counters(struct adapter *);
207 static void ixgbe_set_promisc(struct adapter *);
208 static void ixgbe_set_multi(struct adapter *);
209 static void ixgbe_update_link_status(struct adapter *);
210 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
211 static void ixgbe_configure_ivars(struct adapter *);
212 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
213 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
214
215 static void ixgbe_setup_vlan_hw_support(struct adapter *);
216 #if 0
217 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
218 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
219 #endif
220
221 static void ixgbe_add_device_sysctls(struct adapter *);
222 static void ixgbe_add_hw_stats(struct adapter *);
223 static void ixgbe_clear_evcnt(struct adapter *);
224 static int ixgbe_set_flowcntl(struct adapter *, int);
225 static int ixgbe_set_advertise(struct adapter *, int);
226 static int ixgbe_get_advertise(struct adapter *);
227
228 /* Sysctl handlers */
229 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
230 const char *, int *, int);
231 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
232 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
235 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
236 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
237 #ifdef IXGBE_DEBUG
238 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
239 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
240 #endif
241 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
248
249 /* Support for pluggable optic modules */
250 static bool ixgbe_sfp_probe(struct adapter *);
251
252 /* Legacy (single vector) interrupt handler */
253 static int ixgbe_legacy_irq(void *);
254
255 /* The MSI/MSI-X Interrupt handlers */
256 static int ixgbe_msix_que(void *);
257 static int ixgbe_msix_link(void *);
258
259 /* Software interrupts for deferred work */
260 static void ixgbe_handle_que(void *);
261 static void ixgbe_handle_link(void *);
262 static void ixgbe_handle_msf(void *);
263 static void ixgbe_handle_mod(void *);
264 static void ixgbe_handle_phy(void *);
265
266 /* Workqueue handler for deferred work */
267 static void ixgbe_handle_que_work(struct work *, void *);
268
269 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
270
271 /************************************************************************
272 * NetBSD Device Interface Entry Points
273 ************************************************************************/
274 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
275 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
276 DVF_DETACH_SHUTDOWN);
277
278 #if 0
279 devclass_t ix_devclass;
280 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
281
282 MODULE_DEPEND(ix, pci, 1, 1, 1);
283 MODULE_DEPEND(ix, ether, 1, 1, 1);
284 #ifdef DEV_NETMAP
285 MODULE_DEPEND(ix, netmap, 1, 1, 1);
286 #endif
287 #endif
288
289 /*
290 * TUNEABLE PARAMETERS:
291 */
292
293 /*
294 * AIM: Adaptive Interrupt Moderation
295 * which means that the interrupt rate
296 * is varied over time based on the
297 * traffic for that interrupt vector
298 */
299 static bool ixgbe_enable_aim = true;
300 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
301 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
302 "Enable adaptive interrupt moderation");
303
304 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
305 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
306 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
307
308 /* How many packets rxeof tries to clean at a time */
309 static int ixgbe_rx_process_limit = 256;
310 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
311 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
312
313 /* How many packets txeof tries to clean at a time */
314 static int ixgbe_tx_process_limit = 256;
315 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
316 &ixgbe_tx_process_limit, 0,
317 "Maximum number of sent packets to process at a time, -1 means unlimited");
318
319 /* Flow control setting, default to full */
320 static int ixgbe_flow_control = ixgbe_fc_full;
321 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
322 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
323
324 /* Which packet processing uses workqueue or softint */
325 static bool ixgbe_txrx_workqueue = false;
326
327 /*
328 * Smart speed setting, default to on
329 * this only works as a compile option
330 * right now as its during attach, set
331 * this to 'ixgbe_smart_speed_off' to
332 * disable.
333 */
334 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
335
336 /*
337 * MSI-X should be the default for best performance,
338 * but this allows it to be forced off for testing.
339 */
340 static int ixgbe_enable_msix = 1;
341 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
342 "Enable MSI-X interrupts");
343
344 /*
345 * Number of Queues, can be set to 0,
346 * it then autoconfigures based on the
347 * number of cpus with a max of 8. This
348  * can be overridden manually here.
349 */
350 static int ixgbe_num_queues = 0;
351 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
352 "Number of queues to configure, 0 indicates autoconfigure");
353
354 /*
355 * Number of TX descriptors per ring,
356 * setting higher than RX as this seems
357 * the better performing choice.
358 */
359 static int ixgbe_txd = PERFORM_TXD;
360 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
361 "Number of transmit descriptors per queue");
362
363 /* Number of RX descriptors per ring */
364 static int ixgbe_rxd = PERFORM_RXD;
365 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
366 "Number of receive descriptors per queue");
367
368 /*
369 * Defining this on will allow the use
370 * of unsupported SFP+ modules, note that
371 * doing so you are on your own :)
372 */
373 static int allow_unsupported_sfp = false;
374 #define TUNABLE_INT(__x, __y)
375 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
376
377 /*
378 * Not sure if Flow Director is fully baked,
379 * so we'll default to turning it off.
380 */
381 static int ixgbe_enable_fdir = 0;
382 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
383 "Enable Flow Director");
384
385 /* Legacy Transmit (single queue) */
386 static int ixgbe_enable_legacy_tx = 0;
387 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
388 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
389
390 /* Receive-Side Scaling */
391 static int ixgbe_enable_rss = 1;
392 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
393 "Enable Receive-Side Scaling (RSS)");
394
395 /* Keep running tab on them for sanity check */
396 static int ixgbe_total_ports;
397
398 #if 0
399 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
400 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
401 #endif
402
403 #ifdef NET_MPSAFE
404 #define IXGBE_MPSAFE 1
405 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
406 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
407 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
408 #else
409 #define IXGBE_CALLOUT_FLAGS 0
410 #define IXGBE_SOFTINFT_FLAGS 0
411 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
412 #endif
413 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
414
415 /************************************************************************
416 * ixgbe_initialize_rss_mapping
417 ************************************************************************/
418 static void
419 ixgbe_initialize_rss_mapping(struct adapter *adapter)
420 {
421 struct ixgbe_hw *hw = &adapter->hw;
422 u32 reta = 0, mrqc, rss_key[10];
423 int queue_id, table_size, index_mult;
424 int i, j;
425 u32 rss_hash_config;
426
427 /* force use default RSS key. */
428 #ifdef __NetBSD__
429 rss_getkey((uint8_t *) &rss_key);
430 #else
431 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
432 /* Fetch the configured RSS key */
433 rss_getkey((uint8_t *) &rss_key);
434 } else {
435 /* set up random bits */
436 cprng_fast(&rss_key, sizeof(rss_key));
437 }
438 #endif
439
440 /* Set multiplier for RETA setup and table size based on MAC */
441 index_mult = 0x1;
442 table_size = 128;
443 switch (adapter->hw.mac.type) {
444 case ixgbe_mac_82598EB:
445 index_mult = 0x11;
446 break;
447 case ixgbe_mac_X550:
448 case ixgbe_mac_X550EM_x:
449 case ixgbe_mac_X550EM_a:
450 table_size = 512;
451 break;
452 default:
453 break;
454 }
455
456 /* Set up the redirection table */
457 for (i = 0, j = 0; i < table_size; i++, j++) {
458 if (j == adapter->num_queues)
459 j = 0;
460
461 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
462 /*
463 * Fetch the RSS bucket id for the given indirection
464 * entry. Cap it at the number of configured buckets
465 * (which is num_queues.)
466 */
467 queue_id = rss_get_indirection_to_bucket(i);
468 queue_id = queue_id % adapter->num_queues;
469 } else
470 queue_id = (j * index_mult);
471
472 /*
473 * The low 8 bits are for hash value (n+0);
474 * The next 8 bits are for hash value (n+1), etc.
475 */
476 reta = reta >> 8;
477 reta = reta | (((uint32_t) queue_id) << 24);
478 if ((i & 3) == 3) {
479 if (i < 128)
480 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
481 else
482 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
483 reta);
484 reta = 0;
485 }
486 }
487
488 /* Now fill our hash function seeds */
489 for (i = 0; i < 10; i++)
490 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
491
492 /* Perform hash on these packet types */
493 if (adapter->feat_en & IXGBE_FEATURE_RSS)
494 rss_hash_config = rss_gethashconfig();
495 else {
496 /*
497 * Disable UDP - IP fragments aren't currently being handled
498 * and so we end up with a mix of 2-tuple and 4-tuple
499 * traffic.
500 */
501 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
502 | RSS_HASHTYPE_RSS_TCP_IPV4
503 | RSS_HASHTYPE_RSS_IPV6
504 | RSS_HASHTYPE_RSS_TCP_IPV6
505 | RSS_HASHTYPE_RSS_IPV6_EX
506 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
507 }
508
509 mrqc = IXGBE_MRQC_RSSEN;
510 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
511 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
512 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
513 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
514 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
515 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
516 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
517 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
518 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
519 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
520 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
522 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
524 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
526 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
528 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
529 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
530 } /* ixgbe_initialize_rss_mapping */
531
532 /************************************************************************
533 * ixgbe_initialize_receive_units - Setup receive registers and features.
534 ************************************************************************/
535 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
536
537 static void
538 ixgbe_initialize_receive_units(struct adapter *adapter)
539 {
540 struct rx_ring *rxr = adapter->rx_rings;
541 struct ixgbe_hw *hw = &adapter->hw;
542 struct ifnet *ifp = adapter->ifp;
543 int i, j;
544 u32 bufsz, fctrl, srrctl, rxcsum;
545 u32 hlreg;
546
547 /*
548 * Make sure receives are disabled while
549 * setting up the descriptor ring
550 */
551 ixgbe_disable_rx(hw);
552
553 /* Enable broadcasts */
554 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
555 fctrl |= IXGBE_FCTRL_BAM;
556 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
557 fctrl |= IXGBE_FCTRL_DPF;
558 fctrl |= IXGBE_FCTRL_PMCF;
559 }
560 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
561
562 /* Set for Jumbo Frames? */
563 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
564 if (ifp->if_mtu > ETHERMTU)
565 hlreg |= IXGBE_HLREG0_JUMBOEN;
566 else
567 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
568
569 #ifdef DEV_NETMAP
570 /* CRC stripping is conditional in Netmap */
571 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
572 (ifp->if_capenable & IFCAP_NETMAP) &&
573 !ix_crcstrip)
574 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
575 else
576 #endif /* DEV_NETMAP */
577 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
578
579 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
580
581 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
582 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
583
584 for (i = 0; i < adapter->num_queues; i++, rxr++) {
585 u64 rdba = rxr->rxdma.dma_paddr;
586 u32 reg;
587 int regnum = i / 4; /* 1 register per 4 queues */
588 int regshift = i % 4; /* 4 bits per 1 queue */
589 j = rxr->me;
590
591 /* Setup the Base and Length of the Rx Descriptor Ring */
592 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
593 (rdba & 0x00000000ffffffffULL));
594 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
595 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
596 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
597
598 /* Set up the SRRCTL register */
599 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
600 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
601 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
602 srrctl |= bufsz;
603 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
604
605 /* Set RQSMR (Receive Queue Statistic Mapping) register */
606 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
607 reg &= ~(0x000000ff << (regshift * 8));
608 reg |= i << (regshift * 8);
609 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
610
611 /*
612 * Set DROP_EN iff we have no flow control and >1 queue.
613 * Note that srrctl was cleared shortly before during reset,
614 * so we do not need to clear the bit, but do it just in case
615 * this code is moved elsewhere.
616 */
617 if (adapter->num_queues > 1 &&
618 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
619 srrctl |= IXGBE_SRRCTL_DROP_EN;
620 } else {
621 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
622 }
623
624 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
625
626 /* Setup the HW Rx Head and Tail Descriptor Pointers */
627 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
628 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
629
630 /* Set the driver rx tail address */
631 rxr->tail = IXGBE_RDT(rxr->me);
632 }
633
634 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
635 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
636 | IXGBE_PSRTYPE_UDPHDR
637 | IXGBE_PSRTYPE_IPV4HDR
638 | IXGBE_PSRTYPE_IPV6HDR;
639 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
640 }
641
642 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
643
644 ixgbe_initialize_rss_mapping(adapter);
645
646 if (adapter->num_queues > 1) {
647 /* RSS and RX IPP Checksum are mutually exclusive */
648 rxcsum |= IXGBE_RXCSUM_PCSD;
649 }
650
651 if (ifp->if_capenable & IFCAP_RXCSUM)
652 rxcsum |= IXGBE_RXCSUM_PCSD;
653
654 /* This is useful for calculating UDP/IP fragment checksums */
655 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
656 rxcsum |= IXGBE_RXCSUM_IPPCSE;
657
658 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
659
660 } /* ixgbe_initialize_receive_units */
661
662 /************************************************************************
663 * ixgbe_initialize_transmit_units - Enable transmit units.
664 ************************************************************************/
665 static void
666 ixgbe_initialize_transmit_units(struct adapter *adapter)
667 {
668 struct tx_ring *txr = adapter->tx_rings;
669 struct ixgbe_hw *hw = &adapter->hw;
670 int i;
671
672 /* Setup the Base and Length of the Tx Descriptor Ring */
673 for (i = 0; i < adapter->num_queues; i++, txr++) {
674 u64 tdba = txr->txdma.dma_paddr;
675 u32 txctrl = 0;
676 u32 tqsmreg, reg;
677 int regnum = i / 4; /* 1 register per 4 queues */
678 int regshift = i % 4; /* 4 bits per 1 queue */
679 int j = txr->me;
680
681 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
682 (tdba & 0x00000000ffffffffULL));
683 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
684 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
685 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
686
687 /*
688 * Set TQSMR (Transmit Queue Statistic Mapping) register.
689 * Register location is different between 82598 and others.
690 */
691 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
692 tqsmreg = IXGBE_TQSMR(regnum);
693 else
694 tqsmreg = IXGBE_TQSM(regnum);
695 reg = IXGBE_READ_REG(hw, tqsmreg);
696 reg &= ~(0x000000ff << (regshift * 8));
697 reg |= i << (regshift * 8);
698 IXGBE_WRITE_REG(hw, tqsmreg, reg);
699
700 /* Setup the HW Tx Head and Tail descriptor pointers */
701 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
702 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
703
704 /* Cache the tail address */
705 txr->tail = IXGBE_TDT(j);
706
707 /* Disable Head Writeback */
708 /*
709 * Note: for X550 series devices, these registers are actually
710 * prefixed with TPH_ isntead of DCA_, but the addresses and
711 * fields remain the same.
712 */
713 switch (hw->mac.type) {
714 case ixgbe_mac_82598EB:
715 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
716 break;
717 default:
718 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
719 break;
720 }
721 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
722 switch (hw->mac.type) {
723 case ixgbe_mac_82598EB:
724 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
725 break;
726 default:
727 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
728 break;
729 }
730
731 }
732
733 if (hw->mac.type != ixgbe_mac_82598EB) {
734 u32 dmatxctl, rttdcs;
735
736 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
737 dmatxctl |= IXGBE_DMATXCTL_TE;
738 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
739 /* Disable arbiter to set MTQC */
740 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
741 rttdcs |= IXGBE_RTTDCS_ARBDIS;
742 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
743 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
744 ixgbe_get_mtqc(adapter->iov_mode));
745 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
746 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
747 }
748
749 return;
750 } /* ixgbe_initialize_transmit_units */
751
752 /************************************************************************
753 * ixgbe_attach - Device initialization routine
754 *
755 * Called when the driver is being loaded.
756 * Identifies the type of hardware, allocates all resources
757 * and initializes the hardware.
758 *
759 * return 0 on success, positive on failure
760 ************************************************************************/
761 static void
762 ixgbe_attach(device_t parent, device_t dev, void *aux)
763 {
764 struct adapter *adapter;
765 struct ixgbe_hw *hw;
766 int error = -1;
767 u32 ctrl_ext;
768 u16 high, low, nvmreg;
769 pcireg_t id, subid;
770 ixgbe_vendor_info_t *ent;
771 struct pci_attach_args *pa = aux;
772 const char *str;
773 char buf[256];
774
775 INIT_DEBUGOUT("ixgbe_attach: begin");
776
777 /* Allocate, clear, and link in our adapter structure */
778 adapter = device_private(dev);
779 adapter->hw.back = adapter;
780 adapter->dev = dev;
781 hw = &adapter->hw;
782 adapter->osdep.pc = pa->pa_pc;
783 adapter->osdep.tag = pa->pa_tag;
784 if (pci_dma64_available(pa))
785 adapter->osdep.dmat = pa->pa_dmat64;
786 else
787 adapter->osdep.dmat = pa->pa_dmat;
788 adapter->osdep.attached = false;
789
790 ent = ixgbe_lookup(pa);
791
792 KASSERT(ent != NULL);
793
794 aprint_normal(": %s, Version - %s\n",
795 ixgbe_strings[ent->index], ixgbe_driver_version);
796
797 /* Core Lock Init*/
798 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
799
800 /* Set up the timer callout */
801 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
802
803 /* Determine hardware revision */
804 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
805 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
806
807 hw->vendor_id = PCI_VENDOR(id);
808 hw->device_id = PCI_PRODUCT(id);
809 hw->revision_id =
810 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
811 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
812 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
813
814 /*
815 * Make sure BUSMASTER is set
816 */
817 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
818
819 /* Do base PCI setup - map BAR0 */
820 if (ixgbe_allocate_pci_resources(adapter, pa)) {
821 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
822 error = ENXIO;
823 goto err_out;
824 }
825
826 /* let hardware know driver is loaded */
827 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
828 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
829 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
830
831 /*
832 * Initialize the shared code
833 */
834 if (ixgbe_init_shared_code(hw) != 0) {
835 aprint_error_dev(dev, "Unable to initialize the shared code\n");
836 error = ENXIO;
837 goto err_out;
838 }
839
840 switch (hw->mac.type) {
841 case ixgbe_mac_82598EB:
842 str = "82598EB";
843 break;
844 case ixgbe_mac_82599EB:
845 str = "82599EB";
846 break;
847 case ixgbe_mac_X540:
848 str = "X540";
849 break;
850 case ixgbe_mac_X550:
851 str = "X550";
852 break;
853 case ixgbe_mac_X550EM_x:
854 str = "X550EM";
855 break;
856 case ixgbe_mac_X550EM_a:
857 str = "X550EM A";
858 break;
859 default:
860 str = "Unknown";
861 break;
862 }
863 aprint_normal_dev(dev, "device %s\n", str);
864
865 if (hw->mbx.ops.init_params)
866 hw->mbx.ops.init_params(hw);
867
868 hw->allow_unsupported_sfp = allow_unsupported_sfp;
869
870 /* Pick up the 82599 settings */
871 if (hw->mac.type != ixgbe_mac_82598EB) {
872 hw->phy.smart_speed = ixgbe_smart_speed;
873 adapter->num_segs = IXGBE_82599_SCATTER;
874 } else
875 adapter->num_segs = IXGBE_82598_SCATTER;
876
877 hw->mac.ops.set_lan_id(hw);
878 ixgbe_init_device_features(adapter);
879
880 if (ixgbe_configure_interrupts(adapter)) {
881 error = ENXIO;
882 goto err_out;
883 }
884
885 /* Allocate multicast array memory. */
886 adapter->mta = malloc(sizeof(*adapter->mta) *
887 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
888 if (adapter->mta == NULL) {
889 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
890 error = ENOMEM;
891 goto err_out;
892 }
893
894 /* Enable WoL (if supported) */
895 ixgbe_check_wol_support(adapter);
896
897 /* Verify adapter fan is still functional (if applicable) */
898 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
899 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
900 ixgbe_check_fan_failure(adapter, esdp, FALSE);
901 }
902
903 /* Ensure SW/FW semaphore is free */
904 ixgbe_init_swfw_semaphore(hw);
905
906 /* Enable EEE power saving */
907 if (adapter->feat_en & IXGBE_FEATURE_EEE)
908 hw->mac.ops.setup_eee(hw, TRUE);
909
910 /* Set an initial default flow control value */
911 hw->fc.requested_mode = ixgbe_flow_control;
912
913 /* Sysctls for limiting the amount of work done in the taskqueues */
914 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
915 "max number of rx packets to process",
916 &adapter->rx_process_limit, ixgbe_rx_process_limit);
917
918 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
919 "max number of tx packets to process",
920 &adapter->tx_process_limit, ixgbe_tx_process_limit);
921
922 /* Do descriptor calc and sanity checks */
923 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
924 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
925 aprint_error_dev(dev, "TXD config issue, using default!\n");
926 adapter->num_tx_desc = DEFAULT_TXD;
927 } else
928 adapter->num_tx_desc = ixgbe_txd;
929
930 /*
931 * With many RX rings it is easy to exceed the
932 * system mbuf allocation. Tuning nmbclusters
933 * can alleviate this.
934 */
935 if (nmbclusters > 0) {
936 int s;
937 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
938 if (s > nmbclusters) {
939 aprint_error_dev(dev, "RX Descriptors exceed "
940 "system mbuf max, using default instead!\n");
941 ixgbe_rxd = DEFAULT_RXD;
942 }
943 }
944
945 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
946 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
947 aprint_error_dev(dev, "RXD config issue, using default!\n");
948 adapter->num_rx_desc = DEFAULT_RXD;
949 } else
950 adapter->num_rx_desc = ixgbe_rxd;
951
952 /* Allocate our TX/RX Queues */
953 if (ixgbe_allocate_queues(adapter)) {
954 error = ENOMEM;
955 goto err_out;
956 }
957
958 hw->phy.reset_if_overtemp = TRUE;
959 error = ixgbe_reset_hw(hw);
960 hw->phy.reset_if_overtemp = FALSE;
961 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
962 /*
963 * No optics in this port, set up
964 * so the timer routine will probe
965 * for later insertion.
966 */
967 adapter->sfp_probe = TRUE;
968 error = IXGBE_SUCCESS;
969 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
970 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
971 error = EIO;
972 goto err_late;
973 } else if (error) {
974 aprint_error_dev(dev, "Hardware initialization failed\n");
975 error = EIO;
976 goto err_late;
977 }
978
979 /* Make sure we have a good EEPROM before we read from it */
980 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
981 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
982 error = EIO;
983 goto err_late;
984 }
985
986 aprint_normal("%s:", device_xname(dev));
987 /* NVM Image Version */
988 switch (hw->mac.type) {
989 case ixgbe_mac_X540:
990 case ixgbe_mac_X550EM_a:
991 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
992 if (nvmreg == 0xffff)
993 break;
994 high = (nvmreg >> 12) & 0x0f;
995 low = (nvmreg >> 4) & 0xff;
996 id = nvmreg & 0x0f;
997 aprint_normal(" NVM Image Version %u.", high);
998 if (hw->mac.type == ixgbe_mac_X540)
999 str = "%x";
1000 else
1001 str = "%02x";
1002 aprint_normal(str, low);
1003 aprint_normal(" ID 0x%x,", id);
1004 break;
1005 case ixgbe_mac_X550EM_x:
1006 case ixgbe_mac_X550:
1007 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1008 if (nvmreg == 0xffff)
1009 break;
1010 high = (nvmreg >> 12) & 0x0f;
1011 low = nvmreg & 0xff;
1012 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1013 break;
1014 default:
1015 break;
1016 }
1017
1018 /* PHY firmware revision */
1019 switch (hw->mac.type) {
1020 case ixgbe_mac_X540:
1021 case ixgbe_mac_X550:
1022 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1023 if (nvmreg == 0xffff)
1024 break;
1025 high = (nvmreg >> 12) & 0x0f;
1026 low = (nvmreg >> 4) & 0xff;
1027 id = nvmreg & 0x000f;
1028 aprint_normal(" PHY FW Revision %u.", high);
1029 if (hw->mac.type == ixgbe_mac_X540)
1030 str = "%x";
1031 else
1032 str = "%02x";
1033 aprint_normal(str, low);
1034 aprint_normal(" ID 0x%x,", id);
1035 break;
1036 default:
1037 break;
1038 }
1039
1040 /* NVM Map version & OEM NVM Image version */
1041 switch (hw->mac.type) {
1042 case ixgbe_mac_X550:
1043 case ixgbe_mac_X550EM_x:
1044 case ixgbe_mac_X550EM_a:
1045 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1046 if (nvmreg != 0xffff) {
1047 high = (nvmreg >> 12) & 0x0f;
1048 low = nvmreg & 0x00ff;
1049 aprint_normal(" NVM Map version %u.%02x,", high, low);
1050 }
1051 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1052 if (nvmreg != 0xffff) {
1053 high = (nvmreg >> 12) & 0x0f;
1054 low = nvmreg & 0x00ff;
1055 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1056 low);
1057 }
1058 break;
1059 default:
1060 break;
1061 }
1062
1063 /* Print the ETrackID */
1064 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1065 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1066 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1067
1068 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1069 error = ixgbe_allocate_msix(adapter, pa);
1070 if (error) {
1071 /* Free allocated queue structures first */
1072 ixgbe_free_transmit_structures(adapter);
1073 ixgbe_free_receive_structures(adapter);
1074 free(adapter->queues, M_DEVBUF);
1075
1076 /* Fallback to legacy interrupt */
1077 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1078 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1079 adapter->feat_en |= IXGBE_FEATURE_MSI;
1080 adapter->num_queues = 1;
1081
1082 /* Allocate our TX/RX Queues again */
1083 if (ixgbe_allocate_queues(adapter)) {
1084 error = ENOMEM;
1085 goto err_out;
1086 }
1087 }
1088 }
1089 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1090 error = ixgbe_allocate_legacy(adapter, pa);
1091 if (error)
1092 goto err_late;
1093
1094 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1095 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1096 ixgbe_handle_link, adapter);
1097 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1098 ixgbe_handle_mod, adapter);
1099 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1100 ixgbe_handle_msf, adapter);
1101 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1102 ixgbe_handle_phy, adapter);
1103 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1104 adapter->fdir_si =
1105 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1106 ixgbe_reinit_fdir, adapter);
1107 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1108 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1109 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1110 && (adapter->fdir_si == NULL))) {
1111 aprint_error_dev(dev,
1112 "could not establish software interrupts ()\n");
1113 goto err_out;
1114 }
1115
1116 error = ixgbe_start_hw(hw);
1117 switch (error) {
1118 case IXGBE_ERR_EEPROM_VERSION:
1119 aprint_error_dev(dev, "This device is a pre-production adapter/"
1120 "LOM. Please be aware there may be issues associated "
1121 "with your hardware.\nIf you are experiencing problems "
1122 "please contact your Intel or hardware representative "
1123 "who provided you with this hardware.\n");
1124 break;
1125 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1126 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1127 error = EIO;
1128 goto err_late;
1129 case IXGBE_ERR_SFP_NOT_PRESENT:
1130 aprint_error_dev(dev, "No SFP+ Module found\n");
1131 /* falls thru */
1132 default:
1133 break;
1134 }
1135
1136 /* Setup OS specific network interface */
1137 if (ixgbe_setup_interface(dev, adapter) != 0)
1138 goto err_late;
1139
1140 /*
1141 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1142 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1143 */
1144 if (hw->phy.media_type == ixgbe_media_type_copper) {
1145 uint16_t id1, id2;
1146 int oui, model, rev;
1147 const char *descr;
1148
1149 id1 = hw->phy.id >> 16;
1150 id2 = hw->phy.id & 0xffff;
1151 oui = MII_OUI(id1, id2);
1152 model = MII_MODEL(id2);
1153 rev = MII_REV(id2);
1154 if ((descr = mii_get_descr(oui, model)) != NULL)
1155 aprint_normal_dev(dev,
1156 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1157 descr, oui, model, rev);
1158 else
1159 aprint_normal_dev(dev,
1160 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1161 oui, model, rev);
1162 }
1163
1164 /* Enable the optics for 82599 SFP+ fiber */
1165 ixgbe_enable_tx_laser(hw);
1166
1167 /* Enable power to the phy. */
1168 ixgbe_set_phy_power(hw, TRUE);
1169
1170 /* Initialize statistics */
1171 ixgbe_update_stats_counters(adapter);
1172
1173 /* Check PCIE slot type/speed/width */
1174 ixgbe_get_slot_info(adapter);
1175
1176 /*
1177 * Do time init and sysctl init here, but
1178 * only on the first port of a bypass adapter.
1179 */
1180 ixgbe_bypass_init(adapter);
1181
1182 /* Set an initial dmac value */
1183 adapter->dmac = 0;
1184 /* Set initial advertised speeds (if applicable) */
1185 adapter->advertise = ixgbe_get_advertise(adapter);
1186
1187 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1188 ixgbe_define_iov_schemas(dev, &error);
1189
1190 /* Add sysctls */
1191 ixgbe_add_device_sysctls(adapter);
1192 ixgbe_add_hw_stats(adapter);
1193
1194 /* For Netmap */
1195 adapter->init_locked = ixgbe_init_locked;
1196 adapter->stop_locked = ixgbe_stop;
1197
1198 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1199 ixgbe_netmap_attach(adapter);
1200
1201 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1202 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1203 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1204 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1205
1206 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1207 pmf_class_network_register(dev, adapter->ifp);
1208 else
1209 aprint_error_dev(dev, "couldn't establish power handler\n");
1210
1211 INIT_DEBUGOUT("ixgbe_attach: end");
1212 adapter->osdep.attached = true;
1213
1214 return;
1215
1216 err_late:
1217 ixgbe_free_transmit_structures(adapter);
1218 ixgbe_free_receive_structures(adapter);
1219 free(adapter->queues, M_DEVBUF);
1220 err_out:
1221 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1222 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1223 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1224 ixgbe_free_softint(adapter);
1225 ixgbe_free_pci_resources(adapter);
1226 if (adapter->mta != NULL)
1227 free(adapter->mta, M_DEVBUF);
1228 IXGBE_CORE_LOCK_DESTROY(adapter);
1229
1230 return;
1231 } /* ixgbe_attach */
1232
1233 /************************************************************************
1234 * ixgbe_check_wol_support
1235 *
1236 * Checks whether the adapter's ports are capable of
1237 * Wake On LAN by reading the adapter's NVM.
1238 *
1239 * Sets each port's hw->wol_enabled value depending
1240 * on the value read here.
1241 ************************************************************************/
1242 static void
1243 ixgbe_check_wol_support(struct adapter *adapter)
1244 {
1245 struct ixgbe_hw *hw = &adapter->hw;
1246 u16 dev_caps = 0;
1247
1248 /* Find out WoL support for port */
1249 adapter->wol_support = hw->wol_enabled = 0;
1250 ixgbe_get_device_caps(hw, &dev_caps);
1251 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1252 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1253 hw->bus.func == 0))
1254 adapter->wol_support = hw->wol_enabled = 1;
1255
1256 /* Save initial wake up filter configuration */
1257 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1258
1259 return;
1260 } /* ixgbe_check_wol_support */
1261
1262 /************************************************************************
1263 * ixgbe_setup_interface
1264 *
1265 * Setup networking device structure and register an interface.
1266 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Use the ifnet embedded in our ethercom and name it after the device. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Driver runs its own locking; the stack need not take KERNEL_LOCK. */
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only; compiled out on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless the legacy-TX feature is enabled. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* -2 keeps the send queue shorter than the descriptor ring. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach sequence: if_initialize -> ether_ifattach -> if_register. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (checksum offload, TSO, LRO) */
	ifp->if_capabilities |= IFCAP_RXCSUM
	    | IFCAP_TXCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6
	    | IFCAP_LRO;
	/* Capabilities are advertised but start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1369
1370 /************************************************************************
1371 * ixgbe_add_media_types
1372 ************************************************************************/
1373 static void
1374 ixgbe_add_media_types(struct adapter *adapter)
1375 {
1376 struct ixgbe_hw *hw = &adapter->hw;
1377 device_t dev = adapter->dev;
1378 u64 layer;
1379
1380 layer = adapter->phy_layer;
1381
1382 #define ADD(mm, dd) \
1383 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1384
1385 ADD(IFM_NONE, 0);
1386
1387 /* Media types with matching NetBSD media defines */
1388 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1389 ADD(IFM_10G_T | IFM_FDX, 0);
1390 }
1391 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1392 ADD(IFM_1000_T | IFM_FDX, 0);
1393 }
1394 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1395 ADD(IFM_100_TX | IFM_FDX, 0);
1396 }
1397 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1398 ADD(IFM_10_T | IFM_FDX, 0);
1399 }
1400
1401 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1402 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1403 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1404 }
1405
1406 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1407 ADD(IFM_10G_LR | IFM_FDX, 0);
1408 if (hw->phy.multispeed_fiber) {
1409 ADD(IFM_1000_LX | IFM_FDX, 0);
1410 }
1411 }
1412 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1413 ADD(IFM_10G_SR | IFM_FDX, 0);
1414 if (hw->phy.multispeed_fiber) {
1415 ADD(IFM_1000_SX | IFM_FDX, 0);
1416 }
1417 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1418 ADD(IFM_1000_SX | IFM_FDX, 0);
1419 }
1420 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1421 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1422 }
1423
1424 #ifdef IFM_ETH_XTYPE
1425 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1426 ADD(IFM_10G_KR | IFM_FDX, 0);
1427 }
1428 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1429 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1430 }
1431 #else
1432 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1433 device_printf(dev, "Media supported: 10GbaseKR\n");
1434 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1435 ADD(IFM_10G_SR | IFM_FDX, 0);
1436 }
1437 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1438 device_printf(dev, "Media supported: 10GbaseKX4\n");
1439 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1440 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1441 }
1442 #endif
1443 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1444 ADD(IFM_1000_KX | IFM_FDX, 0);
1445 }
1446 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1447 ADD(IFM_2500_KX | IFM_FDX, 0);
1448 }
1449 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1450 ADD(IFM_2500_T | IFM_FDX, 0);
1451 }
1452 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1453 ADD(IFM_5000_T | IFM_FDX, 0);
1454 }
1455 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1456 device_printf(dev, "Media supported: 1000baseBX\n");
1457 /* XXX no ifmedia_set? */
1458
1459 ADD(IFM_AUTO, 0);
1460
1461 #undef ADD
1462 } /* ixgbe_add_media_types */
1463
1464 /************************************************************************
1465 * ixgbe_is_sfp
1466 ************************************************************************/
1467 static inline bool
1468 ixgbe_is_sfp(struct ixgbe_hw *hw)
1469 {
1470 switch (hw->mac.type) {
1471 case ixgbe_mac_82598EB:
1472 if (hw->phy.type == ixgbe_phy_nl)
1473 return (TRUE);
1474 return (FALSE);
1475 case ixgbe_mac_82599EB:
1476 switch (hw->mac.ops.get_media_type(hw)) {
1477 case ixgbe_media_type_fiber:
1478 case ixgbe_media_type_fiber_qsfp:
1479 return (TRUE);
1480 default:
1481 return (FALSE);
1482 }
1483 case ixgbe_mac_X550EM_x:
1484 case ixgbe_mac_X550EM_a:
1485 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1486 return (TRUE);
1487 return (FALSE);
1488 default:
1489 return (FALSE);
1490 }
1491 } /* ixgbe_is_sfp */
1492
1493 /************************************************************************
1494 * ixgbe_config_link
1495 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP(+) port: hand off to the module softint, which
		 * identifies the inserted module and configures the link.
		 */
		if (hw->phy.multispeed_fiber) {
			/* Multispeed fiber also needs the TX laser enabled
			 * and the multispeed-fiber softint scheduled. */
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		/* Non-SFP: query link state and program the link directly. */
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		/* NOTE(review): err from setup_link is assigned but never
		 * acted on here — confirm whether that is intentional. */
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1541
1542 /************************************************************************
1543 * ixgbe_update_stats_counters - Update board statistics counters.
1544 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* Latch crcerrs/rlec locally: they feed if_ierrors at the bottom. */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers: fold per-hw-queue counters into our num_queues slots */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		int j = i % adapter->num_queues;

		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers: per-TC/packet-buffer counters, folded the same way */
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* 82599 and later moved the PFC RX counters to new offsets */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	/* NOTE(review): missed_rx is initialized to 0 and never updated in
	 * this function, so the subtraction below is a no-op — upstream
	 * folds per-register MPC into missed_rx; confirm intent. */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	/* total XON+XOFF frames sent; used to correct TX packet counters */
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 82599+: 36-bit counters split across low/high registers */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Received-packet size histogram */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters exclude the flow-control frames counted in 'total' */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1706
1707 /************************************************************************
1708 * ixgbe_add_hw_stats
1709 *
1710 * Add sysctl variables, one per statistic, to the system.
1711 ************************************************************************/
1712 static void
1713 ixgbe_add_hw_stats(struct adapter *adapter)
1714 {
1715 device_t dev = adapter->dev;
1716 const struct sysctlnode *rnode, *cnode;
1717 struct sysctllog **log = &adapter->sysctllog;
1718 struct tx_ring *txr = adapter->tx_rings;
1719 struct rx_ring *rxr = adapter->rx_rings;
1720 struct ixgbe_hw *hw = &adapter->hw;
1721 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1722 const char *xname = device_xname(dev);
1723 int i;
1724
1725 /* Driver Statistics */
1726 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1727 NULL, xname, "Driver tx dma soft fail EFBIG");
1728 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1729 NULL, xname, "m_defrag() failed");
1730 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1731 NULL, xname, "Driver tx dma hard fail EFBIG");
1732 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1733 NULL, xname, "Driver tx dma hard fail EINVAL");
1734 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1735 NULL, xname, "Driver tx dma hard fail other");
1736 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1737 NULL, xname, "Driver tx dma soft fail EAGAIN");
1738 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1739 NULL, xname, "Driver tx dma soft fail ENOMEM");
1740 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1741 NULL, xname, "Watchdog timeouts");
1742 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1743 NULL, xname, "TSO errors");
1744 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1745 NULL, xname, "Link MSI-X IRQ Handled");
1746 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1747 NULL, xname, "Link softint");
1748 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1749 NULL, xname, "module softint");
1750 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1751 NULL, xname, "multimode softint");
1752 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1753 NULL, xname, "external PHY softint");
1754
1755 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1756 #ifdef LRO
1757 struct lro_ctrl *lro = &rxr->lro;
1758 #endif /* LRO */
1759
1760 snprintf(adapter->queues[i].evnamebuf,
1761 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1762 xname, i);
1763 snprintf(adapter->queues[i].namebuf,
1764 sizeof(adapter->queues[i].namebuf), "q%d", i);
1765
1766 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1767 aprint_error_dev(dev, "could not create sysctl root\n");
1768 break;
1769 }
1770
1771 if (sysctl_createv(log, 0, &rnode, &rnode,
1772 0, CTLTYPE_NODE,
1773 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1774 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1775 break;
1776
1777 if (sysctl_createv(log, 0, &rnode, &cnode,
1778 CTLFLAG_READWRITE, CTLTYPE_INT,
1779 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1780 ixgbe_sysctl_interrupt_rate_handler, 0,
1781 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1782 break;
1783
1784 if (sysctl_createv(log, 0, &rnode, &cnode,
1785 CTLFLAG_READONLY, CTLTYPE_INT,
1786 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1787 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1788 0, CTL_CREATE, CTL_EOL) != 0)
1789 break;
1790
1791 if (sysctl_createv(log, 0, &rnode, &cnode,
1792 CTLFLAG_READONLY, CTLTYPE_INT,
1793 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1794 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1795 0, CTL_CREATE, CTL_EOL) != 0)
1796 break;
1797
1798 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1799 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1800 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1801 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1802 "Handled queue in softint");
1803 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1804 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1805 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1806 NULL, adapter->queues[i].evnamebuf, "TSO");
1807 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1808 NULL, adapter->queues[i].evnamebuf,
1809 "Queue No Descriptor Available");
1810 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1811 NULL, adapter->queues[i].evnamebuf,
1812 "Queue Packets Transmitted");
1813 #ifndef IXGBE_LEGACY_TX
1814 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1815 NULL, adapter->queues[i].evnamebuf,
1816 "Packets dropped in pcq");
1817 #endif
1818
1819 if (sysctl_createv(log, 0, &rnode, &cnode,
1820 CTLFLAG_READONLY,
1821 CTLTYPE_INT,
1822 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1823 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1824 CTL_CREATE, CTL_EOL) != 0)
1825 break;
1826
1827 if (sysctl_createv(log, 0, &rnode, &cnode,
1828 CTLFLAG_READONLY,
1829 CTLTYPE_INT,
1830 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1831 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1832 CTL_CREATE, CTL_EOL) != 0)
1833 break;
1834
1835 if (i < __arraycount(stats->mpc)) {
1836 evcnt_attach_dynamic(&stats->mpc[i],
1837 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1838 "RX Missed Packet Count");
1839 if (hw->mac.type == ixgbe_mac_82598EB)
1840 evcnt_attach_dynamic(&stats->rnbc[i],
1841 EVCNT_TYPE_MISC, NULL,
1842 adapter->queues[i].evnamebuf,
1843 "Receive No Buffers");
1844 }
1845 if (i < __arraycount(stats->pxontxc)) {
1846 evcnt_attach_dynamic(&stats->pxontxc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1848 "pxontxc");
1849 evcnt_attach_dynamic(&stats->pxonrxc[i],
1850 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1851 "pxonrxc");
1852 evcnt_attach_dynamic(&stats->pxofftxc[i],
1853 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1854 "pxofftxc");
1855 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1856 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1857 "pxoffrxc");
1858 if (hw->mac.type >= ixgbe_mac_82599EB)
1859 evcnt_attach_dynamic(&stats->pxon2offc[i],
1860 EVCNT_TYPE_MISC, NULL,
1861 adapter->queues[i].evnamebuf,
1862 "pxon2offc");
1863 }
1864 if (i < __arraycount(stats->qprc)) {
1865 evcnt_attach_dynamic(&stats->qprc[i],
1866 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1867 "qprc");
1868 evcnt_attach_dynamic(&stats->qptc[i],
1869 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1870 "qptc");
1871 evcnt_attach_dynamic(&stats->qbrc[i],
1872 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1873 "qbrc");
1874 evcnt_attach_dynamic(&stats->qbtc[i],
1875 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1876 "qbtc");
1877 if (hw->mac.type >= ixgbe_mac_82599EB)
1878 evcnt_attach_dynamic(&stats->qprdc[i],
1879 EVCNT_TYPE_MISC, NULL,
1880 adapter->queues[i].evnamebuf, "qprdc");
1881 }
1882
1883 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1884 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1885 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1886 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1887 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1888 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1889 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1890 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1891 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1892 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1893 #ifdef LRO
1894 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1895 CTLFLAG_RD, &lro->lro_queued, 0,
1896 "LRO Queued");
1897 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1898 CTLFLAG_RD, &lro->lro_flushed, 0,
1899 "LRO Flushed");
1900 #endif /* LRO */
1901 }
1902
1903 /* MAC stats get their own sub node */
1904
1905 snprintf(stats->namebuf,
1906 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1907
1908 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1909 stats->namebuf, "rx csum offload - IP");
1910 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1911 stats->namebuf, "rx csum offload - L4");
1912 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1913 stats->namebuf, "rx csum offload - IP bad");
1914 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1915 stats->namebuf, "rx csum offload - L4 bad");
1916 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1917 stats->namebuf, "Interrupt conditions zero");
1918 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1919 stats->namebuf, "Legacy interrupts");
1920
1921 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1922 stats->namebuf, "CRC Errors");
1923 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1924 stats->namebuf, "Illegal Byte Errors");
1925 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1926 stats->namebuf, "Byte Errors");
1927 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1928 stats->namebuf, "MAC Short Packets Discarded");
1929 if (hw->mac.type >= ixgbe_mac_X550)
1930 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1931 stats->namebuf, "Bad SFD");
1932 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1933 stats->namebuf, "Total Packets Missed");
1934 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1935 stats->namebuf, "MAC Local Faults");
1936 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1937 stats->namebuf, "MAC Remote Faults");
1938 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1939 stats->namebuf, "Receive Length Errors");
1940 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "Link XON Transmitted");
1942 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "Link XON Received");
1944 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "Link XOFF Transmitted");
1946 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "Link XOFF Received");
1948
1949 /* Packet Reception Stats */
1950 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Total Octets Received");
1952 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Good Octets Received");
1954 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Total Packets Received");
1956 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "Good Packets Received");
1958 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "Multicast Packets Received");
1960 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Broadcast Packets Received");
1962 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "64 byte frames received ");
1964 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "65-127 byte frames received");
1966 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "128-255 byte frames received");
1968 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "256-511 byte frames received");
1970 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "512-1023 byte frames received");
1972 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "1023-1522 byte frames received");
1974 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Receive Undersized");
1976 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Fragmented Packets Received ");
1978 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Oversized Packets Received");
1980 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "Received Jabber");
1982 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "Management Packets Received");
1984 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Management Packets Dropped");
1986 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Checksum Errors");
1988
1989 /* Packet Transmission Stats */
1990 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Good Octets Transmitted");
1992 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Total Packets Transmitted");
1994 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Good Packets Transmitted");
1996 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Broadcast Packets Transmitted");
1998 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Multicast Packets Transmitted");
2000 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Management Packets Transmitted");
2002 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "64 byte frames transmitted ");
2004 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "65-127 byte frames transmitted");
2006 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "128-255 byte frames transmitted");
2008 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "256-511 byte frames transmitted");
2010 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "512-1023 byte frames transmitted");
2012 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "1024-1522 byte frames transmitted");
2014 } /* ixgbe_add_hw_stats */
2015
2016 static void
2017 ixgbe_clear_evcnt(struct adapter *adapter)
2018 {
2019 struct tx_ring *txr = adapter->tx_rings;
2020 struct rx_ring *rxr = adapter->rx_rings;
2021 struct ixgbe_hw *hw = &adapter->hw;
2022 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2023
2024 adapter->efbig_tx_dma_setup.ev_count = 0;
2025 adapter->mbuf_defrag_failed.ev_count = 0;
2026 adapter->efbig2_tx_dma_setup.ev_count = 0;
2027 adapter->einval_tx_dma_setup.ev_count = 0;
2028 adapter->other_tx_dma_setup.ev_count = 0;
2029 adapter->eagain_tx_dma_setup.ev_count = 0;
2030 adapter->enomem_tx_dma_setup.ev_count = 0;
2031 adapter->tso_err.ev_count = 0;
2032 adapter->watchdog_events.ev_count = 0;
2033 adapter->link_irq.ev_count = 0;
2034 adapter->link_sicount.ev_count = 0;
2035 adapter->mod_sicount.ev_count = 0;
2036 adapter->msf_sicount.ev_count = 0;
2037 adapter->phy_sicount.ev_count = 0;
2038
2039 txr = adapter->tx_rings;
2040 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2041 adapter->queues[i].irqs.ev_count = 0;
2042 adapter->queues[i].handleq.ev_count = 0;
2043 adapter->queues[i].req.ev_count = 0;
2044 txr->no_desc_avail.ev_count = 0;
2045 txr->total_packets.ev_count = 0;
2046 txr->tso_tx.ev_count = 0;
2047 #ifndef IXGBE_LEGACY_TX
2048 txr->pcq_drops.ev_count = 0;
2049 #endif
2050 txr->q_efbig_tx_dma_setup = 0;
2051 txr->q_mbuf_defrag_failed = 0;
2052 txr->q_efbig2_tx_dma_setup = 0;
2053 txr->q_einval_tx_dma_setup = 0;
2054 txr->q_other_tx_dma_setup = 0;
2055 txr->q_eagain_tx_dma_setup = 0;
2056 txr->q_enomem_tx_dma_setup = 0;
2057 txr->q_tso_err = 0;
2058
2059 if (i < __arraycount(stats->mpc)) {
2060 stats->mpc[i].ev_count = 0;
2061 if (hw->mac.type == ixgbe_mac_82598EB)
2062 stats->rnbc[i].ev_count = 0;
2063 }
2064 if (i < __arraycount(stats->pxontxc)) {
2065 stats->pxontxc[i].ev_count = 0;
2066 stats->pxonrxc[i].ev_count = 0;
2067 stats->pxofftxc[i].ev_count = 0;
2068 stats->pxoffrxc[i].ev_count = 0;
2069 if (hw->mac.type >= ixgbe_mac_82599EB)
2070 stats->pxon2offc[i].ev_count = 0;
2071 }
2072 if (i < __arraycount(stats->qprc)) {
2073 stats->qprc[i].ev_count = 0;
2074 stats->qptc[i].ev_count = 0;
2075 stats->qbrc[i].ev_count = 0;
2076 stats->qbtc[i].ev_count = 0;
2077 if (hw->mac.type >= ixgbe_mac_82599EB)
2078 stats->qprdc[i].ev_count = 0;
2079 }
2080
2081 rxr->rx_packets.ev_count = 0;
2082 rxr->rx_bytes.ev_count = 0;
2083 rxr->rx_copies.ev_count = 0;
2084 rxr->no_jmbuf.ev_count = 0;
2085 rxr->rx_discarded.ev_count = 0;
2086 }
2087 stats->ipcs.ev_count = 0;
2088 stats->l4cs.ev_count = 0;
2089 stats->ipcs_bad.ev_count = 0;
2090 stats->l4cs_bad.ev_count = 0;
2091 stats->intzero.ev_count = 0;
2092 stats->legint.ev_count = 0;
2093 stats->crcerrs.ev_count = 0;
2094 stats->illerrc.ev_count = 0;
2095 stats->errbc.ev_count = 0;
2096 stats->mspdc.ev_count = 0;
2097 stats->mbsdc.ev_count = 0;
2098 stats->mpctotal.ev_count = 0;
2099 stats->mlfc.ev_count = 0;
2100 stats->mrfc.ev_count = 0;
2101 stats->rlec.ev_count = 0;
2102 stats->lxontxc.ev_count = 0;
2103 stats->lxonrxc.ev_count = 0;
2104 stats->lxofftxc.ev_count = 0;
2105 stats->lxoffrxc.ev_count = 0;
2106
2107 /* Packet Reception Stats */
2108 stats->tor.ev_count = 0;
2109 stats->gorc.ev_count = 0;
2110 stats->tpr.ev_count = 0;
2111 stats->gprc.ev_count = 0;
2112 stats->mprc.ev_count = 0;
2113 stats->bprc.ev_count = 0;
2114 stats->prc64.ev_count = 0;
2115 stats->prc127.ev_count = 0;
2116 stats->prc255.ev_count = 0;
2117 stats->prc511.ev_count = 0;
2118 stats->prc1023.ev_count = 0;
2119 stats->prc1522.ev_count = 0;
2120 stats->ruc.ev_count = 0;
2121 stats->rfc.ev_count = 0;
2122 stats->roc.ev_count = 0;
2123 stats->rjc.ev_count = 0;
2124 stats->mngprc.ev_count = 0;
2125 stats->mngpdc.ev_count = 0;
2126 stats->xec.ev_count = 0;
2127
2128 /* Packet Transmission Stats */
2129 stats->gotc.ev_count = 0;
2130 stats->tpt.ev_count = 0;
2131 stats->gptc.ev_count = 0;
2132 stats->bptc.ev_count = 0;
2133 stats->mptc.ev_count = 0;
2134 stats->mngptc.ev_count = 0;
2135 stats->ptc64.ev_count = 0;
2136 stats->ptc127.ev_count = 0;
2137 stats->ptc255.ev_count = 0;
2138 stats->ptc511.ev_count = 0;
2139 stats->ptc1023.ev_count = 0;
2140 stats->ptc1522.ev_count = 0;
2141 }
2142
2143 /************************************************************************
2144 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2145 *
2146 * Retrieves the TDH value from the hardware
2147 ************************************************************************/
2148 static int
2149 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2150 {
2151 struct sysctlnode node = *rnode;
2152 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2153 uint32_t val;
2154
2155 if (!txr)
2156 return (0);
2157
2158 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2159 node.sysctl_data = &val;
2160 return sysctl_lookup(SYSCTLFN_CALL(&node));
2161 } /* ixgbe_sysctl_tdh_handler */
2162
2163 /************************************************************************
2164 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2165 *
2166 * Retrieves the TDT value from the hardware
2167 ************************************************************************/
2168 static int
2169 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2170 {
2171 struct sysctlnode node = *rnode;
2172 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2173 uint32_t val;
2174
2175 if (!txr)
2176 return (0);
2177
2178 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2179 node.sysctl_data = &val;
2180 return sysctl_lookup(SYSCTLFN_CALL(&node));
2181 } /* ixgbe_sysctl_tdt_handler */
2182
2183 /************************************************************************
2184 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2185 *
2186 * Retrieves the RDH value from the hardware
2187 ************************************************************************/
2188 static int
2189 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2190 {
2191 struct sysctlnode node = *rnode;
2192 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2193 uint32_t val;
2194
2195 if (!rxr)
2196 return (0);
2197
2198 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2199 node.sysctl_data = &val;
2200 return sysctl_lookup(SYSCTLFN_CALL(&node));
2201 } /* ixgbe_sysctl_rdh_handler */
2202
2203 /************************************************************************
2204 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2205 *
2206 * Retrieves the RDT value from the hardware
2207 ************************************************************************/
2208 static int
2209 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2210 {
2211 struct sysctlnode node = *rnode;
2212 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2213 uint32_t val;
2214
2215 if (!rxr)
2216 return (0);
2217
2218 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2219 node.sysctl_data = &val;
2220 return sysctl_lookup(SYSCTLFN_CALL(&node));
2221 } /* ixgbe_sysctl_rdt_handler */
2222
2223 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixgbe_register_vlan
 *
 * Run via vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This
 * just creates the entry in the soft version of the
 * VFTA, init will repopulate the real table.
 *
 * NOTE: currently compiled out (#if 0) pending a vlan(4) overhaul.
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* shadow_vfta is an array of 32-bit words: word = vtag/32, bit = vtag%32 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated shadow table into the hardware */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2251
/************************************************************************
 * ixgbe_unregister_vlan
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 *
 * NOTE: currently compiled out (#if 0) pending a vlan(4) overhaul.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the bit for this tag: word = vtag/32, bit = vtag%32 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2277 #endif
2278
2279 static void
2280 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2281 {
2282 struct ethercom *ec = &adapter->osdep.ec;
2283 struct ixgbe_hw *hw = &adapter->hw;
2284 struct rx_ring *rxr;
2285 int i;
2286 u32 ctrl;
2287
2288
2289 /*
2290 * We get here thru init_locked, meaning
2291 * a soft reset, this has already cleared
2292 * the VFTA and other state, so if there
2293 * have been no vlan's registered do nothing.
2294 */
2295 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2296 return;
2297
2298 /* Setup the queues for vlans */
2299 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2300 for (i = 0; i < adapter->num_queues; i++) {
2301 rxr = &adapter->rx_rings[i];
2302 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2303 if (hw->mac.type != ixgbe_mac_82598EB) {
2304 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2305 ctrl |= IXGBE_RXDCTL_VME;
2306 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2307 }
2308 rxr->vtag_strip = TRUE;
2309 }
2310 }
2311
2312 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2313 return;
2314 /*
2315 * A soft reset zero's out the VFTA, so
2316 * we need to repopulate it now.
2317 */
2318 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2319 if (adapter->shadow_vfta[i] != 0)
2320 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2321 adapter->shadow_vfta[i]);
2322
2323 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2324 /* Enable the Filter Table if enabled */
2325 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2326 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2327 ctrl |= IXGBE_VLNCTRL_VFE;
2328 }
2329 if (hw->mac.type == ixgbe_mac_82598EB)
2330 ctrl |= IXGBE_VLNCTRL_VME;
2331 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2332 } /* ixgbe_setup_vlan_hw_support */
2333
2334 /************************************************************************
2335 * ixgbe_get_slot_info
2336 *
2337 * Get the width and transaction speed of
2338 * the slot this adapter is plugged into.
2339 ************************************************************************/
2340 static void
2341 ixgbe_get_slot_info(struct adapter *adapter)
2342 {
2343 device_t dev = adapter->dev;
2344 struct ixgbe_hw *hw = &adapter->hw;
2345 u32 offset;
2346 u16 link;
2347 int bus_info_valid = TRUE;
2348
2349 /* Some devices are behind an internal bridge */
2350 switch (hw->device_id) {
2351 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2352 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2353 goto get_parent_info;
2354 default:
2355 break;
2356 }
2357
2358 ixgbe_get_bus_info(hw);
2359
2360 /*
2361 * Some devices don't use PCI-E, but there is no need
2362 * to display "Unknown" for bus speed and width.
2363 */
2364 switch (hw->mac.type) {
2365 case ixgbe_mac_X550EM_x:
2366 case ixgbe_mac_X550EM_a:
2367 return;
2368 default:
2369 goto display;
2370 }
2371
2372 get_parent_info:
2373 /*
2374 * For the Quad port adapter we need to parse back
2375 * up the PCI tree to find the speed of the expansion
2376 * slot into which this adapter is plugged. A bit more work.
2377 */
2378 dev = device_parent(device_parent(dev));
2379 #if 0
2380 #ifdef IXGBE_DEBUG
2381 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2382 pci_get_slot(dev), pci_get_function(dev));
2383 #endif
2384 dev = device_parent(device_parent(dev));
2385 #ifdef IXGBE_DEBUG
2386 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2387 pci_get_slot(dev), pci_get_function(dev));
2388 #endif
2389 #endif
2390 /* Now get the PCI Express Capabilities offset */
2391 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2392 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2393 /*
2394 * Hmm...can't get PCI-Express capabilities.
2395 * Falling back to default method.
2396 */
2397 bus_info_valid = FALSE;
2398 ixgbe_get_bus_info(hw);
2399 goto display;
2400 }
2401 /* ...and read the Link Status Register */
2402 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2403 offset + PCIE_LCSR) >> 16;
2404 ixgbe_set_pci_config_data_generic(hw, link);
2405
2406 display:
2407 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2408 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2409 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2410 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2411 "Unknown"),
2412 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2413 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2414 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2415 "Unknown"));
2416
2417 if (bus_info_valid) {
2418 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2419 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2420 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2421 device_printf(dev, "PCI-Express bandwidth available"
2422 " for this card\n is not sufficient for"
2423 " optimal performance.\n");
2424 device_printf(dev, "For optimal performance a x8 "
2425 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2426 }
2427 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2428 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2429 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2430 device_printf(dev, "PCI-Express bandwidth available"
2431 " for this card\n is not sufficient for"
2432 " optimal performance.\n");
2433 device_printf(dev, "For optimal performance a x8 "
2434 "PCIE Gen3 slot is required.\n");
2435 }
2436 } else
2437 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2438
2439 return;
2440 } /* ixgbe_get_slot_info */
2441
/************************************************************************
 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
 *
 *   Unmask the interrupt for one queue vector.  Disable requests nest
 *   (see ixgbe_disable_queue_internal); the interrupt is re-enabled in
 *   hardware only when disabled_count drops to zero.
 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	/* dc_mtx serializes updates to the nesting counter */
	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;	/* still disabled by an outer caller */

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit enable register (EIMS) */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs: 64 queue bits split across EIMS_EX[0]/[1] */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2471
/************************************************************************
 * ixgbe_disable_queue_internal
 *
 *   Mask the interrupt for one queue vector.  When nestok is true a
 *   second disable while already disabled just bumps the nesting
 *   counter; the hardware mask registers are written only on the
 *   first (outermost) disable.
 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	/* dc_mtx serializes updates to the nesting counter */
	mutex_enter(&que->dc_mtx);

	if (que->disabled_count > 0) {
		/* Already masked; just record the nesting if allowed */
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit clear register (EIMC) */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs: 64 queue bits split across EIMC_EX[0]/[1] */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2506
/************************************************************************
 * ixgbe_disable_queue
 *
 *   Nesting-allowed wrapper around ixgbe_disable_queue_internal();
 *   each call must be balanced by an ixgbe_enable_queue() call.
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2516
2517 /************************************************************************
2518 * ixgbe_sched_handle_que - schedule deferred packet processing
2519 ************************************************************************/
2520 static inline void
2521 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2522 {
2523
2524 if(que->txrx_use_workqueue) {
2525 /*
2526 * adapter->que_wq is bound to each CPU instead of
2527 * each NIC queue to reduce workqueue kthread. As we
2528 * should consider about interrupt affinity in this
2529 * function, the workqueue kthread must be WQ_PERCPU.
2530 * If create WQ_PERCPU workqueue kthread for each NIC
2531 * queue, that number of created workqueue kthread is
2532 * (number of used NIC queue) * (number of CPUs) =
2533 * (number of CPUs) ^ 2 most often.
2534 *
2535 * The same NIC queue's interrupts are avoided by
2536 * masking the queue's interrupt. And different
2537 * NIC queue's interrupts use different struct work
2538 * (que->wq_cookie). So, "enqueued flag" to avoid
2539 * twice workqueue_enqueue() is not required .
2540 */
2541 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2542 } else {
2543 softint_schedule(que->que_si);
2544 }
2545 }
2546
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler: masks the queue's interrupt, reaps
 *   TX completions, optionally recomputes the adaptive interrupt
 *   moderation (AIM) setting, then either schedules deferred RX/TX
 *   processing or re-enables the interrupt.
 *
 *   Returns 1 (interrupt claimed) or 0 (not ours / not running).
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until processing is finished */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = average bytes/packet over the last interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* RX work pending: defer it; otherwise unmask the vector now */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2648
2649 /************************************************************************
2650 * ixgbe_media_status - Media Ioctl callback
2651 *
2652 * Called whenever the user queries the status of
2653 * the interface using ifconfig.
2654 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link_active/link_speed before reporting. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and return early. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map (physical layer, negotiated link speed) to an ifmedia
	 * subtype.  A device may advertise several physical layers;
	 * each group below only matches the speeds it can produce.
	 */

	/* Copper BASE-T / BASE-TX layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber; 1G link over an LR module reports 1000BASE-LX */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	/* Backplane KR: fall back to SR when IFM_ETH_XTYPE is unavailable */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	/* Backplane KX4/KX: fall back to CX4 without IFM_ETH_XTYPE */
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2804
2805 /************************************************************************
2806 * ixgbe_media_change - Media Ioctl callback
2807 *
2808 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
2810 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; user-selected speeds are not allowed. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */

	/* Translate the selected ifmedia subtype into a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the hardware supports. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	/* Program the PHY/MAC with the requested speed set. */
	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);

	/*
	 * Record the advertised speeds in the same bitmask format used
	 * by the "advertise_speed" sysctl:
	 *   bit 0 = 100M, bit 1 = 1G, bit 2 = 10G,
	 *   bit 3 = 10M,  bit 4 = 2.5G, bit 5 = 5G.
	 * Zero means "autoselect".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2909
2910 /************************************************************************
2911 * ixgbe_set_promisc
2912 ************************************************************************/
/*
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE)
 * from the interface flags.  Must be called with the core lock held.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from "no unicast promiscuous". */
	rctl &= (~IXGBE_FCTRL_UPE);
	/*
	 * Count multicast memberships (capped at the hardware filter
	 * limit) to decide whether multicast promiscuous is needed.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* All groups fit in the filter table: no MPE required. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Re-write the register if promiscuous modes must be turned on. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2952
2953 /************************************************************************
2954 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2955 ************************************************************************/
2956 static int
2957 ixgbe_msix_link(void *arg)
2958 {
2959 struct adapter *adapter = arg;
2960 struct ixgbe_hw *hw = &adapter->hw;
2961 u32 eicr, eicr_mask;
2962 s32 retval;
2963
2964 ++adapter->link_irq.ev_count;
2965
2966 /* Pause other interrupts */
2967 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2968
2969 /* First get the cause */
2970 /*
2971 * The specifications of 82598, 82599, X540 and X550 say EICS register
2972 * is write only. However, Linux says it is a workaround for silicon
2973 * errata to read EICS instead of EICR to get interrupt cause. It seems
2974 * there is a problem about read clear mechanism for EICR register.
2975 */
2976 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2977 /* Be sure the queue bits are not cleared */
2978 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2979 /* Clear interrupt with write */
2980 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2981
2982 /* Link status change */
2983 if (eicr & IXGBE_EICR_LSC) {
2984 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2985 softint_schedule(adapter->link_si);
2986 }
2987
2988 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2989 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2990 (eicr & IXGBE_EICR_FLOW_DIR)) {
2991 /* This is probably overkill :) */
2992 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
2993 return 1;
2994 /* Disable the interrupt */
2995 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2996 softint_schedule(adapter->fdir_si);
2997 }
2998
2999 if (eicr & IXGBE_EICR_ECC) {
3000 device_printf(adapter->dev,
3001 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3002 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3003 }
3004
3005 /* Check for over temp condition */
3006 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3007 switch (adapter->hw.mac.type) {
3008 case ixgbe_mac_X550EM_a:
3009 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3010 break;
3011 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3012 IXGBE_EICR_GPI_SDP0_X550EM_a);
3013 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3014 IXGBE_EICR_GPI_SDP0_X550EM_a);
3015 retval = hw->phy.ops.check_overtemp(hw);
3016 if (retval != IXGBE_ERR_OVERTEMP)
3017 break;
3018 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3019 device_printf(adapter->dev, "System shutdown required!\n");
3020 break;
3021 default:
3022 if (!(eicr & IXGBE_EICR_TS))
3023 break;
3024 retval = hw->phy.ops.check_overtemp(hw);
3025 if (retval != IXGBE_ERR_OVERTEMP)
3026 break;
3027 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3028 device_printf(adapter->dev, "System shutdown required!\n");
3029 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3030 break;
3031 }
3032 }
3033
3034 /* Check for VF message */
3035 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3036 (eicr & IXGBE_EICR_MAILBOX))
3037 softint_schedule(adapter->mbx_si);
3038 }
3039
3040 if (ixgbe_is_sfp(hw)) {
3041 /* Pluggable optics-related interrupt */
3042 if (hw->mac.type >= ixgbe_mac_X540)
3043 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3044 else
3045 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3046
3047 if (eicr & eicr_mask) {
3048 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3049 softint_schedule(adapter->mod_si);
3050 }
3051
3052 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3053 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3054 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3055 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3056 softint_schedule(adapter->msf_si);
3057 }
3058 }
3059
3060 /* Check for fan failure */
3061 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3062 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3063 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3064 }
3065
3066 /* External PHY interrupt */
3067 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3068 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3069 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3070 softint_schedule(adapter->phy_si);
3071 }
3072
3073 /* Re-enable other interrupts */
3074 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3075 return 1;
3076 } /* ixgbe_msix_link */
3077
3078 static void
3079 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3080 {
3081
3082 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3083 itr |= itr << 16;
3084 else
3085 itr |= IXGBE_EITR_CNT_WDIS;
3086
3087 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3088 }
3089
3090
3091 /************************************************************************
3092 * ixgbe_sysctl_interrupt_rate_handler
3093 ************************************************************************/
3094 static int
3095 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3096 {
3097 struct sysctlnode node = *rnode;
3098 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3099 struct adapter *adapter = que->adapter;
3100 uint32_t reg, usec, rate;
3101 int error;
3102
3103 if (que == NULL)
3104 return 0;
3105 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3106 usec = ((reg & 0x0FF8) >> 3);
3107 if (usec > 0)
3108 rate = 500000 / usec;
3109 else
3110 rate = 0;
3111 node.sysctl_data = &rate;
3112 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3113 if (error || newp == NULL)
3114 return error;
3115 reg &= ~0xfff; /* default, no limitation */
3116 if (rate > 0 && rate < 500000) {
3117 if (rate < 1000)
3118 rate = 1000;
3119 reg |= ((4000000/rate) & 0xff8);
3120 /*
3121 * When RSC is used, ITR interval must be larger than
3122 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3123 * The minimum value is always greater than 2us on 100M
3124 * (and 10M?(not documented)), but it's not on 1G and higher.
3125 */
3126 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3127 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3128 if ((adapter->num_queues > 1)
3129 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3130 return EINVAL;
3131 }
3132 ixgbe_max_interrupt_rate = rate;
3133 } else
3134 ixgbe_max_interrupt_rate = 0;
3135 ixgbe_eitr_write(adapter, que->msix, reg);
3136
3137 return (0);
3138 } /* ixgbe_sysctl_interrupt_rate_handler */
3139
3140 const struct sysctlnode *
3141 ixgbe_sysctl_instance(struct adapter *adapter)
3142 {
3143 const char *dvname;
3144 struct sysctllog **log;
3145 int rc;
3146 const struct sysctlnode *rnode;
3147
3148 if (adapter->sysctltop != NULL)
3149 return adapter->sysctltop;
3150
3151 log = &adapter->sysctllog;
3152 dvname = device_xname(adapter->dev);
3153
3154 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3155 0, CTLTYPE_NODE, dvname,
3156 SYSCTL_DESCR("ixgbe information and settings"),
3157 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3158 goto err;
3159
3160 return rnode;
3161 err:
3162 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3163 return NULL;
3164 }
3165
3166 /************************************************************************
3167 * ixgbe_add_device_sysctls
3168 ************************************************************************/
/*
 * Create the per-device sysctl tree: generic tunables first, then
 * nodes gated on MAC type / device ID / feature capabilities.
 * Creation failures are reported but non-fatal.
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only ring/queue geometry */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Adaptive interrupt moderation; seeded from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* Sub-node grouping the external PHY sensors */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3307
3308 /************************************************************************
3309 * ixgbe_allocate_pci_resources
3310 ************************************************************************/
/*
 * Map BAR0 (the device register window) into kernel virtual space.
 * Returns 0 on success, ENXIO if the BAR has an unexpected type or
 * cannot be mapped.  On failure osdep.mem_size is left at 0 so the
 * free path knows nothing was mapped.
 */
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Device registers must not be mapped prefetchable even
		 * if the BAR advertises it: register accesses must not
		 * be merged or reordered.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3348
3349 static void
3350 ixgbe_free_softint(struct adapter *adapter)
3351 {
3352 struct ix_queue *que = adapter->queues;
3353 struct tx_ring *txr = adapter->tx_rings;
3354 int i;
3355
3356 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3357 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3358 if (txr->txr_si != NULL)
3359 softint_disestablish(txr->txr_si);
3360 }
3361 if (que->que_si != NULL)
3362 softint_disestablish(que->que_si);
3363 }
3364 if (adapter->txr_wq != NULL)
3365 workqueue_destroy(adapter->txr_wq);
3366 if (adapter->txr_wq_enqueued != NULL)
3367 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3368 if (adapter->que_wq != NULL)
3369 workqueue_destroy(adapter->que_wq);
3370
3371 /* Drain the Link queue */
3372 if (adapter->link_si != NULL) {
3373 softint_disestablish(adapter->link_si);
3374 adapter->link_si = NULL;
3375 }
3376 if (adapter->mod_si != NULL) {
3377 softint_disestablish(adapter->mod_si);
3378 adapter->mod_si = NULL;
3379 }
3380 if (adapter->msf_si != NULL) {
3381 softint_disestablish(adapter->msf_si);
3382 adapter->msf_si = NULL;
3383 }
3384 if (adapter->phy_si != NULL) {
3385 softint_disestablish(adapter->phy_si);
3386 adapter->phy_si = NULL;
3387 }
3388 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3389 if (adapter->fdir_si != NULL) {
3390 softint_disestablish(adapter->fdir_si);
3391 adapter->fdir_si = NULL;
3392 }
3393 }
3394 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3395 if (adapter->mbx_si != NULL) {
3396 softint_disestablish(adapter->mbx_si);
3397 adapter->mbx_si = NULL;
3398 }
3399 }
3400 } /* ixgbe_free_softint */
3401
3402 /************************************************************************
3403 * ixgbe_detach - Device removal routine
3404 *
3405 * Called when the driver is being removed.
3406 * Stops the adapter and deallocates all the resources
3407 * that were allocated for driver operation.
3408 *
3409 * return 0 on success, positive on failure
3410 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to do if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down all deferred-work machinery. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered at attach time. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue/ring counters; some stats arrays are smaller than
	 * num_queues, hence the __arraycount() bounds checks. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			/* rnbc only registered for 82598 at attach */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Release descriptor rings, queue array and multicast table. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3596
3597 /************************************************************************
3598 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3599 *
3600 * Prepare the adapter/port for LPLU and/or WoL
3601 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY up across the stop so LPLU can be entered. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3656
3657 /************************************************************************
3658 * ixgbe_shutdown - Shutdown entry point
3659 ************************************************************************/
3660 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3661 static int
3662 ixgbe_shutdown(device_t dev)
3663 {
3664 struct adapter *adapter = device_private(dev);
3665 int error = 0;
3666
3667 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3668
3669 IXGBE_CORE_LOCK(adapter);
3670 error = ixgbe_setup_low_power_mode(adapter);
3671 IXGBE_CORE_UNLOCK(adapter);
3672
3673 return (error);
3674 } /* ixgbe_shutdown */
3675 #endif
3676
3677 /************************************************************************
3678 * ixgbe_suspend
3679 *
3680 * From D0 to D3
3681 ************************************************************************/
3682 static bool
3683 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3684 {
3685 struct adapter *adapter = device_private(dev);
3686 int error = 0;
3687
3688 INIT_DEBUGOUT("ixgbe_suspend: begin");
3689
3690 IXGBE_CORE_LOCK(adapter);
3691
3692 error = ixgbe_setup_low_power_mode(adapter);
3693
3694 IXGBE_CORE_UNLOCK(adapter);
3695
3696 return (error);
3697 } /* ixgbe_suspend */
3698
3699 /************************************************************************
3700 * ixgbe_resume
3701 *
3702 * From D3 to D0
3703 ************************************************************************/
3704 static bool
3705 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3706 {
3707 struct adapter *adapter = device_private(dev);
3708 struct ifnet *ifp = adapter->ifp;
3709 struct ixgbe_hw *hw = &adapter->hw;
3710 u32 wus;
3711
3712 INIT_DEBUGOUT("ixgbe_resume: begin");
3713
3714 IXGBE_CORE_LOCK(adapter);
3715
3716 /* Read & clear WUS register */
3717 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3718 if (wus)
3719 device_printf(dev, "Woken up by (WUS): %#010x\n",
3720 IXGBE_READ_REG(hw, IXGBE_WUS));
3721 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3722 /* And clear WUFC until next low-power transition */
3723 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3724
3725 /*
3726 * Required after D3->D0 transition;
3727 * will re-advertise all previous advertised speeds
3728 */
3729 if (ifp->if_flags & IFF_UP)
3730 ixgbe_init_locked(adapter);
3731
3732 IXGBE_CORE_UNLOCK(adapter);
3733
3734 return true;
3735 } /* ixgbe_resume */
3736
3737 /*
3738 * Set the various hardware offload abilities.
3739 *
3740 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3741 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3742 * mbuf offload flags the driver will understand.
3743 */
3744 static void
3745 ixgbe_set_if_hwassist(struct adapter *adapter)
3746 {
3747 /* XXX */
3748 }
3749
3750 /************************************************************************
3751 * ixgbe_init_locked - Init entry point
3752 *
3753 * Used in two ways: It is used by the stack as an init
3754 * entry point in network interface structure. It is also
3755 * used by the driver as a hw/sw initialization routine to
3756 * get to a consistent state.
3757 *
3758 * return 0 on success, positive on failure
3759 ************************************************************************/
3760 static void
3761 ixgbe_init_locked(struct adapter *adapter)
3762 {
3763 struct ifnet *ifp = adapter->ifp;
3764 device_t dev = adapter->dev;
3765 struct ixgbe_hw *hw = &adapter->hw;
3766 struct tx_ring *txr;
3767 struct rx_ring *rxr;
3768 u32 txdctl, mhadd;
3769 u32 rxdctl, rxctrl;
3770 u32 ctrl_ext;
3771 int i, j, err;
3772
3773 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3774
3775 KASSERT(mutex_owned(&adapter->core_mtx));
3776 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3777
3778 hw->adapter_stopped = FALSE;
3779 ixgbe_stop_adapter(hw);
3780 callout_stop(&adapter->timer);
3781
3782 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3783 adapter->max_frame_size =
3784 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3785
3786 /* Queue indices may change with IOV mode */
3787 ixgbe_align_all_queue_indices(adapter);
3788
3789 /* reprogram the RAR[0] in case user changed it. */
3790 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3791
3792 /* Get the latest mac address, User can use a LAA */
3793 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3794 IXGBE_ETH_LENGTH_OF_ADDRESS);
3795 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3796 hw->addr_ctrl.rar_used_count = 1;
3797
3798 /* Set hardware offload abilities from ifnet flags */
3799 ixgbe_set_if_hwassist(adapter);
3800
3801 /* Prepare transmit descriptors and buffers */
3802 if (ixgbe_setup_transmit_structures(adapter)) {
3803 device_printf(dev, "Could not setup transmit structures\n");
3804 ixgbe_stop(adapter);
3805 return;
3806 }
3807
3808 ixgbe_init_hw(hw);
3809
3810 ixgbe_initialize_iov(adapter);
3811
3812 ixgbe_initialize_transmit_units(adapter);
3813
3814 /* Setup Multicast table */
3815 ixgbe_set_multi(adapter);
3816
3817 /* Determine the correct mbuf pool, based on frame size */
3818 if (adapter->max_frame_size <= MCLBYTES)
3819 adapter->rx_mbuf_sz = MCLBYTES;
3820 else
3821 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3822
3823 /* Prepare receive descriptors and buffers */
3824 if (ixgbe_setup_receive_structures(adapter)) {
3825 device_printf(dev, "Could not setup receive structures\n");
3826 ixgbe_stop(adapter);
3827 return;
3828 }
3829
3830 /* Configure RX settings */
3831 ixgbe_initialize_receive_units(adapter);
3832
3833 /* Enable SDP & MSI-X interrupts based on adapter */
3834 ixgbe_config_gpie(adapter);
3835
3836 /* Set MTU size */
3837 if (ifp->if_mtu > ETHERMTU) {
3838 /* aka IXGBE_MAXFRS on 82599 and newer */
3839 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3840 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3841 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3842 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3843 }
3844
3845 /* Now enable all the queues */
3846 for (i = 0; i < adapter->num_queues; i++) {
3847 txr = &adapter->tx_rings[i];
3848 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3849 txdctl |= IXGBE_TXDCTL_ENABLE;
3850 /* Set WTHRESH to 8, burst writeback */
3851 txdctl |= (8 << 16);
3852 /*
3853 * When the internal queue falls below PTHRESH (32),
3854 * start prefetching as long as there are at least
3855 * HTHRESH (1) buffers ready. The values are taken
3856 * from the Intel linux driver 3.8.21.
3857 * Prefetching enables tx line rate even with 1 queue.
3858 */
3859 txdctl |= (32 << 0) | (1 << 8);
3860 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3861 }
3862
3863 for (i = 0; i < adapter->num_queues; i++) {
3864 rxr = &adapter->rx_rings[i];
3865 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3866 if (hw->mac.type == ixgbe_mac_82598EB) {
3867 /*
3868 * PTHRESH = 21
3869 * HTHRESH = 4
3870 * WTHRESH = 8
3871 */
3872 rxdctl &= ~0x3FFFFF;
3873 rxdctl |= 0x080420;
3874 }
3875 rxdctl |= IXGBE_RXDCTL_ENABLE;
3876 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3877 for (j = 0; j < 10; j++) {
3878 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3879 IXGBE_RXDCTL_ENABLE)
3880 break;
3881 else
3882 msec_delay(1);
3883 }
3884 wmb();
3885
3886 /*
3887 * In netmap mode, we must preserve the buffers made
3888 * available to userspace before the if_init()
3889 * (this is true by default on the TX side, because
3890 * init makes all buffers available to userspace).
3891 *
3892 * netmap_reset() and the device specific routines
3893 * (e.g. ixgbe_setup_receive_rings()) map these
3894 * buffers at the end of the NIC ring, so here we
3895 * must set the RDT (tail) register to make sure
3896 * they are not overwritten.
3897 *
3898 * In this driver the NIC ring starts at RDH = 0,
3899 * RDT points to the last slot available for reception (?),
3900 * so RDT = num_rx_desc - 1 means the whole ring is available.
3901 */
3902 #ifdef DEV_NETMAP
3903 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3904 (ifp->if_capenable & IFCAP_NETMAP)) {
3905 struct netmap_adapter *na = NA(adapter->ifp);
3906 struct netmap_kring *kring = &na->rx_rings[i];
3907 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3908
3909 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
3910 } else
3911 #endif /* DEV_NETMAP */
3912 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
3913 adapter->num_rx_desc - 1);
3914 }
3915
3916 /* Enable Receive engine */
3917 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3918 if (hw->mac.type == ixgbe_mac_82598EB)
3919 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3920 rxctrl |= IXGBE_RXCTRL_RXEN;
3921 ixgbe_enable_rx_dma(hw, rxctrl);
3922
3923 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3924
3925 /* Set up MSI/MSI-X routing */
3926 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3927 ixgbe_configure_ivars(adapter);
3928 /* Set up auto-mask */
3929 if (hw->mac.type == ixgbe_mac_82598EB)
3930 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3931 else {
3932 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3933 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3934 }
3935 } else { /* Simple settings for Legacy/MSI */
3936 ixgbe_set_ivar(adapter, 0, 0, 0);
3937 ixgbe_set_ivar(adapter, 0, 0, 1);
3938 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3939 }
3940
3941 ixgbe_init_fdir(adapter);
3942
3943 /*
3944 * Check on any SFP devices that
3945 * need to be kick-started
3946 */
3947 if (hw->phy.type == ixgbe_phy_none) {
3948 err = hw->phy.ops.identify(hw);
3949 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3950 device_printf(dev,
3951 "Unsupported SFP+ module type was detected.\n");
3952 return;
3953 }
3954 }
3955
3956 /* Set moderation on the Link interrupt */
3957 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
3958
3959 /* Enable power to the phy. */
3960 ixgbe_set_phy_power(hw, TRUE);
3961
3962 /* Config/Enable Link */
3963 ixgbe_config_link(adapter);
3964
3965 /* Hardware Packet Buffer & Flow Control setup */
3966 ixgbe_config_delay_values(adapter);
3967
3968 /* Initialize the FC settings */
3969 ixgbe_start_hw(hw);
3970
3971 /* Set up VLAN support and filter */
3972 ixgbe_setup_vlan_hw_support(adapter);
3973
3974 /* Setup DMA Coalescing */
3975 ixgbe_config_dmac(adapter);
3976
3977 /* And now turn on interrupts */
3978 ixgbe_enable_intr(adapter);
3979
3980 /* Enable the use of the MBX by the VF's */
3981 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3982 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3983 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3984 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3985 }
3986
3987 /* Update saved flags. See ixgbe_ifflags_cb() */
3988 adapter->if_flags = ifp->if_flags;
3989
3990 /* Now inform the stack we're ready */
3991 ifp->if_flags |= IFF_RUNNING;
3992
3993 return;
3994 } /* ixgbe_init_locked */
3995
3996 /************************************************************************
3997 * ixgbe_init
3998 ************************************************************************/
3999 static int
4000 ixgbe_init(struct ifnet *ifp)
4001 {
4002 struct adapter *adapter = ifp->if_softc;
4003
4004 IXGBE_CORE_LOCK(adapter);
4005 ixgbe_init_locked(adapter);
4006 IXGBE_CORE_UNLOCK(adapter);
4007
4008 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4009 } /* ixgbe_init */
4010
4011 /************************************************************************
4012 * ixgbe_set_ivar
4013 *
4014 * Setup the correct IVAR register for a particular MSI-X interrupt
4015 * (yes this is all very magic and confusing :)
4016 * - entry is the register array entry
4017 * - vector is the MSI-X vector for this queue
4018 * - type is RX/TX/MISC
4019 ************************************************************************/
4020 static void
4021 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4022 {
4023 struct ixgbe_hw *hw = &adapter->hw;
4024 u32 ivar, index;
4025
4026 vector |= IXGBE_IVAR_ALLOC_VAL;
4027
4028 switch (hw->mac.type) {
4029 case ixgbe_mac_82598EB:
4030 if (type == -1)
4031 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4032 else
4033 entry += (type * 64);
4034 index = (entry >> 2) & 0x1F;
4035 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4036 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4037 ivar |= (vector << (8 * (entry & 0x3)));
4038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4039 break;
4040 case ixgbe_mac_82599EB:
4041 case ixgbe_mac_X540:
4042 case ixgbe_mac_X550:
4043 case ixgbe_mac_X550EM_x:
4044 case ixgbe_mac_X550EM_a:
4045 if (type == -1) { /* MISC IVAR */
4046 index = (entry & 1) * 8;
4047 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4048 ivar &= ~(0xFF << index);
4049 ivar |= (vector << index);
4050 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4051 } else { /* RX/TX IVARS */
4052 index = (16 * (entry & 1)) + (8 * type);
4053 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4054 ivar &= ~(0xFF << index);
4055 ivar |= (vector << index);
4056 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4057 }
4058 break;
4059 default:
4060 break;
4061 }
4062 } /* ixgbe_set_ivar */
4063
4064 /************************************************************************
4065 * ixgbe_configure_ivars
4066 ************************************************************************/
4067 static void
4068 ixgbe_configure_ivars(struct adapter *adapter)
4069 {
4070 struct ix_queue *que = adapter->queues;
4071 u32 newitr;
4072
4073 if (ixgbe_max_interrupt_rate > 0)
4074 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4075 else {
4076 /*
4077 * Disable DMA coalescing if interrupt moderation is
4078 * disabled.
4079 */
4080 adapter->dmac = 0;
4081 newitr = 0;
4082 }
4083
4084 for (int i = 0; i < adapter->num_queues; i++, que++) {
4085 struct rx_ring *rxr = &adapter->rx_rings[i];
4086 struct tx_ring *txr = &adapter->tx_rings[i];
4087 /* First the RX queue entry */
4088 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4089 /* ... and the TX */
4090 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4091 /* Set an Initial EITR value */
4092 ixgbe_eitr_write(adapter, que->msix, newitr);
4093 /*
4094 * To eliminate influence of the previous state.
4095 * At this point, Tx/Rx interrupt handler
4096 * (ixgbe_msix_que()) cannot be called, so both
4097 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4098 */
4099 que->eitr_setting = 0;
4100 }
4101
4102 /* For the Link interrupt */
4103 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4104 } /* ixgbe_configure_ivars */
4105
4106 /************************************************************************
4107 * ixgbe_config_gpie
4108 ************************************************************************/
4109 static void
4110 ixgbe_config_gpie(struct adapter *adapter)
4111 {
4112 struct ixgbe_hw *hw = &adapter->hw;
4113 u32 gpie;
4114
4115 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4116
4117 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4118 /* Enable Enhanced MSI-X mode */
4119 gpie |= IXGBE_GPIE_MSIX_MODE
4120 | IXGBE_GPIE_EIAME
4121 | IXGBE_GPIE_PBA_SUPPORT
4122 | IXGBE_GPIE_OCD;
4123 }
4124
4125 /* Fan Failure Interrupt */
4126 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4127 gpie |= IXGBE_SDP1_GPIEN;
4128
4129 /* Thermal Sensor Interrupt */
4130 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4131 gpie |= IXGBE_SDP0_GPIEN_X540;
4132
4133 /* Link detection */
4134 switch (hw->mac.type) {
4135 case ixgbe_mac_82599EB:
4136 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4137 break;
4138 case ixgbe_mac_X550EM_x:
4139 case ixgbe_mac_X550EM_a:
4140 gpie |= IXGBE_SDP0_GPIEN_X540;
4141 break;
4142 default:
4143 break;
4144 }
4145
4146 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4147
4148 } /* ixgbe_config_gpie */
4149
4150 /************************************************************************
4151 * ixgbe_config_delay_values
4152 *
4153 * Requires adapter->max_frame_size to be set.
4154 ************************************************************************/
4155 static void
4156 ixgbe_config_delay_values(struct adapter *adapter)
4157 {
4158 struct ixgbe_hw *hw = &adapter->hw;
4159 u32 rxpb, frame, size, tmp;
4160
4161 frame = adapter->max_frame_size;
4162
4163 /* Calculate High Water */
4164 switch (hw->mac.type) {
4165 case ixgbe_mac_X540:
4166 case ixgbe_mac_X550:
4167 case ixgbe_mac_X550EM_x:
4168 case ixgbe_mac_X550EM_a:
4169 tmp = IXGBE_DV_X540(frame, frame);
4170 break;
4171 default:
4172 tmp = IXGBE_DV(frame, frame);
4173 break;
4174 }
4175 size = IXGBE_BT2KB(tmp);
4176 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4177 hw->fc.high_water[0] = rxpb - size;
4178
4179 /* Now calculate Low Water */
4180 switch (hw->mac.type) {
4181 case ixgbe_mac_X540:
4182 case ixgbe_mac_X550:
4183 case ixgbe_mac_X550EM_x:
4184 case ixgbe_mac_X550EM_a:
4185 tmp = IXGBE_LOW_DV_X540(frame);
4186 break;
4187 default:
4188 tmp = IXGBE_LOW_DV(frame);
4189 break;
4190 }
4191 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4192
4193 hw->fc.pause_time = IXGBE_FC_PAUSE;
4194 hw->fc.send_xon = TRUE;
4195 } /* ixgbe_config_delay_values */
4196
4197 /************************************************************************
4198 * ixgbe_set_multi - Multicast Update
4199 *
4200 * Called whenever multicast address list is updated.
4201 ************************************************************************/
4202 static void
4203 ixgbe_set_multi(struct adapter *adapter)
4204 {
4205 struct ixgbe_mc_addr *mta;
4206 struct ifnet *ifp = adapter->ifp;
4207 u8 *update_ptr;
4208 int mcnt = 0;
4209 u32 fctrl;
4210 struct ethercom *ec = &adapter->osdep.ec;
4211 struct ether_multi *enm;
4212 struct ether_multistep step;
4213
4214 KASSERT(mutex_owned(&adapter->core_mtx));
4215 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
4216
4217 mta = adapter->mta;
4218 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4219
4220 ifp->if_flags &= ~IFF_ALLMULTI;
4221 ETHER_LOCK(ec);
4222 ETHER_FIRST_MULTI(step, ec, enm);
4223 while (enm != NULL) {
4224 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4225 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4226 ETHER_ADDR_LEN) != 0)) {
4227 ifp->if_flags |= IFF_ALLMULTI;
4228 break;
4229 }
4230 bcopy(enm->enm_addrlo,
4231 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4232 mta[mcnt].vmdq = adapter->pool;
4233 mcnt++;
4234 ETHER_NEXT_MULTI(step, enm);
4235 }
4236 ETHER_UNLOCK(ec);
4237
4238 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4239 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4240 if (ifp->if_flags & IFF_PROMISC)
4241 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4242 else if (ifp->if_flags & IFF_ALLMULTI) {
4243 fctrl |= IXGBE_FCTRL_MPE;
4244 }
4245
4246 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4247
4248 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4249 update_ptr = (u8 *)mta;
4250 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4251 ixgbe_mc_array_itr, TRUE);
4252 }
4253
4254 } /* ixgbe_set_multi */
4255
4256 /************************************************************************
4257 * ixgbe_mc_array_itr
4258 *
4259 * An iterator function needed by the multicast shared code.
4260 * It feeds the shared code routine the addresses in the
4261 * array of ixgbe_set_multi() one by one.
4262 ************************************************************************/
4263 static u8 *
4264 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4265 {
4266 struct ixgbe_mc_addr *mta;
4267
4268 mta = (struct ixgbe_mc_addr *)*update_ptr;
4269 *vmdq = mta->vmdq;
4270
4271 *update_ptr = (u8*)(mta + 1);
4272
4273 return (mta->addr);
4274 } /* ixgbe_mc_array_itr */
4275
4276 /************************************************************************
4277 * ixgbe_local_timer - Timer routine
4278 *
4279 * Checks for link status, updates statistics,
4280 * and runs the watchdog check.
4281 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Run the real timer body under the core lock */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4291
/*
 * Timer body: runs once per second with the core lock held.  Probes
 * for newly-inserted optics, refreshes link state and statistics,
 * aggregates per-queue TX error counters, runs the watchdog, and
 * re-arms itself.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	struct ix_queue *que = adapter->queues;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	/* Publish the per-queue totals through the adapter-wide evcnts */
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	ixgbe_watchdog(adapter->ifp);

out:
	/* Re-arm for the next one-second tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
} /* ixgbe_local_timer */
4339
/*
 * ixgbe_watchdog - per-second TX hang check.
 *
 * Scans every TX queue; if any queue is hung the interface is reset,
 * otherwise an interrupt is forced on queues that still have work
 * pending (see the errata note below).  Called from
 * ixgbe_local_timer1() with the core lock held.
 */
static void
ixgbe_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ix_queue *que;
	struct tx_ring *txr;
	u64 queues = 0;
	bool hung = false;
	bool sending = false;
	int i;

	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		hung = ixgbe_watchdog_txq(ifp, txr, &sending);
		if (hung)
			break;
		else if (sending)
			queues |= ((u64)1 << txr->me);
	}

	if (hung) {
		/* Reinitialize the interface to recover from the hang */
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_oerrors++;
		adapter->watchdog_events.ev_count++;
		ixgbe_init_locked(adapter);
	} else if (queues != 0) {
		/*
		 * Force an IRQ on queues with work.
		 *
		 * It's supposed that ixgbe_rearm_queues() would not be
		 * needed if no chip had a bug. In reality,
		 * ixgbe_rearm_queues() is required on 82599 and newer
		 * chips AND on queues other than queue 0 to prevent a
		 * device timeout. When it occurred, a packet was sent but
		 * the descriptor's DD bit wasn't set even though
		 * IXGBE_TXD_CMD_EOP and IXGBE_TXD_CMD_RS were set. After
		 * forcing an interrupt by writing the EICS register in
		 * ixgbe_rearm_queues(), DD is set. Why? Is this an
		 * undocumented errata? Though it might be possible not to
		 * call rearm_queues on 82598 or queue 0, we call it in
		 * all cases in case the problem occurs.
		 */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			u64 index = queues & ((u64)1 << i);

			/* Skip queues that are administratively disabled */
			mutex_enter(&que->dc_mtx);
			if ((index != 0) && (que->disabled_count == 0))
				ixgbe_rearm_queues(adapter, index);
			mutex_exit(&que->dc_mtx);
		}
	}
}
4392
/*
 * ixgbe_watchdog_txq - check one TX queue for a transmit timeout.
 *
 * Sets *sending to whether the queue had packets outstanding, and
 * returns true if the queue is considered hung: no completion within
 * IXGBE_TX_TIMEOUT seconds of the last transmission.
 */
static bool
ixgbe_watchdog_txq(struct ifnet *ifp, struct tx_ring *txr, bool *sending)
{
	struct adapter *adapter = ifp->if_softc;
	device_t dev = adapter->dev;
	bool hung = false;
	bool more = false;

	IXGBE_TX_LOCK(txr);
	*sending = txr->sending;
	if (*sending && ((time_uptime - txr->lastsent) > IXGBE_TX_TIMEOUT)) {
		/*
		 * Since we're using delayed interrupts, sweep up before we
		 * report an error.
		 */
		do {
			more = ixgbe_txeof(txr);
		} while (more);
		hung = true;
		/* A fully drained ring suggests a lost completion IRQ */
		device_printf(dev,
		    "Watchdog timeout (queue %d%s)-- resetting\n", txr->me,
		    (txr->tx_avail == txr->num_desc)
		    ? ", lost interrupt?" : "");
	}
	IXGBE_TX_UNLOCK(txr);

	return hung;
}
4421
4422 /************************************************************************
4423 * ixgbe_sfp_probe
4424 *
4425 * Determine if a port had optics inserted.
4426 ************************************************************************/
4427 static bool
4428 ixgbe_sfp_probe(struct adapter *adapter)
4429 {
4430 struct ixgbe_hw *hw = &adapter->hw;
4431 device_t dev = adapter->dev;
4432 bool result = FALSE;
4433
4434 if ((hw->phy.type == ixgbe_phy_nl) &&
4435 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4436 s32 ret = hw->phy.ops.identify_sfp(hw);
4437 if (ret)
4438 goto out;
4439 ret = hw->phy.ops.reset(hw);
4440 adapter->sfp_probe = FALSE;
4441 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4442 device_printf(dev,"Unsupported SFP+ module detected!");
4443 device_printf(dev,
4444 "Reload driver with supported module.\n");
4445 goto out;
4446 } else
4447 device_printf(dev, "SFP+ module detected!\n");
4448 /* We now have supported optics */
4449 result = TRUE;
4450 }
4451 out:
4452
4453 return (result);
4454 } /* ixgbe_sfp_probe */
4455
4456 /************************************************************************
4457 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4458 ************************************************************************/
4459 static void
4460 ixgbe_handle_mod(void *context)
4461 {
4462 struct adapter *adapter = context;
4463 struct ixgbe_hw *hw = &adapter->hw;
4464 device_t dev = adapter->dev;
4465 u32 err, cage_full = 0;
4466
4467 ++adapter->mod_sicount.ev_count;
4468 if (adapter->hw.need_crosstalk_fix) {
4469 switch (hw->mac.type) {
4470 case ixgbe_mac_82599EB:
4471 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4472 IXGBE_ESDP_SDP2;
4473 break;
4474 case ixgbe_mac_X550EM_x:
4475 case ixgbe_mac_X550EM_a:
4476 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4477 IXGBE_ESDP_SDP0;
4478 break;
4479 default:
4480 break;
4481 }
4482
4483 if (!cage_full)
4484 return;
4485 }
4486
4487 err = hw->phy.ops.identify_sfp(hw);
4488 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4489 device_printf(dev,
4490 "Unsupported SFP+ module type was detected.\n");
4491 return;
4492 }
4493
4494 if (hw->mac.type == ixgbe_mac_82598EB)
4495 err = hw->phy.ops.reset(hw);
4496 else
4497 err = hw->mac.ops.setup_sfp(hw);
4498
4499 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4500 device_printf(dev,
4501 "Setup failure - unsupported SFP+ module type.\n");
4502 return;
4503 }
4504 softint_schedule(adapter->msf_si);
4505 } /* ixgbe_handle_mod */
4506
4507
4508 /************************************************************************
4509 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4510 ************************************************************************/
4511 static void
4512 ixgbe_handle_msf(void *context)
4513 {
4514 struct adapter *adapter = context;
4515 struct ixgbe_hw *hw = &adapter->hw;
4516 u32 autoneg;
4517 bool negotiate;
4518
4519 ++adapter->msf_sicount.ev_count;
4520 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4521 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4522
4523 autoneg = hw->phy.autoneg_advertised;
4524 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4525 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4526 else
4527 negotiate = 0;
4528 if (hw->mac.ops.setup_link)
4529 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4530
4531 /* Adjust media types shown in ifconfig */
4532 ifmedia_removeall(&adapter->media);
4533 ixgbe_add_media_types(adapter);
4534 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4535 } /* ixgbe_handle_msf */
4536
4537 /************************************************************************
4538 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4539 ************************************************************************/
4540 static void
4541 ixgbe_handle_phy(void *context)
4542 {
4543 struct adapter *adapter = context;
4544 struct ixgbe_hw *hw = &adapter->hw;
4545 int error;
4546
4547 ++adapter->phy_sicount.ev_count;
4548 error = hw->phy.ops.handle_lasi(hw);
4549 if (error == IXGBE_ERR_OVERTEMP)
4550 device_printf(adapter->dev,
4551 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4552 " PHY will downshift to lower power state!\n");
4553 else if (error)
4554 device_printf(adapter->dev,
4555 "Error handling LASI interrupt: %d\n", error);
4556 } /* ixgbe_handle_phy */
4557
4558 static void
4559 ixgbe_ifstop(struct ifnet *ifp, int disable)
4560 {
4561 struct adapter *adapter = ifp->if_softc;
4562
4563 IXGBE_CORE_LOCK(adapter);
4564 ixgbe_stop(adapter);
4565 IXGBE_CORE_UNLOCK(adapter);
4566 }
4567
4568 /************************************************************************
4569 * ixgbe_stop - Stop the hardware
4570 *
4571 * Disables all traffic on the adapter by issuing a
4572 * global reset on the MAC and deallocates TX/RX buffers.
4573 ************************************************************************/
4574 static void
4575 ixgbe_stop(void *arg)
4576 {
4577 struct ifnet *ifp;
4578 struct adapter *adapter = arg;
4579 struct ixgbe_hw *hw = &adapter->hw;
4580 struct tx_ring *txr;
4581 int i;
4582
4583 ifp = adapter->ifp;
4584
4585 KASSERT(mutex_owned(&adapter->core_mtx));
4586
4587 INIT_DEBUGOUT("ixgbe_stop: begin\n");
4588 ixgbe_disable_intr(adapter);
4589 callout_stop(&adapter->timer);
4590
4591 txr = adapter->tx_rings;
4592 for (i = 0; i < adapter->num_queues; i++, txr++) {
4593 IXGBE_TX_LOCK(txr);
4594 txr->sending = false;
4595 IXGBE_TX_UNLOCK(txr);
4596 }
4597
4598 /* Let the stack know...*/
4599 ifp->if_flags &= ~IFF_RUNNING;
4600
4601 ixgbe_reset_hw(hw);
4602 hw->adapter_stopped = FALSE;
4603 ixgbe_stop_adapter(hw);
4604 if (hw->mac.type == ixgbe_mac_82599EB)
4605 ixgbe_stop_mac_link_on_d3_82599(hw);
4606 /* Turn off the laser - noop with no optics */
4607 ixgbe_disable_tx_laser(hw);
4608
4609 /* Update the stack */
4610 adapter->link_up = FALSE;
4611 ixgbe_update_link_status(adapter);
4612
4613 /* reprogram the RAR[0] in case user changed it. */
4614 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4615
4616 return;
4617 } /* ixgbe_stop */
4618
4619 /************************************************************************
4620 * ixgbe_update_link_status - Update OS on link state
4621 *
4622 * Note: Only updates the OS on the cached link state.
4623 * The real check of the hardware only happens with
4624 * a link interrupt.
4625 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition. */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Report the negotiated speed when booting verbosely. */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition. */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Drain pending work; see ixgbe_drain_all(). */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4707
4708 /************************************************************************
4709 * ixgbe_config_dmac - Configure DMA Coalescing
4710 ************************************************************************/
4711 static void
4712 ixgbe_config_dmac(struct adapter *adapter)
4713 {
4714 struct ixgbe_hw *hw = &adapter->hw;
4715 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4716
4717 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4718 return;
4719
4720 if (dcfg->watchdog_timer ^ adapter->dmac ||
4721 dcfg->link_speed ^ adapter->link_speed) {
4722 dcfg->watchdog_timer = adapter->dmac;
4723 dcfg->fcoe_en = false;
4724 dcfg->link_speed = adapter->link_speed;
4725 dcfg->num_tcs = 1;
4726
4727 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4728 dcfg->watchdog_timer, dcfg->link_speed);
4729
4730 hw->mac.ops.dmac_config(hw);
4731 }
4732 } /* ixgbe_config_dmac */
4733
4734 /************************************************************************
4735 * ixgbe_enable_intr
4736 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from everything except the per-queue RX/TX causes. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		/* Note: tests feat_cap (capability), not feat_en. */
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4817
4818 /************************************************************************
4819 * ixgbe_disable_intr_internal
4820 ************************************************************************/
4821 static void
4822 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
4823 {
4824 struct ix_queue *que = adapter->queues;
4825
4826 /* disable interrupts other than queues */
4827 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4828
4829 if (adapter->msix_mem)
4830 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4831
4832 for (int i = 0; i < adapter->num_queues; i++, que++)
4833 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
4834
4835 IXGBE_WRITE_FLUSH(&adapter->hw);
4836
4837 } /* ixgbe_do_disable_intr_internal */
4838
4839 /************************************************************************
4840 * ixgbe_disable_intr
4841 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/*
	 * nestok = true: nested disable is acceptable here; the exact
	 * semantics live in ixgbe_disable_queue_internal().
	 */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4848
4849 /************************************************************************
4850 * ixgbe_ensure_disabled_intr
4851 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/*
	 * nestok = false: contrast with ixgbe_disable_intr(); see
	 * ixgbe_disable_queue_internal() for the difference.
	 */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4858
4859 /************************************************************************
4860 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4861 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Read the interrupt cause register. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Shared or spurious interrupt: no cause bits set. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* Reap completed transmissions on the first TX ring. */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		/* Re-arm the SDP1 (fan failure) cause. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change: handled in softint context */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* SFP module insertion/removal: ack and defer. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed fiber event (82599 SDP1): ack and defer. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Deferred RX work pending: hand it to the queue handler. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4952
4953 /************************************************************************
4954 * ixgbe_free_pciintr_resources
4955 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	/* Disestablish the remaining handler, if still installed... */
	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* ...then release the interrupt vectors themselves. */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
4991
4992 /************************************************************************
4993 * ixgbe_free_pci_resources
4994 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	/* Tear down interrupt handlers and vectors first... */
	ixgbe_free_pciintr_resources(adapter);

	/* ...then unmap the register space, if it was mapped. */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
5008
5009 /************************************************************************
5010 * ixgbe_set_sysctl_value
5011 ************************************************************************/
5012 static void
5013 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5014 const char *description, int *limit, int value)
5015 {
5016 device_t dev = adapter->dev;
5017 struct sysctllog **log;
5018 const struct sysctlnode *rnode, *cnode;
5019
5020 log = &adapter->sysctllog;
5021 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5022 aprint_error_dev(dev, "could not create sysctl root\n");
5023 return;
5024 }
5025 if (sysctl_createv(log, 0, &rnode, &cnode,
5026 CTLFLAG_READWRITE, CTLTYPE_INT,
5027 name, SYSCTL_DESCR(description),
5028 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5029 aprint_error_dev(dev, "could not create sysctl\n");
5030 *limit = value;
5031 } /* ixgbe_set_sysctl_value */
5032
5033 /************************************************************************
5034 * ixgbe_sysctl_flowcntl
5035 *
5036 * SYSCTL wrapper around setting Flow Control
5037 ************************************************************************/
5038 static int
5039 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5040 {
5041 struct sysctlnode node = *rnode;
5042 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5043 int error, fc;
5044
5045 fc = adapter->hw.fc.current_mode;
5046 node.sysctl_data = &fc;
5047 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5048 if (error != 0 || newp == NULL)
5049 return error;
5050
5051 /* Don't bother if it's not changed */
5052 if (fc == adapter->hw.fc.current_mode)
5053 return (0);
5054
5055 return ixgbe_set_flowcntl(adapter, fc);
5056 } /* ixgbe_sysctl_flowcntl */
5057
5058 /************************************************************************
5059 * ixgbe_set_flowcntl - Set flow control
5060 *
5061 * Flow control values:
5062 * 0 - off
5063 * 1 - rx pause
5064 * 2 - tx pause
5065 * 3 - full
5066 ************************************************************************/
5067 static int
5068 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5069 {
5070 switch (fc) {
5071 case ixgbe_fc_rx_pause:
5072 case ixgbe_fc_tx_pause:
5073 case ixgbe_fc_full:
5074 adapter->hw.fc.requested_mode = fc;
5075 if (adapter->num_queues > 1)
5076 ixgbe_disable_rx_drop(adapter);
5077 break;
5078 case ixgbe_fc_none:
5079 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5080 if (adapter->num_queues > 1)
5081 ixgbe_enable_rx_drop(adapter);
5082 break;
5083 default:
5084 return (EINVAL);
5085 }
5086
5087 #if 0 /* XXX NetBSD */
5088 /* Don't autoneg if forcing a value */
5089 adapter->hw.fc.disable_fc_autoneg = TRUE;
5090 #endif
5091 ixgbe_fc_enable(&adapter->hw);
5092
5093 return (0);
5094 } /* ixgbe_set_flowcntl */
5095
5096 /************************************************************************
5097 * ixgbe_enable_rx_drop
5098 *
5099 * Enable the hardware to drop packets when the buffer is
5100 * full. This is useful with multiqueue, so that no single
5101 * queue being full stalls the entire RX engine. We only
5102 * enable this when Multiqueue is enabled AND Flow Control
5103 * is disabled.
5104 ************************************************************************/
5105 static void
5106 ixgbe_enable_rx_drop(struct adapter *adapter)
5107 {
5108 struct ixgbe_hw *hw = &adapter->hw;
5109 struct rx_ring *rxr;
5110 u32 srrctl;
5111
5112 for (int i = 0; i < adapter->num_queues; i++) {
5113 rxr = &adapter->rx_rings[i];
5114 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5115 srrctl |= IXGBE_SRRCTL_DROP_EN;
5116 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5117 }
5118
5119 /* enable drop for each vf */
5120 for (int i = 0; i < adapter->num_vfs; i++) {
5121 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5122 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5123 IXGBE_QDE_ENABLE));
5124 }
5125 } /* ixgbe_enable_rx_drop */
5126
5127 /************************************************************************
5128 * ixgbe_disable_rx_drop
5129 ************************************************************************/
5130 static void
5131 ixgbe_disable_rx_drop(struct adapter *adapter)
5132 {
5133 struct ixgbe_hw *hw = &adapter->hw;
5134 struct rx_ring *rxr;
5135 u32 srrctl;
5136
5137 for (int i = 0; i < adapter->num_queues; i++) {
5138 rxr = &adapter->rx_rings[i];
5139 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5140 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5141 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5142 }
5143
5144 /* disable drop for each vf */
5145 for (int i = 0; i < adapter->num_vfs; i++) {
5146 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5147 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5148 }
5149 } /* ixgbe_disable_rx_drop */
5150
5151 /************************************************************************
5152 * ixgbe_sysctl_advertise
5153 *
5154 * SYSCTL wrapper around setting advertised speed
5155 ************************************************************************/
5156 static int
5157 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5158 {
5159 struct sysctlnode node = *rnode;
5160 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5161 int error = 0, advertise;
5162
5163 advertise = adapter->advertise;
5164 node.sysctl_data = &advertise;
5165 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5166 if (error != 0 || newp == NULL)
5167 return error;
5168
5169 return ixgbe_set_advertise(adapter, advertise);
5170 } /* ixgbe_sysctl_advertise */
5171
5172 /************************************************************************
5173 * ixgbe_set_advertise - Control advertised link speed
5174 *
5175 * Flags:
5176 * 0x00 - Default (all capable link speed)
5177 * 0x01 - advertise 100 Mb
5178 * 0x02 - advertise 1G
5179 * 0x04 - advertise 10G
5180 * 0x08 - advertise 10 Mb
5181 * 0x10 - advertise 2.5G
5182 * 0x20 - advertise 5G
5183 ************************************************************************/
5184 static int
5185 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5186 {
5187 device_t dev;
5188 struct ixgbe_hw *hw;
5189 ixgbe_link_speed speed = 0;
5190 ixgbe_link_speed link_caps = 0;
5191 s32 err = IXGBE_NOT_IMPLEMENTED;
5192 bool negotiate = FALSE;
5193
5194 /* Checks to validate new value */
5195 if (adapter->advertise == advertise) /* no change */
5196 return (0);
5197
5198 dev = adapter->dev;
5199 hw = &adapter->hw;
5200
5201 /* No speed changes for backplane media */
5202 if (hw->phy.media_type == ixgbe_media_type_backplane)
5203 return (ENODEV);
5204
5205 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5206 (hw->phy.multispeed_fiber))) {
5207 device_printf(dev,
5208 "Advertised speed can only be set on copper or "
5209 "multispeed fiber media types.\n");
5210 return (EINVAL);
5211 }
5212
5213 if (advertise < 0x0 || advertise > 0x2f) {
5214 device_printf(dev,
5215 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5216 return (EINVAL);
5217 }
5218
5219 if (hw->mac.ops.get_link_capabilities) {
5220 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5221 &negotiate);
5222 if (err != IXGBE_SUCCESS) {
5223 device_printf(dev, "Unable to determine supported advertise speeds\n");
5224 return (ENODEV);
5225 }
5226 }
5227
5228 /* Set new value and report new advertised mode */
5229 if (advertise & 0x1) {
5230 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5231 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5232 return (EINVAL);
5233 }
5234 speed |= IXGBE_LINK_SPEED_100_FULL;
5235 }
5236 if (advertise & 0x2) {
5237 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5238 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5239 return (EINVAL);
5240 }
5241 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5242 }
5243 if (advertise & 0x4) {
5244 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5245 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5246 return (EINVAL);
5247 }
5248 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5249 }
5250 if (advertise & 0x8) {
5251 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5252 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5253 return (EINVAL);
5254 }
5255 speed |= IXGBE_LINK_SPEED_10_FULL;
5256 }
5257 if (advertise & 0x10) {
5258 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5259 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5260 return (EINVAL);
5261 }
5262 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5263 }
5264 if (advertise & 0x20) {
5265 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5266 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5267 return (EINVAL);
5268 }
5269 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5270 }
5271 if (advertise == 0)
5272 speed = link_caps; /* All capable link speed */
5273
5274 hw->mac.autotry_restart = TRUE;
5275 hw->mac.ops.setup_link(hw, speed, TRUE);
5276 adapter->advertise = advertise;
5277
5278 return (0);
5279 } /* ixgbe_set_advertise */
5280
5281 /************************************************************************
5282 * ixgbe_get_advertise - Get current advertised speed settings
5283 *
5284 * Formatted for sysctl usage.
5285 * Flags:
5286 * 0x01 - advertise 100 Mb
5287 * 0x02 - advertise 1G
5288 * 0x04 - advertise 10G
5289 * 0x08 - advertise 10 Mb (yes, Mb)
5290 * 0x10 - advertise 2.5G
5291 * 0x20 - advertise 5G
5292 ************************************************************************/
5293 static int
5294 ixgbe_get_advertise(struct adapter *adapter)
5295 {
5296 struct ixgbe_hw *hw = &adapter->hw;
5297 int speed;
5298 ixgbe_link_speed link_caps = 0;
5299 s32 err;
5300 bool negotiate = FALSE;
5301
5302 /*
5303 * Advertised speed means nothing unless it's copper or
5304 * multi-speed fiber
5305 */
5306 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5307 !(hw->phy.multispeed_fiber))
5308 return (0);
5309
5310 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5311 if (err != IXGBE_SUCCESS)
5312 return (0);
5313
5314 speed =
5315 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5316 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5317 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5318 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5319 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5320 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5321
5322 return speed;
5323 } /* ixgbe_get_advertise */
5324
5325 /************************************************************************
5326 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5327 *
5328 * Control values:
5329 * 0/1 - off / on (use default value of 1000)
5330 *
5331 * Legal timer values are:
5332 * 50,100,250,500,1000,2000,5000,10000
5333 *
5334 * Turning off interrupt moderation will also turn this off.
5335 ************************************************************************/
5336 static int
5337 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5338 {
5339 struct sysctlnode node = *rnode;
5340 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5341 struct ifnet *ifp = adapter->ifp;
5342 int error;
5343 int newval;
5344
5345 newval = adapter->dmac;
5346 node.sysctl_data = &newval;
5347 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5348 if ((error) || (newp == NULL))
5349 return (error);
5350
5351 switch (newval) {
5352 case 0:
5353 /* Disabled */
5354 adapter->dmac = 0;
5355 break;
5356 case 1:
5357 /* Enable and use default */
5358 adapter->dmac = 1000;
5359 break;
5360 case 50:
5361 case 100:
5362 case 250:
5363 case 500:
5364 case 1000:
5365 case 2000:
5366 case 5000:
5367 case 10000:
5368 /* Legal values - allow */
5369 adapter->dmac = newval;
5370 break;
5371 default:
5372 /* Do nothing, illegal value */
5373 return (EINVAL);
5374 }
5375
5376 /* Re-initialize hardware if it's already running */
5377 if (ifp->if_flags & IFF_RUNNING)
5378 ifp->if_init(ifp);
5379
5380 return (0);
5381 }
5382
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Unported FreeBSD code: it still uses FreeBSD idioms
	 * (req->newp, DEVICE_SUSPEND/DEVICE_RESUME) and would not
	 * build on NetBSD as-is; kept compiled out for reference.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Not implemented on NetBSD yet: report success, do nothing. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
#endif
5426
5427 /************************************************************************
5428 * ixgbe_sysctl_wol_enable
5429 *
5430 * Sysctl to enable/disable the WoL capability,
5431 * if supported by the adapter.
5432 *
5433 * Values:
5434 * 0 - disabled
5435 * 1 - enabled
5436 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool new_wol_enabled;
	int error = 0;

	/*
	 * NOTE(review): sysctl_data points at a bool here; this assumes
	 * the node was created with a matching bool-sized type -- confirm
	 * against the sysctl_createv() call that registers this node.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Read-only access, or lookup failure: nothing more to do. */
	if ((error) || (newp == NULL))
		return (error);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware that doesn't support it. */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
5461
5462 /************************************************************************
5463 * ixgbe_sysctl_wufc - Wake Up Filter Control
5464 *
5465 * Sysctl to enable/disable the types of packets that the
5466 * adapter will wake up on upon receipt.
5467 * Flags:
5468 * 0x1 - Link Status Change
5469 * 0x2 - Magic Packet
5470 * 0x4 - Direct Exact
5471 * 0x8 - Directed Multicast
5472 * 0x10 - Broadcast
5473 * 0x20 - ARP/IPv4 Request Packet
5474 * 0x40 - Direct IPv4 Packet
5475 * 0x80 - Direct IPv6 Packet
5476 *
5477 * Settings not listed above will cause the sysctl to return an error.
5478 ************************************************************************/
5479 static int
5480 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5481 {
5482 struct sysctlnode node = *rnode;
5483 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5484 int error = 0;
5485 u32 new_wufc;
5486
5487 new_wufc = adapter->wufc;
5488 node.sysctl_data = &new_wufc;
5489 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5490 if ((error) || (newp == NULL))
5491 return (error);
5492 if (new_wufc == adapter->wufc)
5493 return (0);
5494
5495 if (new_wufc & 0xffffff00)
5496 return (EINVAL);
5497
5498 new_wufc &= 0xff;
5499 new_wufc |= (0xffffff & adapter->wufc);
5500 adapter->wufc = new_wufc;
5501
5502 return (0);
5503 } /* ixgbe_sysctl_wufc */
5504
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Unported FreeBSD code: relies on FreeBSD's sbuf sysctl
	 * helpers, which have no NetBSD equivalent here; kept
	 * compiled out for reference.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	/* On NetBSD this is currently a stub that always succeeds. */
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5563
5564 /************************************************************************
5565 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5566 *
5567 * For X552/X557-AT devices using an external PHY
5568 ************************************************************************/
5569 static int
5570 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5571 {
5572 struct sysctlnode node = *rnode;
5573 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5574 struct ixgbe_hw *hw = &adapter->hw;
5575 int val;
5576 u16 reg;
5577 int error;
5578
5579 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5580 device_printf(adapter->dev,
5581 "Device has no supported external thermal sensor.\n");
5582 return (ENODEV);
5583 }
5584
5585 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5586 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5587 device_printf(adapter->dev,
5588 "Error reading from PHY's current temperature register\n");
5589 return (EAGAIN);
5590 }
5591
5592 node.sysctl_data = &val;
5593
5594 /* Shift temp for output */
5595 val = reg >> 8;
5596
5597 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5598 if ((error) || (newp == NULL))
5599 return (error);
5600
5601 return (0);
5602 } /* ixgbe_sysctl_phy_temp */
5603
5604 /************************************************************************
5605 * ixgbe_sysctl_phy_overtemp_occurred
5606 *
5607 * Reports (directly from the PHY) whether the current PHY
5608 * temperature is over the overtemp threshold.
5609 ************************************************************************/
5610 static int
5611 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5612 {
5613 struct sysctlnode node = *rnode;
5614 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5615 struct ixgbe_hw *hw = &adapter->hw;
5616 int val, error;
5617 u16 reg;
5618
5619 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5620 device_printf(adapter->dev,
5621 "Device has no supported external thermal sensor.\n");
5622 return (ENODEV);
5623 }
5624
5625 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5626 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5627 device_printf(adapter->dev,
5628 "Error reading from PHY's temperature status register\n");
5629 return (EAGAIN);
5630 }
5631
5632 node.sysctl_data = &val;
5633
5634 /* Get occurrence bit */
5635 val = !!(reg & 0x4000);
5636
5637 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5638 if ((error) || (newp == NULL))
5639 return (error);
5640
5641 return (0);
5642 } /* ixgbe_sysctl_phy_overtemp_occurred */
5643
5644 /************************************************************************
5645 * ixgbe_sysctl_eee_state
5646 *
5647 * Sysctl to set EEE power saving feature
5648 * Values:
5649 * 0 - disable EEE
5650 * 1 - enable EEE
5651 * (none) - get current device EEE state
5652 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	int curr_eee, new_eee, error = 0;
	s32 retval;

	/* Present the cached EEE state; sysctl may overwrite new_eee. */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Plain read (newp == NULL) or lookup error: nothing to change. */
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	/* Program the MAC; only then commit the new state. */
	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
5700
5701 /************************************************************************
5702 * ixgbe_init_device_features
5703 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/*
	 * Start from the feature set common to all supported MACs;
	 * per-MAC capabilities are added (or removed) below, and
	 * feat_en is then derived from feat_cap plus user tunables.
	 */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass support only on function 0 of bypass SKUs. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a has no INTx support. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* QSFP SKU cannot use INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
5806
5807 /************************************************************************
5808 * ixgbe_probe - Device identification routine
5809 *
5810 * Determines if the driver should be loaded on
5811 * adapter based on its PCI vendor/device ID.
5812 *
5813 * return BUS_PROBE_DEFAULT on success, positive on failure
5814 ************************************************************************/
5815 static int
5816 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5817 {
5818 const struct pci_attach_args *pa = aux;
5819
5820 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5821 }
5822
5823 static ixgbe_vendor_info_t *
5824 ixgbe_lookup(const struct pci_attach_args *pa)
5825 {
5826 ixgbe_vendor_info_t *ent;
5827 pcireg_t subid;
5828
5829 INIT_DEBUGOUT("ixgbe_lookup: begin");
5830
5831 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5832 return NULL;
5833
5834 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5835
5836 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5837 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5838 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5839 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5840 (ent->subvendor_id == 0)) &&
5841 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5842 (ent->subdevice_id == 0))) {
5843 ++ixgbe_total_ports;
5844 return ent;
5845 }
5846 }
5847 return NULL;
5848 }
5849
/*
 * Interface-flags change callback, invoked from ether_ioctl().
 * Returns ENETRESET when the caller must fully re-init the interface.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int change, rc = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Record which flags changed since the last call. */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/*
	 * Promisc/allmulti can be updated in place; any other
	 * (changeable, non-debug) flag needs a full reset.
	 */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		rc = ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
		ixgbe_set_promisc(adapter);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return rc;
}
5875
5876 /************************************************************************
5877 * ixgbe_ioctl - Ioctl entry point
5878 *
5879 * Called when the user wants to configure the interface.
5880 *
5881 * return 0 on success, positive on failure
5882 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int		error = 0;
	int l4csum_en;
	/* L4 Rx checksum capabilities that must be toggled as a unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug trace only (plus SIOCZIFDATA side effect). */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Clear both hardware and software counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command handling. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM/diagnostics addresses are valid. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let ether_ioctl() do the common work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: nothing further needed if not running. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6017
6018 /************************************************************************
6019 * ixgbe_check_fan_failure
6020 ************************************************************************/
6021 static void
6022 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6023 {
6024 u32 mask;
6025
6026 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6027 IXGBE_ESDP_SDP1;
6028
6029 if (reg & mask)
6030 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6031 } /* ixgbe_check_fan_failure */
6032
6033 /************************************************************************
6034 * ixgbe_handle_que
6035 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Clean the Rx ring first, then Tx under the Tx lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule rather than re-enabling intr. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* Legacy/MSI path: re-enable everything. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6073
6074 /************************************************************************
6075 * ixgbe_handle_que_work
6076 ************************************************************************/
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	/* Recover the owning queue from the embedded workqueue cookie. */
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
6088
6089 /************************************************************************
6090 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6091 ************************************************************************/
6092 static int
6093 ixgbe_allocate_legacy(struct adapter *adapter,
6094 const struct pci_attach_args *pa)
6095 {
6096 device_t dev = adapter->dev;
6097 struct ix_queue *que = adapter->queues;
6098 struct tx_ring *txr = adapter->tx_rings;
6099 int counts[PCI_INTR_TYPE_SIZE];
6100 pci_intr_type_t intr_type, max_type;
6101 char intrbuf[PCI_INTRSTR_LEN];
6102 const char *intrstr = NULL;
6103
6104 /* We allocate a single interrupt resource */
6105 max_type = PCI_INTR_TYPE_MSI;
6106 counts[PCI_INTR_TYPE_MSIX] = 0;
6107 counts[PCI_INTR_TYPE_MSI] =
6108 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6109 /* Check not feat_en but feat_cap to fallback to INTx */
6110 counts[PCI_INTR_TYPE_INTX] =
6111 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6112
6113 alloc_retry:
6114 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6115 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6116 return ENXIO;
6117 }
6118 adapter->osdep.nintrs = 1;
6119 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6120 intrbuf, sizeof(intrbuf));
6121 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6122 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6123 device_xname(dev));
6124 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6125 if (adapter->osdep.ihs[0] == NULL) {
6126 aprint_error_dev(dev,"unable to establish %s\n",
6127 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6128 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6129 adapter->osdep.intrs = NULL;
6130 switch (intr_type) {
6131 case PCI_INTR_TYPE_MSI:
6132 /* The next try is for INTx: Disable MSI */
6133 max_type = PCI_INTR_TYPE_INTX;
6134 counts[PCI_INTR_TYPE_INTX] = 1;
6135 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6136 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6137 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6138 goto alloc_retry;
6139 } else
6140 break;
6141 case PCI_INTR_TYPE_INTX:
6142 default:
6143 /* See below */
6144 break;
6145 }
6146 }
6147 if (intr_type == PCI_INTR_TYPE_INTX) {
6148 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6149 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6150 }
6151 if (adapter->osdep.ihs[0] == NULL) {
6152 aprint_error_dev(dev,
6153 "couldn't establish interrupt%s%s\n",
6154 intrstr ? " at " : "", intrstr ? intrstr : "");
6155 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6156 adapter->osdep.intrs = NULL;
6157 return ENXIO;
6158 }
6159 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6160 /*
6161 * Try allocating a fast interrupt and the associated deferred
6162 * processing contexts.
6163 */
6164 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6165 txr->txr_si =
6166 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6167 ixgbe_deferred_mq_start, txr);
6168 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6169 ixgbe_handle_que, que);
6170
6171 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6172 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6173 aprint_error_dev(dev,
6174 "could not establish software interrupts\n");
6175
6176 return ENXIO;
6177 }
6178 /* For simplicity in the handlers */
6179 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6180
6181 return (0);
6182 } /* ixgbe_allocate_legacy */
6183
6184 /************************************************************************
6185 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6186 ************************************************************************/
6187 static int
6188 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6189 {
6190 device_t dev = adapter->dev;
6191 struct ix_queue *que = adapter->queues;
6192 struct tx_ring *txr = adapter->tx_rings;
6193 pci_chipset_tag_t pc;
6194 char intrbuf[PCI_INTRSTR_LEN];
6195 char intr_xname[32];
6196 char wqname[MAXCOMLEN];
6197 const char *intrstr = NULL;
6198 int error, vector = 0;
6199 int cpu_id = 0;
6200 kcpuset_t *affinity;
6201 #ifdef RSS
6202 unsigned int rss_buckets = 0;
6203 kcpuset_t cpu_mask;
6204 #endif
6205
6206 pc = adapter->osdep.pc;
6207 #ifdef RSS
6208 /*
6209 * If we're doing RSS, the number of queues needs to
6210 * match the number of RSS buckets that are configured.
6211 *
6212 * + If there's more queues than RSS buckets, we'll end
6213 * up with queues that get no traffic.
6214 *
6215 * + If there's more RSS buckets than queues, we'll end
6216 * up having multiple RSS buckets map to the same queue,
6217 * so there'll be some contention.
6218 */
6219 rss_buckets = rss_getnumbuckets();
6220 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6221 (adapter->num_queues != rss_buckets)) {
6222 device_printf(dev,
6223 "%s: number of queues (%d) != number of RSS buckets (%d)"
6224 "; performance will be impacted.\n",
6225 __func__, adapter->num_queues, rss_buckets);
6226 }
6227 #endif
6228
6229 adapter->osdep.nintrs = adapter->num_queues + 1;
6230 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6231 adapter->osdep.nintrs) != 0) {
6232 aprint_error_dev(dev,
6233 "failed to allocate MSI-X interrupt\n");
6234 return (ENXIO);
6235 }
6236
6237 kcpuset_create(&affinity, false);
6238 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6239 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6240 device_xname(dev), i);
6241 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6242 sizeof(intrbuf));
6243 #ifdef IXGBE_MPSAFE
6244 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6245 true);
6246 #endif
6247 /* Set the handler function */
6248 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6249 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6250 intr_xname);
6251 if (que->res == NULL) {
6252 aprint_error_dev(dev,
6253 "Failed to register QUE handler\n");
6254 error = ENXIO;
6255 goto err_out;
6256 }
6257 que->msix = vector;
6258 adapter->active_queues |= (u64)(1 << que->msix);
6259
6260 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6261 #ifdef RSS
6262 /*
6263 * The queue ID is used as the RSS layer bucket ID.
6264 * We look up the queue ID -> RSS CPU ID and select
6265 * that.
6266 */
6267 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6268 CPU_SETOF(cpu_id, &cpu_mask);
6269 #endif
6270 } else {
6271 /*
6272 * Bind the MSI-X vector, and thus the
6273 * rings to the corresponding CPU.
6274 *
6275 * This just happens to match the default RSS
6276 * round-robin bucket -> queue -> CPU allocation.
6277 */
6278 if (adapter->num_queues > 1)
6279 cpu_id = i;
6280 }
6281 /* Round-robin affinity */
6282 kcpuset_zero(affinity);
6283 kcpuset_set(affinity, cpu_id % ncpu);
6284 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6285 NULL);
6286 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6287 intrstr);
6288 if (error == 0) {
6289 #if 1 /* def IXGBE_DEBUG */
6290 #ifdef RSS
6291 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6292 cpu_id % ncpu);
6293 #else
6294 aprint_normal(", bound queue %d to cpu %d", i,
6295 cpu_id % ncpu);
6296 #endif
6297 #endif /* IXGBE_DEBUG */
6298 }
6299 aprint_normal("\n");
6300
6301 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6302 txr->txr_si = softint_establish(
6303 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6304 ixgbe_deferred_mq_start, txr);
6305 if (txr->txr_si == NULL) {
6306 aprint_error_dev(dev,
6307 "couldn't establish software interrupt\n");
6308 error = ENXIO;
6309 goto err_out;
6310 }
6311 }
6312 que->que_si
6313 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6314 ixgbe_handle_que, que);
6315 if (que->que_si == NULL) {
6316 aprint_error_dev(dev,
6317 "couldn't establish software interrupt\n");
6318 error = ENXIO;
6319 goto err_out;
6320 }
6321 }
6322 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6323 error = workqueue_create(&adapter->txr_wq, wqname,
6324 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6325 IXGBE_WORKQUEUE_FLAGS);
6326 if (error) {
6327 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6328 goto err_out;
6329 }
6330 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6331
6332 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6333 error = workqueue_create(&adapter->que_wq, wqname,
6334 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6335 IXGBE_WORKQUEUE_FLAGS);
6336 if (error) {
6337 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6338 goto err_out;
6339 }
6340
6341 /* and Link */
6342 cpu_id++;
6343 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6344 adapter->vector = vector;
6345 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6346 sizeof(intrbuf));
6347 #ifdef IXGBE_MPSAFE
6348 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6349 true);
6350 #endif
6351 /* Set the link handler function */
6352 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6353 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6354 intr_xname);
6355 if (adapter->osdep.ihs[vector] == NULL) {
6356 aprint_error_dev(dev, "Failed to register LINK handler\n");
6357 error = ENXIO;
6358 goto err_out;
6359 }
6360 /* Round-robin affinity */
6361 kcpuset_zero(affinity);
6362 kcpuset_set(affinity, cpu_id % ncpu);
6363 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6364 NULL);
6365
6366 aprint_normal_dev(dev,
6367 "for link, interrupting at %s", intrstr);
6368 if (error == 0)
6369 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6370 else
6371 aprint_normal("\n");
6372
6373 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6374 adapter->mbx_si =
6375 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6376 ixgbe_handle_mbx, adapter);
6377 if (adapter->mbx_si == NULL) {
6378 aprint_error_dev(dev,
6379 "could not establish software interrupts\n");
6380
6381 error = ENXIO;
6382 goto err_out;
6383 }
6384 }
6385
6386 kcpuset_destroy(affinity);
6387 aprint_normal_dev(dev,
6388 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6389
6390 return (0);
6391
6392 err_out:
6393 kcpuset_destroy(affinity);
6394 ixgbe_free_softint(adapter);
6395 ixgbe_free_pciintr_resources(adapter);
6396 return (error);
6397 } /* ixgbe_allocate_msix */
6398
6399 /************************************************************************
6400 * ixgbe_configure_interrupts
6401 *
6402 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6403 * This will also depend on user settings.
6404 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_mac_info *mac = &adapter->hw.mac;
	int want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/*
	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
	 * interrupt slot.
	 */
	if (ncpu == 1)
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
	msgs = MIN(msgs, IXG_MAX_NINTR);
	/* Need at least one queue vector plus one link vector. */
	if (msgs < 2)
		goto msi;

	adapter->msix_mem = (void *)1; /* XXX */

	/* Figure out a reasonable auto config value */
	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = min(queues, rss_getnumbuckets());
#endif
	/* Clamp a user-requested queue count that exceeds what's possible. */
	if (ixgbe_num_queues > queues) {
		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	else
		/* Auto mode: also bounded by what the MAC supports. */
		queues = min(queues,
		    min(mac->max_tx_queues, mac->max_rx_queues));

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		aprint_error_dev(dev, "MSI-X Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	adapter->num_queues = queues;
	adapter->feat_en |= IXGBE_FEATURE_MSIX;
	return (0);

	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;

	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
	adapter->msix_mem = NULL; /* XXX */
	/* We only ever use a single MSI vector. */
	if (msgs > 1)
		msgs = 1;
	if (msgs != 0) {
		msgs = 1;
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		return (0);
	}

	/* No MSI either: fall back to INTx if the hardware allows it. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		aprint_error_dev(dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;

	return (0);
} /* ixgbe_configure_interrupts */
6505
6506
6507 /************************************************************************
6508 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6509 *
6510 * Done outside of interrupt context since the driver might sleep
6511 ************************************************************************/
static void
ixgbe_handle_link(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	++adapter->link_sicount.ev_count;
	/* Query the PHY/MAC, then propagate the result to the stack. */
	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
	ixgbe_update_link_status(adapter);

	/* Re-enable link interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_link */
6528
6529 /************************************************************************
6530 * ixgbe_rearm_queues
6531 ************************************************************************/
6532 static inline void
6533 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6534 {
6535 u32 mask;
6536
6537 switch (adapter->hw.mac.type) {
6538 case ixgbe_mac_82598EB:
6539 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6541 break;
6542 case ixgbe_mac_82599EB:
6543 case ixgbe_mac_X540:
6544 case ixgbe_mac_X550:
6545 case ixgbe_mac_X550EM_x:
6546 case ixgbe_mac_X550EM_a:
6547 mask = (queues & 0xFFFFFFFF);
6548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6549 mask = (queues >> 32);
6550 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6551 break;
6552 default:
6553 break;
6554 }
6555 } /* ixgbe_rearm_queues */
6556