ixgbe.c revision 1.88.2.54 1 /* $NetBSD: ixgbe.c,v 1.88.2.54 2023/01/23 14:07:24 martin Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.88.2.54 2023/01/23 14:07:24 martin Exp $");
68
69 #ifdef _KERNEL_OPT
70 #include "opt_inet.h"
71 #include "opt_inet6.h"
72 #include "opt_net_mpsafe.h"
73 #endif
74
75 #include "ixgbe.h"
76 #include "ixgbe_sriov.h"
77 #include "vlan.h"
78
79 #include <sys/cprng.h>
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82
/************************************************************************
 * Driver version
 *
 *   Reported in the attach banner; the base version tracks the Intel
 *   FreeBSD driver this code is derived from.
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.24 */
88
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
153
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the String Index field of ixgbe_vendor_info_array
 *   entries; printed in the attach banner.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
160
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* Autoconf(9) entry points */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
/* pmf(9) power management hooks */
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
/* ifnet(9) entry points */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
/* Attach-time setup and resource management */
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_softint(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
/* Periodic timers */
static void	ixgbe_local_timer(void *);
static void	ixgbe_local_timer1(void *);
static void	ixgbe_recovery_mode_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
/* Hardware configuration helpers */
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

/* TX/RX ring initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt and link state management */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN hardware offload */
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* sysctl(9) tree and statistics */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_default_advertise(struct adapter *);

/* Sysctl handlers */
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);

/* Interrupt functions */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);
static void	ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
static int	ixgbe_legacy_irq(void *);

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
280
/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/* FreeBSD module glue, kept for reference only (not used on NetBSD). */
#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
298
/*
 * TUNEABLE PARAMETERS:
 *
 * NOTE(NetBSD): SYSCTL_INT and TUNABLE_INT are stubbed out to nothing
 * below; the FreeBSD tunable declarations are retained for reference
 * and to keep the diff against the upstream driver small.
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * With NET_MPSAFE the callouts, softints and workqueues run without
 * the kernel lock; otherwise the flags fall back to the locked defaults.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI	PRI_SOFTNET

/* Interval between reports of errors */
static const struct timeval ixgbe_errlog_intrvl = { 60, 0 };	/* 60s */
424
425 /************************************************************************
426 * ixgbe_initialize_rss_mapping
427 ************************************************************************/
428 static void
429 ixgbe_initialize_rss_mapping(struct adapter *adapter)
430 {
431 struct ixgbe_hw *hw = &adapter->hw;
432 u32 reta = 0, mrqc, rss_key[10];
433 int queue_id, table_size, index_mult;
434 int i, j;
435 u32 rss_hash_config;
436
437 /* force use default RSS key. */
438 #ifdef __NetBSD__
439 rss_getkey((uint8_t *) &rss_key);
440 #else
441 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
442 /* Fetch the configured RSS key */
443 rss_getkey((uint8_t *) &rss_key);
444 } else {
445 /* set up random bits */
446 cprng_fast(&rss_key, sizeof(rss_key));
447 }
448 #endif
449
450 /* Set multiplier for RETA setup and table size based on MAC */
451 index_mult = 0x1;
452 table_size = 128;
453 switch (adapter->hw.mac.type) {
454 case ixgbe_mac_82598EB:
455 index_mult = 0x11;
456 break;
457 case ixgbe_mac_X550:
458 case ixgbe_mac_X550EM_x:
459 case ixgbe_mac_X550EM_a:
460 table_size = 512;
461 break;
462 default:
463 break;
464 }
465
466 /* Set up the redirection table */
467 for (i = 0, j = 0; i < table_size; i++, j++) {
468 if (j == adapter->num_queues)
469 j = 0;
470
471 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
472 /*
473 * Fetch the RSS bucket id for the given indirection
474 * entry. Cap it at the number of configured buckets
475 * (which is num_queues.)
476 */
477 queue_id = rss_get_indirection_to_bucket(i);
478 queue_id = queue_id % adapter->num_queues;
479 } else
480 queue_id = (j * index_mult);
481
482 /*
483 * The low 8 bits are for hash value (n+0);
484 * The next 8 bits are for hash value (n+1), etc.
485 */
486 reta = reta >> 8;
487 reta = reta | (((uint32_t) queue_id) << 24);
488 if ((i & 3) == 3) {
489 if (i < 128)
490 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
491 else
492 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
493 reta);
494 reta = 0;
495 }
496 }
497
498 /* Now fill our hash function seeds */
499 for (i = 0; i < 10; i++)
500 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
501
502 /* Perform hash on these packet types */
503 if (adapter->feat_en & IXGBE_FEATURE_RSS)
504 rss_hash_config = rss_gethashconfig();
505 else {
506 /*
507 * Disable UDP - IP fragments aren't currently being handled
508 * and so we end up with a mix of 2-tuple and 4-tuple
509 * traffic.
510 */
511 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
512 | RSS_HASHTYPE_RSS_TCP_IPV4
513 | RSS_HASHTYPE_RSS_IPV6
514 | RSS_HASHTYPE_RSS_TCP_IPV6
515 | RSS_HASHTYPE_RSS_IPV6_EX
516 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
517 }
518
519 mrqc = IXGBE_MRQC_RSSEN;
520 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
522 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
524 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
526 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
528 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
530 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
532 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
534 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
535 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
536 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
537 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
538 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
539 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
540 } /* ixgbe_initialize_rss_mapping */
541
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs, with receives disabled: broadcast/jumbo/CRC-strip policy
 *   (FCTRL, HLREG0), then per-queue ring base/length (RDBAL/RDBAH/RDLEN),
 *   buffer sizing (SRRCTL), statistics mapping (RQSMR) and head/tail
 *   pointers, and finally RSS and checksum-offload state (RXCSUM).
 *   The caller is expected to re-enable receives afterwards.
 ************************************************************************/
/* Round-up mask for converting rx_mbuf_sz into SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598 only: discard pause frames and pass MAC control frames */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? (enabled whenever MTU exceeds ETHERMTU) */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units (1 KB), rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register (one-buffer advanced mode) */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if ((adapter->num_queues > 1) &&
		    (adapter->hw.fc.requested_mode == ixgbe_fc_none))
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		else
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	/* Header split types are only programmable on 82599 and later */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
670
671 /************************************************************************
672 * ixgbe_initialize_transmit_units - Enable transmit units.
673 ************************************************************************/
674 static void
675 ixgbe_initialize_transmit_units(struct adapter *adapter)
676 {
677 struct tx_ring *txr = adapter->tx_rings;
678 struct ixgbe_hw *hw = &adapter->hw;
679 int i;
680
681 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
682
683 /* Setup the Base and Length of the Tx Descriptor Ring */
684 for (i = 0; i < adapter->num_queues; i++, txr++) {
685 u64 tdba = txr->txdma.dma_paddr;
686 u32 txctrl = 0;
687 u32 tqsmreg, reg;
688 int regnum = i / 4; /* 1 register per 4 queues */
689 int regshift = i % 4; /* 4 bits per 1 queue */
690 int j = txr->me;
691
692 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
693 (tdba & 0x00000000ffffffffULL));
694 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
695 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
696 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
697
698 /*
699 * Set TQSMR (Transmit Queue Statistic Mapping) register.
700 * Register location is different between 82598 and others.
701 */
702 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
703 tqsmreg = IXGBE_TQSMR(regnum);
704 else
705 tqsmreg = IXGBE_TQSM(regnum);
706 reg = IXGBE_READ_REG(hw, tqsmreg);
707 reg &= ~(0x000000ffUL << (regshift * 8));
708 reg |= i << (regshift * 8);
709 IXGBE_WRITE_REG(hw, tqsmreg, reg);
710
711 /* Setup the HW Tx Head and Tail descriptor pointers */
712 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
713 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
714
715 /* Cache the tail address */
716 txr->tail = IXGBE_TDT(j);
717
718 txr->txr_no_space = false;
719
720 /* Disable Head Writeback */
721 /*
722 * Note: for X550 series devices, these registers are actually
723 * prefixed with TPH_ instead of DCA_, but the addresses and
724 * fields remain the same.
725 */
726 switch (hw->mac.type) {
727 case ixgbe_mac_82598EB:
728 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
729 break;
730 default:
731 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
732 break;
733 }
734 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
735 switch (hw->mac.type) {
736 case ixgbe_mac_82598EB:
737 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
738 break;
739 default:
740 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
741 break;
742 }
743
744 }
745
746 if (hw->mac.type != ixgbe_mac_82598EB) {
747 u32 dmatxctl, rttdcs;
748
749 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
750 dmatxctl |= IXGBE_DMATXCTL_TE;
751 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
752 /* Disable arbiter to set MTQC */
753 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
754 rttdcs |= IXGBE_RTTDCS_ARBDIS;
755 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
756 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
757 ixgbe_get_mtqc(adapter->iov_mode));
758 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
759 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
760 }
761
762 return;
763 } /* ixgbe_initialize_transmit_units */
764
765 /************************************************************************
766 * ixgbe_attach - Device initialization routine
767 *
768 * Called when the driver is being loaded.
769 * Identifies the type of hardware, allocates all resources
770 * and initializes the hardware.
771 *
772 * return 0 on success, positive on failure
773 ************************************************************************/
774 static void
775 ixgbe_attach(device_t parent, device_t dev, void *aux)
776 {
777 struct adapter *adapter;
778 struct ixgbe_hw *hw;
779 int error = -1;
780 u32 ctrl_ext;
781 u16 high, low, nvmreg;
782 pcireg_t id, subid;
783 const ixgbe_vendor_info_t *ent;
784 struct pci_attach_args *pa = aux;
785 bool unsupported_sfp = false;
786 const char *str;
787 char buf[256];
788
789 INIT_DEBUGOUT("ixgbe_attach: begin");
790
791 /* Allocate, clear, and link in our adapter structure */
792 adapter = device_private(dev);
793 adapter->hw.back = adapter;
794 adapter->dev = dev;
795 hw = &adapter->hw;
796 adapter->osdep.pc = pa->pa_pc;
797 adapter->osdep.tag = pa->pa_tag;
798 if (pci_dma64_available(pa))
799 adapter->osdep.dmat = pa->pa_dmat64;
800 else
801 adapter->osdep.dmat = pa->pa_dmat;
802 adapter->osdep.attached = false;
803
804 ent = ixgbe_lookup(pa);
805
806 KASSERT(ent != NULL);
807
808 aprint_normal(": %s, Version - %s\n",
809 ixgbe_strings[ent->index], ixgbe_driver_version);
810
811 /* Core Lock Init */
812 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
813
814 /* Set up the timer callout */
815 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
816
817 /* Determine hardware revision */
818 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
819 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
820
821 hw->vendor_id = PCI_VENDOR(id);
822 hw->device_id = PCI_PRODUCT(id);
823 hw->revision_id =
824 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
825 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
826 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
827
828 /*
829 * Make sure BUSMASTER is set
830 */
831 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
832
833 /* Do base PCI setup - map BAR0 */
834 if (ixgbe_allocate_pci_resources(adapter, pa)) {
835 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
836 error = ENXIO;
837 goto err_out;
838 }
839
840 /* let hardware know driver is loaded */
841 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
842 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
843 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
844
845 /*
846 * Initialize the shared code
847 */
848 if (ixgbe_init_shared_code(hw) != 0) {
849 aprint_error_dev(dev,
850 "Unable to initialize the shared code\n");
851 error = ENXIO;
852 goto err_out;
853 }
854
855 switch (hw->mac.type) {
856 case ixgbe_mac_82598EB:
857 str = "82598EB";
858 break;
859 case ixgbe_mac_82599EB:
860 str = "82599EB";
861 break;
862 case ixgbe_mac_X540:
863 str = "X540";
864 break;
865 case ixgbe_mac_X550:
866 str = "X550";
867 break;
868 case ixgbe_mac_X550EM_x:
869 str = "X550EM X";
870 break;
871 case ixgbe_mac_X550EM_a:
872 str = "X550EM A";
873 break;
874 default:
875 str = "Unknown";
876 break;
877 }
878 aprint_normal_dev(dev, "device %s\n", str);
879
880 hw->allow_unsupported_sfp = allow_unsupported_sfp;
881
882 /* Pick up the 82599 settings */
883 if (hw->mac.type != ixgbe_mac_82598EB)
884 hw->phy.smart_speed = ixgbe_smart_speed;
885
886 /* Set the right number of segments */
887 KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
888 adapter->num_segs = IXGBE_SCATTER_DEFAULT;
889
890 /* Ensure SW/FW semaphore is free */
891 ixgbe_init_swfw_semaphore(hw);
892
893 hw->mac.ops.set_lan_id(hw);
894 ixgbe_init_device_features(adapter);
895
896 if (ixgbe_configure_interrupts(adapter)) {
897 error = ENXIO;
898 goto err_out;
899 }
900
901 /* Allocate multicast array memory. */
902 adapter->mta = malloc(sizeof(*adapter->mta) *
903 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
904 if (adapter->mta == NULL) {
905 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
906 error = ENOMEM;
907 goto err_out;
908 }
909
910 /* Enable WoL (if supported) */
911 ixgbe_check_wol_support(adapter);
912
913 /* Verify adapter fan is still functional (if applicable) */
914 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
915 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
916 ixgbe_check_fan_failure(adapter, esdp, FALSE);
917 }
918
919 /* Set an initial default flow control value */
920 hw->fc.requested_mode = ixgbe_flow_control;
921
922 /* Do descriptor calc and sanity checks */
923 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
924 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
925 aprint_error_dev(dev, "TXD config issue, using default!\n");
926 adapter->num_tx_desc = DEFAULT_TXD;
927 } else
928 adapter->num_tx_desc = ixgbe_txd;
929
930 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
931 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
932 aprint_error_dev(dev, "RXD config issue, using default!\n");
933 adapter->num_rx_desc = DEFAULT_RXD;
934 } else
935 adapter->num_rx_desc = ixgbe_rxd;
936
937 /* Sysctls for limiting the amount of work done in the taskqueues */
938 adapter->rx_process_limit
939 = (ixgbe_rx_process_limit <= adapter->num_rx_desc)
940 ? ixgbe_rx_process_limit : adapter->num_rx_desc;
941 adapter->tx_process_limit
942 = (ixgbe_tx_process_limit <= adapter->num_tx_desc)
943 ? ixgbe_tx_process_limit : adapter->num_tx_desc;
944
945 /* Set default high limit of copying mbuf in rxeof */
946 adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
947
948 /* Allocate our TX/RX Queues */
949 if (ixgbe_allocate_queues(adapter)) {
950 error = ENOMEM;
951 goto err_out;
952 }
953
954 hw->phy.reset_if_overtemp = TRUE;
955 error = ixgbe_reset_hw(hw);
956 hw->phy.reset_if_overtemp = FALSE;
957 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
958 /*
959 * No optics in this port, set up
960 * so the timer routine will probe
961 * for later insertion.
962 */
963 adapter->sfp_probe = TRUE;
964 error = IXGBE_SUCCESS;
965 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
966 aprint_error_dev(dev,
967 "Unsupported SFP+ module detected!\n");
968 unsupported_sfp = true;
969 error = IXGBE_SUCCESS;
970 } else if (error) {
971 aprint_error_dev(dev,
972 "Hardware initialization failed(error = %d)\n", error);
973 error = EIO;
974 goto err_late;
975 }
976
977 /* Make sure we have a good EEPROM before we read from it */
978 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
979 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
980 error = EIO;
981 goto err_late;
982 }
983
984 aprint_normal("%s:", device_xname(dev));
985 /* NVM Image Version */
986 high = low = 0;
987 switch (hw->mac.type) {
988 case ixgbe_mac_82598EB:
989 /*
990 * Print version from the dev starter version (0x29). The
991 * location is the same as newer device's IXGBE_NVM_MAP_VER.
992 */
993 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
994 if (nvmreg == 0xffff)
995 break;
996 high = (nvmreg >> 12) & 0x0f;
997 low = (nvmreg >> 4) & 0xff;
998 id = nvmreg & 0x0f;
999 /*
1000 * The following output might not be correct. Some 82598 cards
1001 * have 0x1070 or 0x2090. 82598 spec update notes about 2.9.0.
1002 */
1003 aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1004 break;
1005 case ixgbe_mac_X540:
1006 case ixgbe_mac_X550EM_a:
1007 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1008 if (nvmreg == 0xffff)
1009 break;
1010 high = (nvmreg >> 12) & 0x0f;
1011 low = (nvmreg >> 4) & 0xff;
1012 id = nvmreg & 0x0f;
1013 aprint_normal(" NVM Image Version %u.", high);
1014 if (hw->mac.type == ixgbe_mac_X540)
1015 str = "%x";
1016 else
1017 str = "%02x";
1018 aprint_normal(str, low);
1019 aprint_normal(" ID 0x%x,", id);
1020 break;
1021 case ixgbe_mac_X550EM_x:
1022 case ixgbe_mac_X550:
1023 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1024 if (nvmreg == 0xffff)
1025 break;
1026 high = (nvmreg >> 12) & 0x0f;
1027 low = nvmreg & 0xff;
1028 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1029 break;
1030 default:
1031 break;
1032 }
1033 hw->eeprom.nvm_image_ver_high = high;
1034 hw->eeprom.nvm_image_ver_low = low;
1035
1036 /* PHY firmware revision */
1037 switch (hw->mac.type) {
1038 case ixgbe_mac_X540:
1039 case ixgbe_mac_X550:
1040 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1041 if (nvmreg == 0xffff)
1042 break;
1043 high = (nvmreg >> 12) & 0x0f;
1044 low = (nvmreg >> 4) & 0xff;
1045 id = nvmreg & 0x000f;
1046 aprint_normal(" PHY FW Revision %u.", high);
1047 if (hw->mac.type == ixgbe_mac_X540)
1048 str = "%x";
1049 else
1050 str = "%02x";
1051 aprint_normal(str, low);
1052 aprint_normal(" ID 0x%x,", id);
1053 break;
1054 default:
1055 break;
1056 }
1057
1058 /* NVM Map version & OEM NVM Image version */
1059 switch (hw->mac.type) {
1060 case ixgbe_mac_X550:
1061 case ixgbe_mac_X550EM_x:
1062 case ixgbe_mac_X550EM_a:
1063 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1064 if (nvmreg != 0xffff) {
1065 high = (nvmreg >> 12) & 0x0f;
1066 low = nvmreg & 0x00ff;
1067 aprint_normal(" NVM Map version %u.%02x,", high, low);
1068 }
1069 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1070 if (nvmreg != 0xffff) {
1071 high = (nvmreg >> 12) & 0x0f;
1072 low = nvmreg & 0x00ff;
1073 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1074 low);
1075 }
1076 break;
1077 default:
1078 break;
1079 }
1080
1081 /* Print the ETrackID */
1082 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1083 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1084 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1085
1086 /* Printed Board Assembly number */
1087 error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
1088 aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);
1089
1090 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1091 error = ixgbe_allocate_msix(adapter, pa);
1092 if (error) {
1093 /* Free allocated queue structures first */
1094 ixgbe_free_queues(adapter);
1095
1096 /* Fallback to legacy interrupt */
1097 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1098 adapter->feat_en |= IXGBE_FEATURE_MSI;
1099 adapter->num_queues = 1;
1100
1101 /* Allocate our TX/RX Queues again */
1102 if (ixgbe_allocate_queues(adapter)) {
1103 error = ENOMEM;
1104 goto err_out;
1105 }
1106 }
1107 }
1108
1109 /* Recovery mode */
1110 switch (adapter->hw.mac.type) {
1111 case ixgbe_mac_X550:
1112 case ixgbe_mac_X550EM_x:
1113 case ixgbe_mac_X550EM_a:
1114 /* >= 2.00 */
1115 if (hw->eeprom.nvm_image_ver_high >= 2) {
1116 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1117 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1118 }
1119 break;
1120 default:
1121 break;
1122 }
1123
1124 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1125 error = ixgbe_allocate_legacy(adapter, pa);
1126 if (error)
1127 goto err_late;
1128
1129 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1130 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINT_FLAGS,
1131 ixgbe_handle_link, adapter);
1132 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1133 ixgbe_handle_mod, adapter);
1134 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1135 ixgbe_handle_msf, adapter);
1136 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1137 ixgbe_handle_phy, adapter);
1138 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1139 adapter->fdir_si =
1140 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1141 ixgbe_reinit_fdir, adapter);
1142 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1143 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1144 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1145 && (adapter->fdir_si == NULL))) {
1146 aprint_error_dev(dev,
1147 "could not establish software interrupts ()\n");
1148 goto err_out;
1149 }
1150
1151 error = ixgbe_start_hw(hw);
1152 switch (error) {
1153 case IXGBE_ERR_EEPROM_VERSION:
1154 aprint_error_dev(dev,
1155 "This device is a pre-production adapter/"
1156 "LOM. Please be aware there may be issues associated "
1157 "with your hardware.\nIf you are experiencing problems "
1158 "please contact your Intel or hardware representative "
1159 "who provided you with this hardware.\n");
1160 break;
1161 default:
1162 break;
1163 }
1164
1165 /* Setup OS specific network interface */
1166 if (ixgbe_setup_interface(dev, adapter) != 0)
1167 goto err_late;
1168
1169 /*
1170 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1171 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1172 */
1173 if (hw->phy.media_type == ixgbe_media_type_copper) {
1174 uint16_t id1, id2;
1175 int oui, model, rev;
1176 const char *descr;
1177
1178 id1 = hw->phy.id >> 16;
1179 id2 = hw->phy.id & 0xffff;
1180 oui = MII_OUI(id1, id2);
1181 model = MII_MODEL(id2);
1182 rev = MII_REV(id2);
1183 if ((descr = mii_get_descr(oui, model)) != NULL)
1184 aprint_normal_dev(dev,
1185 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1186 descr, oui, model, rev);
1187 else
1188 aprint_normal_dev(dev,
1189 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1190 oui, model, rev);
1191 }
1192
1193 /* Enable EEE power saving */
1194 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1195 hw->mac.ops.setup_eee(hw,
1196 adapter->feat_en & IXGBE_FEATURE_EEE);
1197
1198 /* Enable power to the phy. */
1199 if (!unsupported_sfp) {
1200 /* Enable the optics for 82599 SFP+ fiber */
1201 ixgbe_enable_tx_laser(hw);
1202
1203 /*
1204 * XXX Currently, ixgbe_set_phy_power() supports only copper
1205 * PHY, so it's not required to test with !unsupported_sfp.
1206 */
1207 ixgbe_set_phy_power(hw, TRUE);
1208 }
1209
1210 /* Initialize statistics */
1211 ixgbe_update_stats_counters(adapter);
1212
1213 /* Check PCIE slot type/speed/width */
1214 ixgbe_get_slot_info(adapter);
1215
1216 /*
1217 * Do time init and sysctl init here, but
1218 * only on the first port of a bypass adapter.
1219 */
1220 ixgbe_bypass_init(adapter);
1221
1222 /* Set an initial dmac value */
1223 adapter->dmac = 0;
1224 /* Set initial advertised speeds (if applicable) */
1225 adapter->advertise = ixgbe_get_default_advertise(adapter);
1226
1227 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1228 ixgbe_define_iov_schemas(dev, &error);
1229
1230 /* Add sysctls */
1231 ixgbe_add_device_sysctls(adapter);
1232 ixgbe_add_hw_stats(adapter);
1233
1234 /* For Netmap */
1235 adapter->init_locked = ixgbe_init_locked;
1236 adapter->stop_locked = ixgbe_stop_locked;
1237
1238 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1239 ixgbe_netmap_attach(adapter);
1240
1241 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1242 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1243 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1244 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1245
1246 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1247 pmf_class_network_register(dev, adapter->ifp);
1248 else
1249 aprint_error_dev(dev, "couldn't establish power handler\n");
1250
1251 /* Init recovery mode timer and state variable */
1252 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1253 adapter->recovery_mode = 0;
1254
1255 /* Set up the timer callout */
1256 callout_init(&adapter->recovery_mode_timer,
1257 IXGBE_CALLOUT_FLAGS);
1258
1259 /* Start the task */
1260 callout_reset(&adapter->recovery_mode_timer, hz,
1261 ixgbe_recovery_mode_timer, adapter);
1262 }
1263
1264 INIT_DEBUGOUT("ixgbe_attach: end");
1265 adapter->osdep.attached = true;
1266
1267 return;
1268
1269 err_late:
1270 ixgbe_free_queues(adapter);
1271 err_out:
1272 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1273 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1274 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1275 ixgbe_free_softint(adapter);
1276 ixgbe_free_pci_resources(adapter);
1277 if (adapter->mta != NULL)
1278 free(adapter->mta, M_DEVBUF);
1279 IXGBE_CORE_LOCK_DESTROY(adapter);
1280
1281 return;
1282 } /* ixgbe_attach */
1283
1284 /************************************************************************
1285 * ixgbe_check_wol_support
1286 *
1287 * Checks whether the adapter's ports are capable of
1288 * Wake On LAN by reading the adapter's NVM.
1289 *
1290 * Sets each port's hw->wol_enabled value depending
1291 * on the value read here.
1292 ************************************************************************/
1293 static void
1294 ixgbe_check_wol_support(struct adapter *adapter)
1295 {
1296 struct ixgbe_hw *hw = &adapter->hw;
1297 u16 dev_caps = 0;
1298
1299 /* Find out WoL support for port */
1300 adapter->wol_support = hw->wol_enabled = 0;
1301 ixgbe_get_device_caps(hw, &dev_caps);
1302 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1303 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1304 hw->bus.func == 0))
1305 adapter->wol_support = hw->wol_enabled = 1;
1306
1307 /* Save initial wake up filter configuration */
1308 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1309
1310 return;
1311 } /* ixgbe_check_wol_support */
1312
1313 /************************************************************************
1314 * ixgbe_setup_interface
1315 *
1316 * Setup networking device structure and register an interface.
1317 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit only when legacy TX is not forced. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Register the interface; must precede ether_ifattach(). */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Checksum offload and TSO are opt-in (off by default). */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	/* Finish registration; the interface is now live. */
	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1422
1423 /************************************************************************
1424 * ixgbe_add_media_types
1425 ************************************************************************/
1426 static void
1427 ixgbe_add_media_types(struct adapter *adapter)
1428 {
1429 struct ixgbe_hw *hw = &adapter->hw;
1430 device_t dev = adapter->dev;
1431 u64 layer;
1432
1433 layer = adapter->phy_layer;
1434
1435 #define ADD(mm, dd) \
1436 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1437
1438 ADD(IFM_NONE, 0);
1439
1440 /* Media types with matching NetBSD media defines */
1441 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1442 ADD(IFM_10G_T | IFM_FDX, 0);
1443 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1444 ADD(IFM_1000_T | IFM_FDX, 0);
1445 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1446 ADD(IFM_100_TX | IFM_FDX, 0);
1447 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1448 ADD(IFM_10_T | IFM_FDX, 0);
1449
1450 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1451 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1452 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1453
1454 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1455 ADD(IFM_10G_LR | IFM_FDX, 0);
1456 if (hw->phy.multispeed_fiber)
1457 ADD(IFM_1000_LX | IFM_FDX, 0);
1458 }
1459 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1460 ADD(IFM_10G_SR | IFM_FDX, 0);
1461 if (hw->phy.multispeed_fiber)
1462 ADD(IFM_1000_SX | IFM_FDX, 0);
1463 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1464 ADD(IFM_1000_SX | IFM_FDX, 0);
1465 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1466 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1467
1468 #ifdef IFM_ETH_XTYPE
1469 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1470 ADD(IFM_10G_KR | IFM_FDX, 0);
1471 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1472 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1473 #else
1474 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1475 device_printf(dev, "Media supported: 10GbaseKR\n");
1476 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1477 ADD(IFM_10G_SR | IFM_FDX, 0);
1478 }
1479 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1480 device_printf(dev, "Media supported: 10GbaseKX4\n");
1481 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1482 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1483 }
1484 #endif
1485 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1486 ADD(IFM_1000_KX | IFM_FDX, 0);
1487 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1488 ADD(IFM_2500_KX | IFM_FDX, 0);
1489 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1490 ADD(IFM_2500_T | IFM_FDX, 0);
1491 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T)
1492 ADD(IFM_5000_T | IFM_FDX, 0);
1493 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1494 ADD(IFM_1000_LX | IFM_FDX, 0); /* IFM_1000_BX */
1495 /* XXX no ifmedia_set? */
1496
1497 ADD(IFM_AUTO, 0);
1498
1499 #undef ADD
1500 } /* ixgbe_add_media_types */
1501
1502 /************************************************************************
1503 * ixgbe_is_sfp
1504 ************************************************************************/
1505 static inline bool
1506 ixgbe_is_sfp(struct ixgbe_hw *hw)
1507 {
1508 switch (hw->mac.type) {
1509 case ixgbe_mac_82598EB:
1510 if (hw->phy.type == ixgbe_phy_nl)
1511 return (TRUE);
1512 return (FALSE);
1513 case ixgbe_mac_82599EB:
1514 case ixgbe_mac_X550EM_x:
1515 case ixgbe_mac_X550EM_a:
1516 switch (hw->mac.ops.get_media_type(hw)) {
1517 case ixgbe_media_type_fiber:
1518 case ixgbe_media_type_fiber_qsfp:
1519 return (TRUE);
1520 default:
1521 return (FALSE);
1522 }
1523 default:
1524 return (FALSE);
1525 }
1526 } /* ixgbe_is_sfp */
1527
1528 /************************************************************************
1529 * ixgbe_config_link
1530 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		 autoneg, err = 0;
	bool		 sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP(+) port: defer module/multispeed handling to the
		 * mod/msf softints (they may sleep on PHY access).
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		struct ifmedia	*ifm = &adapter->media;

		/* Non-SFP (copper/backplane): set up the link directly. */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}
} /* ixgbe_config_link */
1575
1576 /************************************************************************
1577 * ixgbe_update_stats_counters - Update board statistics counters.
1578 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet	      *ifp = adapter->ifp;
	struct ixgbe_hw	      *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/*
	 * NOTE(review): missed_rx is never updated below, so the GPRC
	 * workaround subtraction is currently a no-op — confirm whether a
	 * per-queue missed-packet accumulation was intended here.
	 */
	u32		       missed_rx = 0, bprc, lxontxc, lxofftxc;
	u64		       total, total_missed_rx = 0;
	uint64_t	       crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
	unsigned int	       queue_counters;
	int		       i;

	IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);

	IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);

	/* 16 registers exist */
	queue_counters = min(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
		if (hw->mac.type >= ixgbe_mac_82599EB)
			IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
	}

	/* 8 registers exist */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		IXGBE_EVC_ADD(&stats->mpc[i], mp);
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		/* RNBC exists only on 82598. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);

		IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
		/* Per-priority RX pause counters moved on 82599+. */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXONRXCNT(i), pxonrxc[i]);
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXON2OFFCNT(i), pxon2offc[i]);
		} else {
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXONRXC(i), pxonrxc[i]);
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXOFFRXC(i), pxoffrxc[i]);
		}
	}
	IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
	}
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);

	/* Hardware workaround, gprc counts missed packets */
	IXGBE_EVC_ADD(&stats->gprc,
	    IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);

	IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
	/* total pause frames sent; used to correct TX byte/packet counts */
	total = lxontxc + lxofftxc;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 82599+: 64-bit octet counters split across L/H pairs. */
		IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
		IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
		    - total * ETHER_MIN_LEN);
		IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
	} else {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
		/* 82598 only has a counter in the high register */
		IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
		IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
		    - total * ETHER_MIN_LEN);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
	IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));

	/* RX packet-size histogram counters */
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);

	/* TX counters corrected for the pause frames counted above */
	IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
	IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
	IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);

	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);

	IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	ifp->if_ierrors +=
	    crcerrs + illerrc + rlec + ruc + rfc + roc + rjc;
} /* ixgbe_update_stats_counters */
1747
1748 /************************************************************************
1749 * ixgbe_add_hw_stats
1750 *
1751 * Add sysctl variables, one per statistic, to the system.
1752 ************************************************************************/
1753 static void
1754 ixgbe_add_hw_stats(struct adapter *adapter)
1755 {
1756 device_t dev = adapter->dev;
1757 const struct sysctlnode *rnode, *cnode;
1758 struct sysctllog **log = &adapter->sysctllog;
1759 struct tx_ring *txr = adapter->tx_rings;
1760 struct rx_ring *rxr = adapter->rx_rings;
1761 struct ixgbe_hw *hw = &adapter->hw;
1762 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1763 const char *xname = device_xname(dev);
1764 int i;
1765
1766 /* Driver Statistics */
1767 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1768 NULL, xname, "Driver tx dma soft fail EFBIG");
1769 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1770 NULL, xname, "m_defrag() failed");
1771 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1772 NULL, xname, "Driver tx dma hard fail EFBIG");
1773 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1774 NULL, xname, "Driver tx dma hard fail EINVAL");
1775 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1776 NULL, xname, "Driver tx dma hard fail other");
1777 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1778 NULL, xname, "Driver tx dma soft fail EAGAIN");
1779 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1780 NULL, xname, "Driver tx dma soft fail ENOMEM");
1781 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1782 NULL, xname, "Watchdog timeouts");
1783 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1784 NULL, xname, "TSO errors");
1785 evcnt_attach_dynamic(&adapter->admin_irq, EVCNT_TYPE_INTR,
1786 NULL, xname, "Admin MSI-X IRQ Handled");
1787 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1788 NULL, xname, "Link softint");
1789 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1790 NULL, xname, "module softint");
1791 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1792 NULL, xname, "multimode softint");
1793 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1794 NULL, xname, "external PHY softint");
1795
1796 /* Max number of traffic class is 8 */
1797 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1798 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1799 snprintf(adapter->tcs[i].evnamebuf,
1800 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", xname, i);
1801 if (i < __arraycount(stats->mpc)) {
1802 evcnt_attach_dynamic(&stats->mpc[i],
1803 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1804 "RX Missed Packet Count");
1805 if (hw->mac.type == ixgbe_mac_82598EB)
1806 evcnt_attach_dynamic(&stats->rnbc[i],
1807 EVCNT_TYPE_MISC, NULL,
1808 adapter->tcs[i].evnamebuf,
1809 "Receive No Buffers");
1810 }
1811 if (i < __arraycount(stats->pxontxc)) {
1812 evcnt_attach_dynamic(&stats->pxontxc[i],
1813 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1814 "pxontxc");
1815 evcnt_attach_dynamic(&stats->pxonrxc[i],
1816 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1817 "pxonrxc");
1818 evcnt_attach_dynamic(&stats->pxofftxc[i],
1819 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1820 "pxofftxc");
1821 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1822 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1823 "pxoffrxc");
1824 if (hw->mac.type >= ixgbe_mac_82599EB)
1825 evcnt_attach_dynamic(&stats->pxon2offc[i],
1826 EVCNT_TYPE_MISC, NULL,
1827 adapter->tcs[i].evnamebuf,
1828 "pxon2offc");
1829 }
1830 }
1831
1832 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1833 #ifdef LRO
1834 struct lro_ctrl *lro = &rxr->lro;
1835 #endif /* LRO */
1836
1837 snprintf(adapter->queues[i].evnamebuf,
1838 sizeof(adapter->queues[i].evnamebuf), "%s q%d", xname, i);
1839 snprintf(adapter->queues[i].namebuf,
1840 sizeof(adapter->queues[i].namebuf), "q%d", i);
1841
1842 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1843 aprint_error_dev(dev,
1844 "could not create sysctl root\n");
1845 break;
1846 }
1847
1848 if (sysctl_createv(log, 0, &rnode, &rnode,
1849 0, CTLTYPE_NODE,
1850 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1851 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1852 break;
1853
1854 if (sysctl_createv(log, 0, &rnode, &cnode,
1855 CTLFLAG_READWRITE, CTLTYPE_INT,
1856 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1857 ixgbe_sysctl_interrupt_rate_handler, 0,
1858 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1859 break;
1860
1861 if (sysctl_createv(log, 0, &rnode, &cnode,
1862 CTLFLAG_READONLY, CTLTYPE_INT,
1863 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1864 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1865 0, CTL_CREATE, CTL_EOL) != 0)
1866 break;
1867
1868 if (sysctl_createv(log, 0, &rnode, &cnode,
1869 CTLFLAG_READONLY, CTLTYPE_INT,
1870 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1871 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1872 0, CTL_CREATE, CTL_EOL) != 0)
1873 break;
1874
1875 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1876 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1877 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1878 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1879 "Handled queue in softint");
1880 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1881 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1882 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1883 NULL, adapter->queues[i].evnamebuf, "TSO");
1884 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1885 NULL, adapter->queues[i].evnamebuf,
1886 "TX Queue No Descriptor Available");
1887 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1888 NULL, adapter->queues[i].evnamebuf,
1889 "Queue Packets Transmitted");
1890 #ifndef IXGBE_LEGACY_TX
1891 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1892 NULL, adapter->queues[i].evnamebuf,
1893 "Packets dropped in pcq");
1894 #endif
1895
1896 if (sysctl_createv(log, 0, &rnode, &cnode,
1897 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1898 SYSCTL_DESCR("Receive Descriptor next to check"),
1899 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1900 CTL_CREATE, CTL_EOL) != 0)
1901 break;
1902
1903 if (sysctl_createv(log, 0, &rnode, &cnode,
1904 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1905 SYSCTL_DESCR("Receive Descriptor next to refresh"),
1906 ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1907 CTL_CREATE, CTL_EOL) != 0)
1908 break;
1909
1910 if (sysctl_createv(log, 0, &rnode, &cnode,
1911 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1912 SYSCTL_DESCR("Receive Descriptor Head"),
1913 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1914 CTL_CREATE, CTL_EOL) != 0)
1915 break;
1916
1917 if (sysctl_createv(log, 0, &rnode, &cnode,
1918 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1919 SYSCTL_DESCR("Receive Descriptor Tail"),
1920 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1921 CTL_CREATE, CTL_EOL) != 0)
1922 break;
1923
1924 if (i < __arraycount(stats->qprc)) {
1925 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1926 NULL, adapter->queues[i].evnamebuf, "qprc");
1927 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1928 NULL, adapter->queues[i].evnamebuf, "qptc");
1929 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1930 NULL, adapter->queues[i].evnamebuf, "qbrc");
1931 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1932 NULL, adapter->queues[i].evnamebuf, "qbtc");
1933 if (hw->mac.type >= ixgbe_mac_82599EB)
1934 evcnt_attach_dynamic(&stats->qprdc[i],
1935 EVCNT_TYPE_MISC, NULL,
1936 adapter->queues[i].evnamebuf, "qprdc");
1937 }
1938
1939 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1940 NULL, adapter->queues[i].evnamebuf,
1941 "Queue Packets Received");
1942 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1943 NULL, adapter->queues[i].evnamebuf,
1944 "Queue Bytes Received");
1945 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1946 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1947 evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
1948 NULL, adapter->queues[i].evnamebuf, "Rx no mbuf");
1949 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1950 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1951 #ifdef LRO
1952 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1953 CTLFLAG_RD, &lro->lro_queued, 0,
1954 "LRO Queued");
1955 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1956 CTLFLAG_RD, &lro->lro_flushed, 0,
1957 "LRO Flushed");
1958 #endif /* LRO */
1959 }
1960
1961 /* MAC stats get their own sub node */
1962
1963 snprintf(stats->namebuf,
1964 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1965
1966 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "rx csum offload - IP");
1968 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "rx csum offload - L4");
1970 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "rx csum offload - IP bad");
1972 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "rx csum offload - L4 bad");
1974 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Interrupt conditions zero");
1976 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Legacy interrupts");
1978
1979 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "CRC Errors");
1981 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Illegal Byte Errors");
1983 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Byte Errors");
1985 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "MAC Short Packets Discarded");
1987 if (hw->mac.type >= ixgbe_mac_X550)
1988 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Bad SFD");
1990 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Total Packets Missed");
1992 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "MAC Local Faults");
1994 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "MAC Remote Faults");
1996 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Receive Length Errors");
1998 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Link XON Transmitted");
2000 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Link XON Received");
2002 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Link XOFF Transmitted");
2004 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Link XOFF Received");
2006
2007 /* Packet Reception Stats */
2008 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Total Octets Received");
2010 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Good Octets Received");
2012 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Total Packets Received");
2014 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Good Packets Received");
2016 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Multicast Packets Received");
2018 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Broadcast Packets Received");
2020 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "64 byte frames received ");
2022 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "65-127 byte frames received");
2024 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "128-255 byte frames received");
2026 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "256-511 byte frames received");
2028 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "512-1023 byte frames received");
2030 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "1023-1522 byte frames received");
2032 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Receive Undersized");
2034 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Fragmented Packets Received ");
2036 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Oversized Packets Received");
2038 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "Received Jabber");
2040 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "Management Packets Received");
2042 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "Management Packets Dropped");
2044 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "Checksum Errors");
2046
2047 /* Packet Transmission Stats */
2048 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "Good Octets Transmitted");
2050 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Total Packets Transmitted");
2052 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Good Packets Transmitted");
2054 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Broadcast Packets Transmitted");
2056 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Multicast Packets Transmitted");
2058 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Management Packets Transmitted");
2060 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "64 byte frames transmitted ");
2062 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "65-127 byte frames transmitted");
2064 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2065 stats->namebuf, "128-255 byte frames transmitted");
2066 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "256-511 byte frames transmitted");
2068 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "512-1023 byte frames transmitted");
2070 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "1024-1522 byte frames transmitted");
2072 } /* ixgbe_add_hw_stats */
2073
/************************************************************************
 * ixgbe_clear_evcnt
 *
 *   Reset every event counter attached by ixgbe_add_hw_stats() back to
 *   zero: driver soft counters, per-traffic-class counters, per-queue
 *   counters, and the MAC statistics.  The set of counters cleared here
 *   mirrors, and must stay in sync with, the set attached in
 *   ixgbe_add_hw_stats().
 ************************************************************************/
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver soft counters. */
	IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0);
	IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->tso_err, 0);
	IXGBE_EVC_STORE(&adapter->watchdog_events, 0);
	IXGBE_EVC_STORE(&adapter->admin_irq, 0);
	IXGBE_EVC_STORE(&adapter->link_sicount, 0);
	IXGBE_EVC_STORE(&adapter->mod_sicount, 0);
	IXGBE_EVC_STORE(&adapter->msf_sicount, 0);
	IXGBE_EVC_STORE(&adapter->phy_sicount, 0);

	/* Per-traffic-class counters (MAC-dependent subsets). */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			IXGBE_EVC_STORE(&stats->mpc[i], 0);
			/* rnbc is only attached for 82598. */
			if (hw->mac.type == ixgbe_mac_82598EB)
				IXGBE_EVC_STORE(&stats->rnbc[i], 0);
		}
		if (i < __arraycount(stats->pxontxc)) {
			IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
			IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
			IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
			IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
		}
	}

	/* Per-queue counters (tx ring, rx ring, queue stats). */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		IXGBE_EVC_STORE(&adapter->queues[i].irqs, 0);
		IXGBE_EVC_STORE(&adapter->queues[i].handleq, 0);
		IXGBE_EVC_STORE(&adapter->queues[i].req, 0);
		IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
		IXGBE_EVC_STORE(&txr->total_packets, 0);
		IXGBE_EVC_STORE(&txr->tso_tx, 0);
#ifndef IXGBE_LEGACY_TX
		IXGBE_EVC_STORE(&txr->pcq_drops, 0);
#endif
		/* Per-ring snapshots used to accumulate the soft counters. */
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			IXGBE_EVC_STORE(&stats->qprc[i], 0);
			IXGBE_EVC_STORE(&stats->qptc[i], 0);
			IXGBE_EVC_STORE(&stats->qbrc[i], 0);
			IXGBE_EVC_STORE(&stats->qbtc[i], 0);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				IXGBE_EVC_STORE(&stats->qprdc[i], 0);
		}

		IXGBE_EVC_STORE(&rxr->rx_packets, 0);
		IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
		IXGBE_EVC_STORE(&rxr->rx_copies, 0);
		IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
		IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
	}
	/* MAC statistics. */
	IXGBE_EVC_STORE(&stats->ipcs, 0);
	IXGBE_EVC_STORE(&stats->l4cs, 0);
	IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
	IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
	IXGBE_EVC_STORE(&stats->intzero, 0);
	IXGBE_EVC_STORE(&stats->legint, 0);
	IXGBE_EVC_STORE(&stats->crcerrs, 0);
	IXGBE_EVC_STORE(&stats->illerrc, 0);
	IXGBE_EVC_STORE(&stats->errbc, 0);
	IXGBE_EVC_STORE(&stats->mspdc, 0);
	/* mbsdc is only attached for X550 and newer. */
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_EVC_STORE(&stats->mbsdc, 0);
	IXGBE_EVC_STORE(&stats->mpctotal, 0);
	IXGBE_EVC_STORE(&stats->mlfc, 0);
	IXGBE_EVC_STORE(&stats->mrfc, 0);
	IXGBE_EVC_STORE(&stats->rlec, 0);
	IXGBE_EVC_STORE(&stats->lxontxc, 0);
	IXGBE_EVC_STORE(&stats->lxonrxc, 0);
	IXGBE_EVC_STORE(&stats->lxofftxc, 0);
	IXGBE_EVC_STORE(&stats->lxoffrxc, 0);

	/* Packet Reception Stats */
	IXGBE_EVC_STORE(&stats->tor, 0);
	IXGBE_EVC_STORE(&stats->gorc, 0);
	IXGBE_EVC_STORE(&stats->tpr, 0);
	IXGBE_EVC_STORE(&stats->gprc, 0);
	IXGBE_EVC_STORE(&stats->mprc, 0);
	IXGBE_EVC_STORE(&stats->bprc, 0);
	IXGBE_EVC_STORE(&stats->prc64, 0);
	IXGBE_EVC_STORE(&stats->prc127, 0);
	IXGBE_EVC_STORE(&stats->prc255, 0);
	IXGBE_EVC_STORE(&stats->prc511, 0);
	IXGBE_EVC_STORE(&stats->prc1023, 0);
	IXGBE_EVC_STORE(&stats->prc1522, 0);
	IXGBE_EVC_STORE(&stats->ruc, 0);
	IXGBE_EVC_STORE(&stats->rfc, 0);
	IXGBE_EVC_STORE(&stats->roc, 0);
	IXGBE_EVC_STORE(&stats->rjc, 0);
	IXGBE_EVC_STORE(&stats->mngprc, 0);
	IXGBE_EVC_STORE(&stats->mngpdc, 0);
	IXGBE_EVC_STORE(&stats->xec, 0);

	/* Packet Transmission Stats */
	IXGBE_EVC_STORE(&stats->gotc, 0);
	IXGBE_EVC_STORE(&stats->tpt, 0);
	IXGBE_EVC_STORE(&stats->gptc, 0);
	IXGBE_EVC_STORE(&stats->bptc, 0);
	IXGBE_EVC_STORE(&stats->mptc, 0);
	IXGBE_EVC_STORE(&stats->mngptc, 0);
	IXGBE_EVC_STORE(&stats->ptc64, 0);
	IXGBE_EVC_STORE(&stats->ptc127, 0);
	IXGBE_EVC_STORE(&stats->ptc255, 0);
	IXGBE_EVC_STORE(&stats->ptc511, 0);
	IXGBE_EVC_STORE(&stats->ptc1023, 0);
	IXGBE_EVC_STORE(&stats->ptc1522, 0);
}
2205
2206 /************************************************************************
2207 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2208 *
2209 * Retrieves the TDH value from the hardware
2210 ************************************************************************/
2211 static int
2212 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2213 {
2214 struct sysctlnode node = *rnode;
2215 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2216 struct adapter *adapter;
2217 uint32_t val;
2218
2219 if (!txr)
2220 return (0);
2221
2222 adapter = txr->adapter;
2223 if (ixgbe_fw_recovery_mode_swflag(adapter))
2224 return (EPERM);
2225
2226 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2227 node.sysctl_data = &val;
2228 return sysctl_lookup(SYSCTLFN_CALL(&node));
2229 } /* ixgbe_sysctl_tdh_handler */
2230
2231 /************************************************************************
2232 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2233 *
2234 * Retrieves the TDT value from the hardware
2235 ************************************************************************/
2236 static int
2237 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2238 {
2239 struct sysctlnode node = *rnode;
2240 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2241 struct adapter *adapter;
2242 uint32_t val;
2243
2244 if (!txr)
2245 return (0);
2246
2247 adapter = txr->adapter;
2248 if (ixgbe_fw_recovery_mode_swflag(adapter))
2249 return (EPERM);
2250
2251 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2252 node.sysctl_data = &val;
2253 return sysctl_lookup(SYSCTLFN_CALL(&node));
2254 } /* ixgbe_sysctl_tdt_handler */
2255
2256 /************************************************************************
2257 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2258 * handler function
2259 *
2260 * Retrieves the next_to_check value
2261 ************************************************************************/
2262 static int
2263 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2264 {
2265 struct sysctlnode node = *rnode;
2266 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2267 struct adapter *adapter;
2268 uint32_t val;
2269
2270 if (!rxr)
2271 return (0);
2272
2273 adapter = rxr->adapter;
2274 if (ixgbe_fw_recovery_mode_swflag(adapter))
2275 return (EPERM);
2276
2277 val = rxr->next_to_check;
2278 node.sysctl_data = &val;
2279 return sysctl_lookup(SYSCTLFN_CALL(&node));
2280 } /* ixgbe_sysctl_next_to_check_handler */
2281
2282 /************************************************************************
2283 * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to check
2284 * handler function
2285 *
2286 * Retrieves the next_to_refresh value
2287 ************************************************************************/
2288 static int
2289 ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2290 {
2291 struct sysctlnode node = *rnode;
2292 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2293 struct adapter *adapter;
2294 uint32_t val;
2295
2296 if (!rxr)
2297 return (0);
2298
2299 adapter = rxr->adapter;
2300 if (ixgbe_fw_recovery_mode_swflag(adapter))
2301 return (EPERM);
2302
2303 val = rxr->next_to_refresh;
2304 node.sysctl_data = &val;
2305 return sysctl_lookup(SYSCTLFN_CALL(&node));
2306 } /* ixgbe_sysctl_next_to_refresh_handler */
2307
2308 /************************************************************************
2309 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2310 *
2311 * Retrieves the RDH value from the hardware
2312 ************************************************************************/
2313 static int
2314 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2315 {
2316 struct sysctlnode node = *rnode;
2317 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2318 struct adapter *adapter;
2319 uint32_t val;
2320
2321 if (!rxr)
2322 return (0);
2323
2324 adapter = rxr->adapter;
2325 if (ixgbe_fw_recovery_mode_swflag(adapter))
2326 return (EPERM);
2327
2328 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2329 node.sysctl_data = &val;
2330 return sysctl_lookup(SYSCTLFN_CALL(&node));
2331 } /* ixgbe_sysctl_rdh_handler */
2332
2333 /************************************************************************
2334 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2335 *
2336 * Retrieves the RDT value from the hardware
2337 ************************************************************************/
2338 static int
2339 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2340 {
2341 struct sysctlnode node = *rnode;
2342 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2343 struct adapter *adapter;
2344 uint32_t val;
2345
2346 if (!rxr)
2347 return (0);
2348
2349 adapter = rxr->adapter;
2350 if (ixgbe_fw_recovery_mode_swflag(adapter))
2351 return (EPERM);
2352
2353 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2354 node.sysctl_data = &val;
2355 return sysctl_lookup(SYSCTLFN_CALL(&node));
2356 } /* ixgbe_sysctl_rdt_handler */
2357
2358 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2359 /************************************************************************
2360 * ixgbe_register_vlan
2361 *
2362 * Run via vlan config EVENT, it enables us to use the
2363 * HW Filter table since we can get the vlan id. This
2364 * just creates the entry in the soft version of the
2365 * VFTA, init will repopulate the real table.
2366 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * Set the bit for this VLAN id in the soft shadow of the VFTA.
	 * The table is an array of 32-bit words: the upper 7 bits of the
	 * tag select the word, the lower 5 bits select the bit.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= ((u32)1 << bit);
	/* Push the updated shadow table into the hardware. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2386
2387 /************************************************************************
2388 * ixgbe_unregister_vlan
2389 *
2390 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2391 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * Clear the bit for this VLAN id in the soft shadow of the VFTA
	 * (7-bit word index, 5-bit bit index; see ixgbe_register_vlan).
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2412 #endif
2413
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program the hardware's VLAN handling to match the current ethercom
 *   capabilities: per-queue (or global, on 82598) VLAN tag stripping,
 *   the VLAN filter table (VFTA) from the driver's shadow copy, and the
 *   VLAN filter enable bit.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	int		i;
	u32		ctrl;
	bool		hwtagging;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(ec);

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/*
		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
		 */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			if (hwtagging)
				ctrl |= IXGBE_RXDCTL_VME;
			else
				ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		/* Remember per-ring whether hardware strips the tag. */
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	/* VLAN hw tagging for 82598 (global, not per-queue) */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hwtagging)
			ctrl |= IXGBE_VLNCTRL_VME;
		else
			ctrl &= ~IXGBE_VLNCTRL_VME;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
2474
2475 /************************************************************************
2476 * ixgbe_get_slot_info
2477 *
2478 * Get the width and transaction speed of
2479 * the slot this adapter is plugged into.
2480 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		offset;
	u16		link;
	int		bus_info_valid = TRUE;

	/*
	 * Some devices are behind an internal bridge; for those we must
	 * query the parent PCI device instead of the function itself.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn if the slot cannot feed the NIC at full rate. */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		     (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		     (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev,
		    "Unable to determine slot speed/width. The speed/width "
		    "reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2584
2585 /************************************************************************
2586 * ixgbe_enable_queue - Queue Interrupt Enabler
2587 ************************************************************************/
/*
 * Unmask the interrupt for queue `vector', honouring the nesting
 * counter maintained by ixgbe_disable_queue_internal(): the interrupt
 * is re-enabled in hardware only when the count drops to zero.
 */
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64		queue = 1ULL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
	/*
	 * If the queue was disabled more than once, just drop one level
	 * of nesting and leave the interrupt masked.  The short-circuit
	 * ensures disabled_count is never decremented below zero.
	 */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 uses a single EIMS register. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs split the 64-bit mask across EIMS_EX[0]/[1]. */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2614
2615 /************************************************************************
2616 * ixgbe_disable_queue_internal
2617 ************************************************************************/
/*
 * Mask the interrupt for queue `vector'.  Disables nest: the hardware
 * mask is written only on the first disable; subsequent calls (when
 * `nestok' is true) merely bump the per-queue nesting counter, which
 * ixgbe_enable_queue() later unwinds.
 */
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64		queue = 1ULL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);

	if (que->disabled_count > 0) {
		/* Already masked; only track nesting if the caller allows. */
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 uses a single EIMC register. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs split the 64-bit mask across EIMC_EX[0]/[1]. */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2649
2650 /************************************************************************
2651 * ixgbe_disable_queue
2652 ************************************************************************/
2653 static inline void
2654 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2655 {
2656
2657 ixgbe_disable_queue_internal(adapter, vector, true);
2658 } /* ixgbe_disable_queue */
2659
2660 /************************************************************************
2661 * ixgbe_sched_handle_que - schedule deferred packet processing
2662 ************************************************************************/
2663 static inline void
2664 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2665 {
2666
2667 if (que->txrx_use_workqueue) {
2668 /*
2669 * adapter->que_wq is bound to each CPU instead of
2670 * each NIC queue to reduce workqueue kthread. As we
2671 * should consider about interrupt affinity in this
2672 * function, the workqueue kthread must be WQ_PERCPU.
2673 * If create WQ_PERCPU workqueue kthread for each NIC
2674 * queue, that number of created workqueue kthread is
2675 * (number of used NIC queue) * (number of CPUs) =
2676 * (number of CPUs) ^ 2 most often.
2677 *
2678 * The same NIC queue's interrupts are avoided by
2679 * masking the queue's interrupt. And different
2680 * NIC queue's interrupts use different struct work
2681 * (que->wq_cookie). So, "enqueued flag" to avoid
2682 * twice workqueue_enqueue() is not required .
2683 */
2684 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2685 } else
2686 softint_schedule(que->que_si);
2687 }
2688
2689 /************************************************************************
2690 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2691 ************************************************************************/
2692 static int
2693 ixgbe_msix_que(void *arg)
2694 {
2695 struct ix_queue *que = arg;
2696 struct adapter *adapter = que->adapter;
2697 struct ifnet *ifp = adapter->ifp;
2698 struct tx_ring *txr = que->txr;
2699 struct rx_ring *rxr = que->rxr;
2700 u32 newitr = 0;
2701
2702 /* Protect against spurious interrupts */
2703 if ((ifp->if_flags & IFF_RUNNING) == 0)
2704 return 0;
2705
2706 ixgbe_disable_queue(adapter, que->msix);
2707 IXGBE_EVC_ADD(&que->irqs, 1);
2708
2709 /*
2710 * Don't change "que->txrx_use_workqueue" from this point to avoid
2711 * flip-flopping softint/workqueue mode in one deferred processing.
2712 */
2713 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2714
2715 IXGBE_TX_LOCK(txr);
2716 ixgbe_txeof(txr);
2717 IXGBE_TX_UNLOCK(txr);
2718
2719 /* Do AIM now? */
2720
2721 if (adapter->enable_aim == false)
2722 goto no_calc;
2723 /*
2724 * Do Adaptive Interrupt Moderation:
2725 * - Write out last calculated setting
2726 * - Calculate based on average size over
2727 * the last interval.
2728 */
2729 if (que->eitr_setting)
2730 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2731
2732 que->eitr_setting = 0;
2733
2734 /* Idle, do nothing */
2735 if ((txr->bytes == 0) && (rxr->bytes == 0))
2736 goto no_calc;
2737
2738 if ((txr->bytes) && (txr->packets))
2739 newitr = txr->bytes/txr->packets;
2740 if ((rxr->bytes) && (rxr->packets))
2741 newitr = max(newitr, (rxr->bytes / rxr->packets));
2742 newitr += 24; /* account for hardware frame, crc */
2743
2744 /* set an upper boundary */
2745 newitr = min(newitr, 3000);
2746
2747 /* Be nice to the mid range */
2748 if ((newitr > 300) && (newitr < 1200))
2749 newitr = (newitr / 3);
2750 else
2751 newitr = (newitr / 2);
2752
2753 /*
2754 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2755 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2756 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2757 * on 1G and higher.
2758 */
2759 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2760 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL))
2761 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2762 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2763
2764 /* save for next interrupt */
2765 que->eitr_setting = newitr;
2766
2767 /* Reset state */
2768 txr->bytes = 0;
2769 txr->packets = 0;
2770 rxr->bytes = 0;
2771 rxr->packets = 0;
2772
2773 no_calc:
2774 ixgbe_sched_handle_que(adapter, que);
2775
2776 return 1;
2777 } /* ixgbe_msix_que */
2778
2779 /************************************************************************
2780 * ixgbe_media_status - Media Ioctl callback
2781 *
2782 * Called whenever the user queries the status of
2783 * the interface using ifconfig.
2784 ************************************************************************/
2785 static void
2786 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2787 {
2788 struct adapter *adapter = ifp->if_softc;
2789 struct ixgbe_hw *hw = &adapter->hw;
2790 int layer;
2791
2792 INIT_DEBUGOUT("ixgbe_media_status: begin");
2793 IXGBE_CORE_LOCK(adapter);
2794 ixgbe_update_link_status(adapter);
2795
2796 ifmr->ifm_status = IFM_AVALID;
2797 ifmr->ifm_active = IFM_ETHER;
2798
2799 if (adapter->link_active != LINK_STATE_UP) {
2800 ifmr->ifm_active |= IFM_NONE;
2801 IXGBE_CORE_UNLOCK(adapter);
2802 return;
2803 }
2804
2805 ifmr->ifm_status |= IFM_ACTIVE;
2806 layer = adapter->phy_layer;
2807
2808 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2809 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2810 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2811 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2812 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2813 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2814 switch (adapter->link_speed) {
2815 case IXGBE_LINK_SPEED_10GB_FULL:
2816 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2817 break;
2818 case IXGBE_LINK_SPEED_5GB_FULL:
2819 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2820 break;
2821 case IXGBE_LINK_SPEED_2_5GB_FULL:
2822 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2823 break;
2824 case IXGBE_LINK_SPEED_1GB_FULL:
2825 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2826 break;
2827 case IXGBE_LINK_SPEED_100_FULL:
2828 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2829 break;
2830 case IXGBE_LINK_SPEED_10_FULL:
2831 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2832 break;
2833 }
2834 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2835 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2836 switch (adapter->link_speed) {
2837 case IXGBE_LINK_SPEED_10GB_FULL:
2838 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2839 break;
2840 }
2841 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2842 switch (adapter->link_speed) {
2843 case IXGBE_LINK_SPEED_10GB_FULL:
2844 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2845 break;
2846 case IXGBE_LINK_SPEED_1GB_FULL:
2847 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2848 break;
2849 }
2850 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2851 switch (adapter->link_speed) {
2852 case IXGBE_LINK_SPEED_10GB_FULL:
2853 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2854 break;
2855 case IXGBE_LINK_SPEED_1GB_FULL:
2856 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2857 break;
2858 }
2859 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2860 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2861 switch (adapter->link_speed) {
2862 case IXGBE_LINK_SPEED_10GB_FULL:
2863 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2864 break;
2865 case IXGBE_LINK_SPEED_1GB_FULL:
2866 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2867 break;
2868 }
2869 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2870 switch (adapter->link_speed) {
2871 case IXGBE_LINK_SPEED_10GB_FULL:
2872 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2873 break;
2874 }
2875 /*
2876 * XXX: These need to use the proper media types once
2877 * they're added.
2878 */
2879 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2880 switch (adapter->link_speed) {
2881 case IXGBE_LINK_SPEED_10GB_FULL:
2882 #ifndef IFM_ETH_XTYPE
2883 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2884 #else
2885 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2886 #endif
2887 break;
2888 case IXGBE_LINK_SPEED_2_5GB_FULL:
2889 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2890 break;
2891 case IXGBE_LINK_SPEED_1GB_FULL:
2892 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2893 break;
2894 }
2895 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2896 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2897 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2898 switch (adapter->link_speed) {
2899 case IXGBE_LINK_SPEED_10GB_FULL:
2900 #ifndef IFM_ETH_XTYPE
2901 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2902 #else
2903 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2904 #endif
2905 break;
2906 case IXGBE_LINK_SPEED_2_5GB_FULL:
2907 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2908 break;
2909 case IXGBE_LINK_SPEED_1GB_FULL:
2910 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2911 break;
2912 }
2913
2914 /* If nothing is recognized... */
2915 #if 0
2916 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2917 ifmr->ifm_active |= IFM_UNKNOWN;
2918 #endif
2919
2920 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2921
2922 /* Display current flow control setting used on link */
2923 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2924 hw->fc.current_mode == ixgbe_fc_full)
2925 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2926 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2927 hw->fc.current_mode == ixgbe_fc_full)
2928 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2929
2930 IXGBE_CORE_UNLOCK(adapter);
2931
2932 return;
2933 } /* ixgbe_media_status */
2934
2935 /************************************************************************
2936 * ixgbe_media_change - Media Ioctl callback
2937 *
2938 * Called when the user changes speed/duplex using
2939 * media/mediopt option with ifconfig.
2940 ************************************************************************/
2941 static int
2942 ixgbe_media_change(struct ifnet *ifp)
2943 {
2944 struct adapter *adapter = ifp->if_softc;
2945 struct ifmedia *ifm = &adapter->media;
2946 struct ixgbe_hw *hw = &adapter->hw;
2947 ixgbe_link_speed speed = 0;
2948 ixgbe_link_speed link_caps = 0;
2949 bool negotiate = false;
2950 s32 err = IXGBE_NOT_IMPLEMENTED;
2951
2952 INIT_DEBUGOUT("ixgbe_media_change: begin");
2953
2954 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2955 return (EINVAL);
2956
2957 if (hw->phy.media_type == ixgbe_media_type_backplane)
2958 return (EPERM);
2959
2960 IXGBE_CORE_LOCK(adapter);
2961 /*
2962 * We don't actually need to check against the supported
2963 * media types of the adapter; ifmedia will take care of
2964 * that for us.
2965 */
2966 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2967 case IFM_AUTO:
2968 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
2969 &negotiate);
2970 if (err != IXGBE_SUCCESS) {
2971 device_printf(adapter->dev, "Unable to determine "
2972 "supported advertise speeds\n");
2973 IXGBE_CORE_UNLOCK(adapter);
2974 return (ENODEV);
2975 }
2976 speed |= link_caps;
2977 break;
2978 case IFM_10G_T:
2979 case IFM_10G_LRM:
2980 case IFM_10G_LR:
2981 case IFM_10G_TWINAX:
2982 case IFM_10G_SR:
2983 case IFM_10G_CX4:
2984 #ifdef IFM_ETH_XTYPE
2985 case IFM_10G_KR:
2986 case IFM_10G_KX4:
2987 #endif
2988 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2989 break;
2990 case IFM_5000_T:
2991 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2992 break;
2993 case IFM_2500_T:
2994 case IFM_2500_KX:
2995 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2996 break;
2997 case IFM_1000_T:
2998 case IFM_1000_LX:
2999 case IFM_1000_SX:
3000 case IFM_1000_KX:
3001 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3002 break;
3003 case IFM_100_TX:
3004 speed |= IXGBE_LINK_SPEED_100_FULL;
3005 break;
3006 case IFM_10_T:
3007 speed |= IXGBE_LINK_SPEED_10_FULL;
3008 break;
3009 case IFM_NONE:
3010 break;
3011 default:
3012 goto invalid;
3013 }
3014
3015 hw->mac.autotry_restart = TRUE;
3016 hw->mac.ops.setup_link(hw, speed, TRUE);
3017 adapter->advertise = 0;
3018 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3019 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3020 adapter->advertise |= 1 << 2;
3021 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3022 adapter->advertise |= 1 << 1;
3023 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3024 adapter->advertise |= 1 << 0;
3025 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3026 adapter->advertise |= 1 << 3;
3027 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3028 adapter->advertise |= 1 << 4;
3029 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3030 adapter->advertise |= 1 << 5;
3031 }
3032
3033 IXGBE_CORE_UNLOCK(adapter);
3034 return (0);
3035
3036 invalid:
3037 device_printf(adapter->dev, "Invalid media type!\n");
3038 IXGBE_CORE_UNLOCK(adapter);
3039
3040 return (EINVAL);
3041 } /* ixgbe_media_change */
3042
3043 /************************************************************************
3044 * ixgbe_msix_admin - Link status change ISR (MSI-X)
3045 ************************************************************************/
3046 static int
3047 ixgbe_msix_admin(void *arg)
3048 {
3049 struct adapter *adapter = arg;
3050 struct ixgbe_hw *hw = &adapter->hw;
3051 u32 eicr;
3052 u32 eims_orig;
3053 u32 eims_disable = 0;
3054
3055 IXGBE_EVC_ADD(&adapter->admin_irq, 1);
3056
3057 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3058 /* Pause other interrupts */
3059 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3060
3061 /*
3062 * First get the cause.
3063 *
3064 * The specifications of 82598, 82599, X540 and X550 say EICS register
3065 * is write only. However, Linux says it is a workaround for silicon
3066 * errata to read EICS instead of EICR to get interrupt cause.
3067 * At least, reading EICR clears lower 16bits of EIMS on 82598.
3068 */
3069 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3070 /* Be sure the queue bits are not cleared */
3071 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3072 /* Clear all OTHER interrupts with write */
3073 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3074
3075 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
3076
3077 /* Re-enable some OTHER interrupts */
3078 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3079
3080 return 1;
3081 } /* ixgbe_msix_admin */
3082
/*
 * Common dispatcher for admin ("other") interrupt causes, shared by the
 * MSI-X and legacy interrupt paths.  For each cause bit set in 'eicr' it
 * schedules the matching softint and marks the cause in *eims_disable so
 * the caller keeps that interrupt masked until the softint has run.
 */
static void
ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		softint_schedule(adapter->link_si);
		*eims_disable |= IXGBE_EIMS_LSC;
	}

	if (ixgbe_is_sfp(hw)) {
		u32 eicr_mask;

		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/*
		 * An interrupt might not arrive when a module is inserted.
		 * When an link status change interrupt occurred and the driver
		 * still regard SFP as unplugged, issue the module softint
		 * and then issue LSC interrupt.
		 */
		if ((eicr & eicr_mask)
		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
			&& (eicr & IXGBE_EICR_LSC))) {
			softint_schedule(adapter->mod_si);
			*eims_disable |= IXGBE_EIMS_LSC;
		}

		/* 82599 multi-speed fiber setup request. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			softint_schedule(adapter->msf_si);
			*eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* Causes below don't exist on 82598. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* Only one reinit may be in flight at a time. */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
				softint_schedule(adapter->fdir_si);
				/* Disable the interrupt */
				*eims_disable |= IXGBE_EIMS_FLOW_DIR;
			}
		}
#endif

		if (eicr & IXGBE_EICR_ECC) {
			/* Rate-limited so a stuck cause can't flood the log. */
			if (ratecheck(&adapter->lasterr_time,
			    &ixgbe_errlog_intrvl))
				device_printf(adapter->dev,
				    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via GPI SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				if (ratecheck(&adapter->lasterr_time,
				    &ixgbe_errlog_intrvl)) {
					device_printf(adapter->dev,
					    "CRITICAL: OVER TEMP!! "
					    "PHY IS SHUT DOWN!!\n");
					device_printf(adapter->dev,
					    "System shutdown required!\n");
				}
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				if (ratecheck(&adapter->lasterr_time,
				    &ixgbe_errlog_intrvl)) {
					device_printf(adapter->dev,
					    "CRITICAL: OVER TEMP!! "
					    "PHY IS SHUT DOWN!!\n");
					device_printf(adapter->dev,
					    "System shutdown required!\n");
				}
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX)) {
			softint_schedule(adapter->mbx_si);
			*eims_disable |= IXGBE_EIMS_MAILBOX;
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		ixgbe_check_fan_failure(adapter, eicr, true);

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		softint_schedule(adapter->phy_si);
		/*
		 * NOTE(review): an EICR_* constant is OR'd into the EIMS
		 * disable mask here (other branches use EIMS_*) — the bit
		 * positions presumably match; confirm against ixgbe_type.h.
		 */
		*eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
	}
} /* ixgbe_intr_admin_common */
3198
3199 static void
3200 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3201 {
3202
3203 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3204 itr |= itr << 16;
3205 else
3206 itr |= IXGBE_EITR_CNT_WDIS;
3207
3208 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3209 }
3210
3211
3212 /************************************************************************
3213 * ixgbe_sysctl_interrupt_rate_handler
3214 ************************************************************************/
3215 static int
3216 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3217 {
3218 struct sysctlnode node = *rnode;
3219 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3220 struct adapter *adapter;
3221 uint32_t reg, usec, rate;
3222 int error;
3223
3224 if (que == NULL)
3225 return 0;
3226
3227 adapter = que->adapter;
3228 if (ixgbe_fw_recovery_mode_swflag(adapter))
3229 return (EPERM);
3230
3231 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3232 usec = ((reg & 0x0FF8) >> 3);
3233 if (usec > 0)
3234 rate = 500000 / usec;
3235 else
3236 rate = 0;
3237 node.sysctl_data = &rate;
3238 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3239 if (error || newp == NULL)
3240 return error;
3241 reg &= ~0xfff; /* default, no limitation */
3242 if (rate > 0 && rate < 500000) {
3243 if (rate < 1000)
3244 rate = 1000;
3245 reg |= ((4000000 / rate) & 0xff8);
3246 /*
3247 * When RSC is used, ITR interval must be larger than
3248 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3249 * The minimum value is always greater than 2us on 100M
3250 * (and 10M?(not documented)), but it's not on 1G and higher.
3251 */
3252 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3253 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3254 if ((adapter->num_queues > 1)
3255 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3256 return EINVAL;
3257 }
3258 ixgbe_max_interrupt_rate = rate;
3259 } else
3260 ixgbe_max_interrupt_rate = 0;
3261 ixgbe_eitr_write(adapter, que->msix, reg);
3262
3263 return (0);
3264 } /* ixgbe_sysctl_interrupt_rate_handler */
3265
3266 const struct sysctlnode *
3267 ixgbe_sysctl_instance(struct adapter *adapter)
3268 {
3269 const char *dvname;
3270 struct sysctllog **log;
3271 int rc;
3272 const struct sysctlnode *rnode;
3273
3274 if (adapter->sysctltop != NULL)
3275 return adapter->sysctltop;
3276
3277 log = &adapter->sysctllog;
3278 dvname = device_xname(adapter->dev);
3279
3280 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3281 0, CTLTYPE_NODE, dvname,
3282 SYSCTL_DESCR("ixgbe information and settings"),
3283 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3284 goto err;
3285
3286 return rnode;
3287 err:
3288 device_printf(adapter->dev,
3289 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3290 return NULL;
3291 }
3292
3293 /************************************************************************
3294 * ixgbe_add_device_sysctls
3295 ************************************************************************/
3296 static void
3297 ixgbe_add_device_sysctls(struct adapter *adapter)
3298 {
3299 device_t dev = adapter->dev;
3300 struct ixgbe_hw *hw = &adapter->hw;
3301 struct sysctllog **log;
3302 const struct sysctlnode *rnode, *cnode;
3303
3304 log = &adapter->sysctllog;
3305
3306 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3307 aprint_error_dev(dev, "could not create sysctl root\n");
3308 return;
3309 }
3310
3311 if (sysctl_createv(log, 0, &rnode, &cnode,
3312 CTLFLAG_READWRITE, CTLTYPE_INT,
3313 "debug", SYSCTL_DESCR("Debug Info"),
3314 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL)
3315 != 0)
3316 aprint_error_dev(dev, "could not create sysctl\n");
3317
3318 if (sysctl_createv(log, 0, &rnode, &cnode,
3319 CTLFLAG_READWRITE, CTLTYPE_INT,
3320 "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
3321 ixgbe_sysctl_rx_copy_len, 0,
3322 (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3323 aprint_error_dev(dev, "could not create sysctl\n");
3324
3325 if (sysctl_createv(log, 0, &rnode, &cnode,
3326 CTLFLAG_READONLY, CTLTYPE_INT,
3327 "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
3328 NULL, 0, &adapter->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3329 aprint_error_dev(dev, "could not create sysctl\n");
3330
3331 if (sysctl_createv(log, 0, &rnode, &cnode,
3332 CTLFLAG_READONLY, CTLTYPE_INT,
3333 "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
3334 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3335 aprint_error_dev(dev, "could not create sysctl\n");
3336
3337 if (sysctl_createv(log, 0, &rnode, &cnode,
3338 CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
3339 SYSCTL_DESCR("max number of RX packets to process"),
3340 ixgbe_sysctl_rx_process_limit, 0, (void *)adapter, 0, CTL_CREATE,
3341 CTL_EOL) != 0)
3342 aprint_error_dev(dev, "could not create sysctl\n");
3343
3344 if (sysctl_createv(log, 0, &rnode, &cnode,
3345 CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
3346 SYSCTL_DESCR("max number of TX packets to process"),
3347 ixgbe_sysctl_tx_process_limit, 0, (void *)adapter, 0, CTL_CREATE,
3348 CTL_EOL) != 0)
3349 aprint_error_dev(dev, "could not create sysctl\n");
3350
3351 if (sysctl_createv(log, 0, &rnode, &cnode,
3352 CTLFLAG_READONLY, CTLTYPE_INT,
3353 "num_queues", SYSCTL_DESCR("Number of queues"),
3354 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3355 aprint_error_dev(dev, "could not create sysctl\n");
3356
3357 /* Sysctls for all devices */
3358 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3359 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3360 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3361 CTL_EOL) != 0)
3362 aprint_error_dev(dev, "could not create sysctl\n");
3363
3364 adapter->enable_aim = ixgbe_enable_aim;
3365 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3366 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3367 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3368 aprint_error_dev(dev, "could not create sysctl\n");
3369
3370 if (sysctl_createv(log, 0, &rnode, &cnode,
3371 CTLFLAG_READWRITE, CTLTYPE_INT,
3372 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3373 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3374 CTL_EOL) != 0)
3375 aprint_error_dev(dev, "could not create sysctl\n");
3376
3377 /*
3378 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3379 * it causesflip-flopping softint/workqueue mode in one deferred
3380 * processing. Therefore, preempt_disable()/preempt_enable() are
3381 * required in ixgbe_sched_handle_que() to avoid
3382 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3383 * I think changing "que->txrx_use_workqueue" in interrupt handler
3384 * is lighter than doing preempt_disable()/preempt_enable() in every
3385 * ixgbe_sched_handle_que().
3386 */
3387 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3388 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3389 CTLTYPE_BOOL, "txrx_workqueue",
3390 SYSCTL_DESCR("Use workqueue for packet processing"),
3391 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE,
3392 CTL_EOL) != 0)
3393 aprint_error_dev(dev, "could not create sysctl\n");
3394
3395 #ifdef IXGBE_DEBUG
3396 /* testing sysctls (for all devices) */
3397 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3398 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3399 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3400 CTL_EOL) != 0)
3401 aprint_error_dev(dev, "could not create sysctl\n");
3402
3403 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3404 CTLTYPE_STRING, "print_rss_config",
3405 SYSCTL_DESCR("Prints RSS Configuration"),
3406 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3407 CTL_EOL) != 0)
3408 aprint_error_dev(dev, "could not create sysctl\n");
3409 #endif
3410 /* for X550 series devices */
3411 if (hw->mac.type >= ixgbe_mac_X550)
3412 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3413 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3414 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3415 CTL_EOL) != 0)
3416 aprint_error_dev(dev, "could not create sysctl\n");
3417
3418 /* for WoL-capable devices */
3419 if (adapter->wol_support) {
3420 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3421 CTLTYPE_BOOL, "wol_enable",
3422 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3423 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3424 CTL_EOL) != 0)
3425 aprint_error_dev(dev, "could not create sysctl\n");
3426
3427 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3428 CTLTYPE_INT, "wufc",
3429 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3430 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3431 CTL_EOL) != 0)
3432 aprint_error_dev(dev, "could not create sysctl\n");
3433 }
3434
3435 /* for X552/X557-AT devices */
3436 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3437 const struct sysctlnode *phy_node;
3438
3439 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3440 "phy", SYSCTL_DESCR("External PHY sysctls"),
3441 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3442 aprint_error_dev(dev, "could not create sysctl\n");
3443 return;
3444 }
3445
3446 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3447 CTLTYPE_INT, "temp",
3448 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3449 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3450 CTL_EOL) != 0)
3451 aprint_error_dev(dev, "could not create sysctl\n");
3452
3453 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3454 CTLTYPE_INT, "overtemp_occurred",
3455 SYSCTL_DESCR(
3456 "External PHY High Temperature Event Occurred"),
3457 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3458 CTL_CREATE, CTL_EOL) != 0)
3459 aprint_error_dev(dev, "could not create sysctl\n");
3460 }
3461
3462 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3463 && (hw->phy.type == ixgbe_phy_fw))
3464 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3465 CTLTYPE_BOOL, "force_10_100_autonego",
3466 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3467 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3468 CTL_CREATE, CTL_EOL) != 0)
3469 aprint_error_dev(dev, "could not create sysctl\n");
3470
3471 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3472 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3473 CTLTYPE_INT, "eee_state",
3474 SYSCTL_DESCR("EEE Power Save State"),
3475 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3476 CTL_EOL) != 0)
3477 aprint_error_dev(dev, "could not create sysctl\n");
3478 }
3479 } /* ixgbe_add_device_sysctls */
3480
3481 /************************************************************************
3482 * ixgbe_allocate_pci_resources
3483 ************************************************************************/
3484 static int
3485 ixgbe_allocate_pci_resources(struct adapter *adapter,
3486 const struct pci_attach_args *pa)
3487 {
3488 pcireg_t memtype, csr;
3489 device_t dev = adapter->dev;
3490 bus_addr_t addr;
3491 int flags;
3492
3493 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3494 switch (memtype) {
3495 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3496 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3497 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3498 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3499 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3500 goto map_err;
3501 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3502 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3503 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3504 }
3505 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3506 adapter->osdep.mem_size, flags,
3507 &adapter->osdep.mem_bus_space_handle) != 0) {
3508 map_err:
3509 adapter->osdep.mem_size = 0;
3510 aprint_error_dev(dev, "unable to map BAR0\n");
3511 return ENXIO;
3512 }
3513 /*
3514 * Enable address decoding for memory range in case BIOS or
3515 * UEFI don't set it.
3516 */
3517 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3518 PCI_COMMAND_STATUS_REG);
3519 csr |= PCI_COMMAND_MEM_ENABLE;
3520 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3521 csr);
3522 break;
3523 default:
3524 aprint_error_dev(dev, "unexpected type on BAR0\n");
3525 return ENXIO;
3526 }
3527
3528 return (0);
3529 } /* ixgbe_allocate_pci_resources */
3530
/*
 * Tear down all deferred-processing resources: per-queue softints and
 * workqueues, then the admin-path softints (link/module/MSF/PHY, and,
 * when the feature is set, FDIR and the SR-IOV mailbox).  Called from
 * detach and from attach failure paths, so every pointer is checked
 * before disestablishing.
 */
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		/* TX softint only exists when legacy TX is not in use. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	/*
	 * NOTE(review): FDIR above checks feat_en while SRIOV checks
	 * feat_cap — confirm the asymmetry is intentional (mbx_si may be
	 * established based on capability rather than enablement).
	 */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
3583
3584 /************************************************************************
3585 * ixgbe_detach - Device removal routine
3586 *
3587 * Called when the driver is being removed.
3588 * Stops the adapter and deallocates all the resources
3589 * that were allocated for driver operation.
3590 *
3591 * return 0 on success, positive on failure
3592 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;
	int	i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* If attach never completed there is nothing to tear down. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while SR-IOV virtual functions are still bound. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls
	 * ixgbe_stop_locked(), so it's not required to call ixgbe_stop_locked()
	 * directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Tear down all softints and workqueues before freeing resources. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any in-flight callout invocations to complete. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach the driver-private (software) event counters. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/*
	 * Per-traffic-class counters; the guards mirror the attach-time
	 * registration (rnbc exists on 82598 only, pxon2offc on 82599+).
	 */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters for each Tx/Rx ring pair. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_mbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* MAC-wide hardware statistics counters. */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release ring memory, multicast table and the core lock. */
	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3780
3781 /************************************************************************
3782 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3783 *
3784 * Prepare the adapter/port for LPLU and/or WoL
3785 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	/* Caller must hold the core lock (we call ixgbe_stop_locked()). */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/*
		 * Keep the PHY from being reset while we stop the port,
		 * then ask it to enter Low Power Link Up.
		 */
		hw->phy.reset_disable = true;
		ixgbe_stop_locked(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop_locked(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake registers. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
	}

	/* 0 on success, or the LPLU error code from the PHY ops. */
	return error;
} /* ixgbe_setup_low_power_mode */
3839
3840 /************************************************************************
3841 * ixgbe_shutdown - Shutdown entry point
3842 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Same path as suspend: stop the port and arm WoL if enabled. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3859
3860 /************************************************************************
3861 * ixgbe_suspend
3862 *
3863 * From D0 to D3
3864 ************************************************************************/
3865 static bool
3866 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3867 {
3868 struct adapter *adapter = device_private(dev);
3869 int error = 0;
3870
3871 INIT_DEBUGOUT("ixgbe_suspend: begin");
3872
3873 IXGBE_CORE_LOCK(adapter);
3874
3875 error = ixgbe_setup_low_power_mode(adapter);
3876
3877 IXGBE_CORE_UNLOCK(adapter);
3878
3879 return (error);
3880 } /* ixgbe_suspend */
3881
3882 /************************************************************************
3883 * ixgbe_resume
3884 *
3885 * From D3 to D0
3886 ************************************************************************/
3887 static bool
3888 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3889 {
3890 struct adapter *adapter = device_private(dev);
3891 struct ifnet *ifp = adapter->ifp;
3892 struct ixgbe_hw *hw = &adapter->hw;
3893 u32 wus;
3894
3895 INIT_DEBUGOUT("ixgbe_resume: begin");
3896
3897 IXGBE_CORE_LOCK(adapter);
3898
3899 /* Read & clear WUS register */
3900 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3901 if (wus)
3902 device_printf(dev, "Woken up by (WUS): %#010x\n",
3903 IXGBE_READ_REG(hw, IXGBE_WUS));
3904 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3905 /* And clear WUFC until next low-power transition */
3906 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3907
3908 /*
3909 * Required after D3->D0 transition;
3910 * will re-advertise all previous advertised speeds
3911 */
3912 if (ifp->if_flags & IFF_UP)
3913 ixgbe_init_locked(adapter);
3914
3915 IXGBE_CORE_UNLOCK(adapter);
3916
3917 return true;
3918 } /* ixgbe_resume */
3919
3920 /*
3921 * Set the various hardware offload abilities.
3922 *
3923 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3924 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3925 * mbuf offload flags the driver will understand.
3926 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * NOTE(review): intentionally empty in the NetBSD port; the
	 * FreeBSD if_hwassist mechanism has no direct equivalent here.
	 * Presumably checksum/TSO offload capability is communicated
	 * through if_capenable elsewhere — confirm before removing
	 * this placeholder, which mirrors the FreeBSD driver structure.
	 */
}
3932
3933 /************************************************************************
3934 * ixgbe_init_locked - Init entry point
3935 *
3936 * Used in two ways: It is used by the stack as an init
3937 * entry point in network interface structure. It is also
3938 * used by the driver as a hw/sw initialization routine to
3939 * get to a consistent state.
3940 *
 *   Returns nothing (void); on failure the interface is stopped
 *   via ixgbe_stop_locked() and an error is logged.
3942 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	bool		unsupported_sfp = false;
	int		i, j, error;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the adapter and the timer before reprogramming. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Use fixed buffer size, even for jumbo frames */
	adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	error = ixgbe_setup_receive_structures(adapter);
	if (error) {
		device_printf(dev,
		    "Could not setup receive structures (err = %d)\n", error);
		ixgbe_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the ring-enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the per-second link/stats/watchdog timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		error = hw->phy.ops.identify(hw);
		if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4191
4192 /************************************************************************
4193 * ixgbe_init
4194 ************************************************************************/
4195 static int
4196 ixgbe_init(struct ifnet *ifp)
4197 {
4198 struct adapter *adapter = ifp->if_softc;
4199
4200 IXGBE_CORE_LOCK(adapter);
4201 ixgbe_init_locked(adapter);
4202 IXGBE_CORE_UNLOCK(adapter);
4203
4204 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4205 } /* ixgbe_init */
4206
4207 /************************************************************************
4208 * ixgbe_set_ivar
4209 *
4210 * Setup the correct IVAR register for a particular MSI-X interrupt
4211 * (yes this is all very magic and confusing :)
4212 * - entry is the register array entry
4213 * - vector is the MSI-X vector for this queue
4214 * - type is RX/TX/MISC
4215 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the IVAR entry valid; the low bits carry the vector. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: Rx and Tx causes live in separate halves of the
		 * IVAR array (type * 64 entries apart); each 32-bit IVAR
		 * register holds four 8-bit entries.  type == -1 selects
		 * the "other causes" slot.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* Misc causes use IVAR_MISC; byte selected by entry. */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			/*
			 * Two queues per IVAR register; within a queue's
			 * 16-bit half, Rx is the low byte and Tx the high
			 * byte (8 * type).
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		/* Unknown MAC: leave interrupt routing untouched. */
		break;
	}
} /* ixgbe_set_ivar */
4259
4260 /************************************************************************
4261 * ixgbe_configure_ivars
4262 ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32		newitr;

	/*
	 * Derive the initial EITR value from the interrupt-rate tunable.
	 * (The 4000000 constant and 0x0FF8 mask match the EITR register
	 * encoding used throughout this driver — presumably interval
	 * units with the low 3 bits reserved; confirm against the
	 * datasheet before changing.)
	 */
	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(adapter, que->msix, newitr);
		/*
		 * To eliminate influence of the previous state.
		 * At this point, Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so both
		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
4301
4302 /************************************************************************
4303 * ixgbe_config_gpie
4304 ************************************************************************/
4305 static void
4306 ixgbe_config_gpie(struct adapter *adapter)
4307 {
4308 struct ixgbe_hw *hw = &adapter->hw;
4309 u32 gpie;
4310
4311 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4312
4313 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4314 /* Enable Enhanced MSI-X mode */
4315 gpie |= IXGBE_GPIE_MSIX_MODE
4316 | IXGBE_GPIE_EIAME
4317 | IXGBE_GPIE_PBA_SUPPORT
4318 | IXGBE_GPIE_OCD;
4319 }
4320
4321 /* Fan Failure Interrupt */
4322 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4323 gpie |= IXGBE_SDP1_GPIEN;
4324
4325 /* Thermal Sensor Interrupt */
4326 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4327 gpie |= IXGBE_SDP0_GPIEN_X540;
4328
4329 /* Link detection */
4330 switch (hw->mac.type) {
4331 case ixgbe_mac_82599EB:
4332 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4333 break;
4334 case ixgbe_mac_X550EM_x:
4335 case ixgbe_mac_X550EM_a:
4336 gpie |= IXGBE_SDP0_GPIEN_X540;
4337 break;
4338 default:
4339 break;
4340 }
4341
4342 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4343
4344 } /* ixgbe_config_gpie */
4345
4346 /************************************************************************
4347 * ixgbe_config_delay_values
4348 *
4349 * Requires adapter->max_frame_size to be set.
4350 ************************************************************************/
4351 static void
4352 ixgbe_config_delay_values(struct adapter *adapter)
4353 {
4354 struct ixgbe_hw *hw = &adapter->hw;
4355 u32 rxpb, frame, size, tmp;
4356
4357 frame = adapter->max_frame_size;
4358
4359 /* Calculate High Water */
4360 switch (hw->mac.type) {
4361 case ixgbe_mac_X540:
4362 case ixgbe_mac_X550:
4363 case ixgbe_mac_X550EM_x:
4364 case ixgbe_mac_X550EM_a:
4365 tmp = IXGBE_DV_X540(frame, frame);
4366 break;
4367 default:
4368 tmp = IXGBE_DV(frame, frame);
4369 break;
4370 }
4371 size = IXGBE_BT2KB(tmp);
4372 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4373 hw->fc.high_water[0] = rxpb - size;
4374
4375 /* Now calculate Low Water */
4376 switch (hw->mac.type) {
4377 case ixgbe_mac_X540:
4378 case ixgbe_mac_X550:
4379 case ixgbe_mac_X550EM_x:
4380 case ixgbe_mac_X550EM_a:
4381 tmp = IXGBE_LOW_DV_X540(frame);
4382 break;
4383 default:
4384 tmp = IXGBE_LOW_DV(frame);
4385 break;
4386 }
4387 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4388
4389 hw->fc.pause_time = IXGBE_FC_PAUSE;
4390 hw->fc.send_xon = TRUE;
4391 } /* ixgbe_config_delay_values */
4392
4393 /************************************************************************
4394 * ixgbe_set_rxfilter - Multicast Update
4395 *
4396 * Called whenever multicast address list is updated.
4397 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the multicast table from scratch on every call. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * Walk the ethercom multicast list under ETHER_LOCK.  Fall back
	 * to ALLMULTI when either the hardware table would overflow or
	 * an entry is actually a range (addrlo != addrhi), which the
	 * exact-match filter cannot represent.
	 */
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Translate PROMISC/ALLMULTI into the FCTRL UPE/MPE bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Program the exact-match table via the shared-code iterator. */
	if (mcnt <= MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}
} /* ixgbe_set_rxfilter */
4451
4452 /************************************************************************
4453 * ixgbe_mc_array_itr
4454 *
4455 * An iterator function needed by the multicast shared code.
4456 * It feeds the shared code routine the addresses in the
4457 * array of ixgbe_set_rxfilter() one by one.
4458 ************************************************************************/
4459 static u8 *
4460 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4461 {
4462 struct ixgbe_mc_addr *mta;
4463
4464 mta = (struct ixgbe_mc_addr *)*update_ptr;
4465 *vmdq = mta->vmdq;
4466
4467 *update_ptr = (u8*)(mta + 1);
4468
4469 return (mta->addr);
4470 } /* ixgbe_mc_array_itr */
4471
4472 /************************************************************************
4473 * ixgbe_local_timer - Timer routine
4474 *
4475 * Checks for link status, updates statistics,
4476 * and runs the watchdog check.
4477 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Callout entry point: run the timer body under the core lock. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4487
/*
 * ixgbe_local_timer1:
 *
 *	Body of the per-second timer, called with the core lock held.
 *	Updates link state and statistics, aggregates per-ring error
 *	counters, checks Tx queues for hangs, and either re-arms itself
 *	or resets the adapter when every queue appears hung.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum the per-ring Tx DMA-setup error counters into the globals. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, v0);
	IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, v1);
	IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, v2);
	IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, v3);
	IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, v4);
	IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, v5);
	IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, v6);
	IXGBE_EVC_STORE(&adapter->tso_err, v7);

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log it, mark the interface down and reinit. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	IXGBE_EVC_ADD(&adapter->watchdog_events, 1);
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4592
/************************************************************************
 * ixgbe_recovery_mode_timer - Recovery mode timer routine
 *
 *   Self-rescheduling callout (fires every hz ticks) that polls the
 *   firmware recovery-mode status.  adapter->recovery_mode is updated
 *   with atomic_cas_uint() so the diagnostic message and adapter stop
 *   happen exactly once, on the 0 -> 1 transition.
 ************************************************************************/
static void
ixgbe_recovery_mode_timer(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	if (ixgbe_fw_recovery_mode(hw)) {
		/* Only act on the first detection (flag 0 -> 1) */
		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1) == 0) {
			/* Firmware error detected, entering recovery mode */
			device_printf(adapter->dev,
			    "Firmware recovery mode detected. Limiting "
			    "functionality. Refer to the Intel(R) Ethernet "
			    "Adapters and Devices User Guide for details on "
			    "firmware recovery mode.\n");

			/* Stop the hardware unless it is already stopped */
			if (hw->adapter_stopped == FALSE)
				ixgbe_stop_locked(adapter);
		}
	} else
		/* Recovery ended; clear the software flag (1 -> 0) */
		atomic_cas_uint(&adapter->recovery_mode, 1, 0);

	/* Re-arm to run again in one second */
	callout_reset(&adapter->recovery_mode_timer, hz,
	    ixgbe_recovery_mode_timer, adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_recovery_mode_timer */
4622
4623 /************************************************************************
4624 * ixgbe_sfp_probe
4625 *
4626 * Determine if a port had optics inserted.
4627 ************************************************************************/
4628 static bool
4629 ixgbe_sfp_probe(struct adapter *adapter)
4630 {
4631 struct ixgbe_hw *hw = &adapter->hw;
4632 device_t dev = adapter->dev;
4633 bool result = FALSE;
4634
4635 if ((hw->phy.type == ixgbe_phy_nl) &&
4636 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4637 s32 ret = hw->phy.ops.identify_sfp(hw);
4638 if (ret)
4639 goto out;
4640 ret = hw->phy.ops.reset(hw);
4641 adapter->sfp_probe = FALSE;
4642 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4643 device_printf(dev,"Unsupported SFP+ module detected!");
4644 device_printf(dev,
4645 "Reload driver with supported module.\n");
4646 goto out;
4647 } else
4648 device_printf(dev, "SFP+ module detected!\n");
4649 /* We now have supported optics */
4650 result = TRUE;
4651 }
4652 out:
4653
4654 return (result);
4655 } /* ixgbe_sfp_probe */
4656
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 *   Softint handler run when an SFP+ module insertion/removal
 *   interrupt fires.  Identifies the module, programs the MAC for it,
 *   then schedules the MSF softint to re-establish the link.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	IXGBE_CORE_LOCK(adapter);
	IXGBE_EVC_ADD(&adapter->mod_sicount, 1);
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * On parts needing the crosstalk fix, check the SFP cage
		 * presence pin in ESDP (SDP2 on 82599, SDP0 on X550EM)
		 * and bail out if the cage is empty.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		/* 82598 has no setup_sfp hook; a PHY reset is used instead */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}
	/* Let the MSF tasklet set up the link for the new module */
	softint_schedule(adapter->msf_si);
out:
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mod */
4723
4724
4725 /************************************************************************
4726 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4727 ************************************************************************/
4728 static void
4729 ixgbe_handle_msf(void *context)
4730 {
4731 struct adapter *adapter = context;
4732 struct ixgbe_hw *hw = &adapter->hw;
4733 u32 autoneg;
4734 bool negotiate;
4735
4736 IXGBE_CORE_LOCK(adapter);
4737 IXGBE_EVC_ADD(&adapter->msf_sicount, 1);
4738 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4739 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4740
4741 autoneg = hw->phy.autoneg_advertised;
4742 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4743 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4744 else
4745 negotiate = 0;
4746 if (hw->mac.ops.setup_link)
4747 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4748
4749 /* Adjust media types shown in ifconfig */
4750 ifmedia_removeall(&adapter->media);
4751 ixgbe_add_media_types(adapter);
4752 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4753 IXGBE_CORE_UNLOCK(adapter);
4754 } /* ixgbe_handle_msf */
4755
4756 /************************************************************************
4757 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4758 ************************************************************************/
4759 static void
4760 ixgbe_handle_phy(void *context)
4761 {
4762 struct adapter *adapter = context;
4763 struct ixgbe_hw *hw = &adapter->hw;
4764 int error;
4765
4766 IXGBE_EVC_ADD(&adapter->phy_sicount, 1);
4767 error = hw->phy.ops.handle_lasi(hw);
4768 if (error == IXGBE_ERR_OVERTEMP)
4769 device_printf(adapter->dev,
4770 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4771 " PHY will downshift to lower power state!\n");
4772 else if (error)
4773 device_printf(adapter->dev,
4774 "Error handling LASI interrupt: %d\n", error);
4775 } /* ixgbe_handle_phy */
4776
4777 static void
4778 ixgbe_ifstop(struct ifnet *ifp, int disable)
4779 {
4780 struct adapter *adapter = ifp->if_softc;
4781
4782 IXGBE_CORE_LOCK(adapter);
4783 ixgbe_stop_locked(adapter);
4784 IXGBE_CORE_UNLOCK(adapter);
4785 }
4786
/************************************************************************
 * ixgbe_stop_locked - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 * Must be called with the core lock held (asserted below).
 ************************************************************************/
static void
ixgbe_stop_locked(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
	/* Mask interrupts and cancel the periodic watchdog callout */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared before calling
	 * ixgbe_stop_adapter() -- presumably so the stop routine does a
	 * full stop instead of short-circuiting on an already-set flag;
	 * confirm against ixgbe_stop_adapter_generic().
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop_locked */
4828
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 *       Must be called with the core lock held (asserted below).
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Act only on the transition to LINK_STATE_UP */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				/* Translate the link speed for the console */
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Wait for in-flight TX/RX work to finish */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4922
4923 /************************************************************************
4924 * ixgbe_config_dmac - Configure DMA Coalescing
4925 ************************************************************************/
4926 static void
4927 ixgbe_config_dmac(struct adapter *adapter)
4928 {
4929 struct ixgbe_hw *hw = &adapter->hw;
4930 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4931
4932 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4933 return;
4934
4935 if (dcfg->watchdog_timer ^ adapter->dmac ||
4936 dcfg->link_speed ^ adapter->link_speed) {
4937 dcfg->watchdog_timer = adapter->dmac;
4938 dcfg->fcoe_en = false;
4939 dcfg->link_speed = adapter->link_speed;
4940 dcfg->num_tcs = 1;
4941
4942 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4943 dcfg->watchdog_timer, dcfg->link_speed);
4944
4945 hw->mac.ops.dmac_config(hw);
4946 }
4947 } /* ixgbe_config_dmac */
4948
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Builds the EIMS mask for the non-queue ("other") interrupt causes
 *   based on MAC type and enabled features, programs auto-clear for
 *   queue interrupts when MSI-X is in use, then enables every queue
 *   vector individually.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all enableable causes minus the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	/* Unmask the "other" causes first */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
		/*
		 * We use auto clear for RTX_QUEUE only. Don't use other
		 * interrupts (e.g. link interrupt). BTW, we don't use
		 * TCP_TIMER interrupt itself.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5031
5032 /************************************************************************
5033 * ixgbe_disable_intr_internal
5034 ************************************************************************/
5035 static void
5036 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5037 {
5038 struct ix_queue *que = adapter->queues;
5039
5040 /* disable interrupts other than queues */
5041 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5042
5043 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0)
5044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5045
5046 for (int i = 0; i < adapter->num_queues; i++, que++)
5047 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5048
5049 IXGBE_WRITE_FLUSH(&adapter->hw);
5050
5051 } /* ixgbe_do_disable_intr_internal */
5052
5053 /************************************************************************
5054 * ixgbe_disable_intr
5055 ************************************************************************/
5056 static void
5057 ixgbe_disable_intr(struct adapter *adapter)
5058 {
5059
5060 ixgbe_disable_intr_internal(adapter, true);
5061 } /* ixgbe_disable_intr */
5062
5063 /************************************************************************
5064 * ixgbe_ensure_disabled_intr
5065 ************************************************************************/
5066 void
5067 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5068 {
5069
5070 ixgbe_disable_intr_internal(adapter, false);
5071 } /* ixgbe_ensure_disabled_intr */
5072
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   INTx/MSI handler.  Saves the current EIMS, masks everything while
 *   reading EICR (errata workaround), services queue 0 inline plus via
 *   the deferred handler, delegates admin causes, then restores EIMS
 *   minus the causes whose handlers are still pending.
 *
 *   Returns 1 if the interrupt was ours, 0 otherwise.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	u32		eicr;
	u32		eims_orig;
	u32		eims_enable = 0;
	u32		eims_disable = 0;

	/* Remember what was enabled so it can be restored on exit */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/*
	 * Silicon errata #26 on 82598. Disable all interrupts before reading
	 * EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Read and clear EICR */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* No cause bits set: shared interrupt that wasn't ours */
	if (eicr == 0) {
		IXGBE_EVC_ADD(&adapter->stats.pf.intzero, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
		return 0;
	}
	IXGBE_EVC_ADD(&adapter->stats.pf.legint, 1);

	/* Queue (0) intr */
	if (((ifp->if_flags & IFF_RUNNING) != 0) &&
	    (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
		IXGBE_EVC_ADD(&que->irqs, 1);

		/*
		 * The same as ixgbe_msix_que() about
		 * "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);

		IXGBE_EVC_ADD(&que->req, 1);
		ixgbe_sched_handle_que(adapter, que);
		/* Disable queue 0 interrupt until the handler re-enables it */
		eims_disable |= 1UL << 0;
	} else
		/* Not serviced here; keep the queue bits as they were */
		eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE;

	/* Handle link/SFP/admin causes; may add bits to eims_disable */
	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS,
	    (eims_orig & ~eims_disable) | eims_enable);

	return 1;
} /* ixgbe_legacy_irq */
5140
5141 /************************************************************************
5142 * ixgbe_free_pciintr_resources
5143 ************************************************************************/
5144 static void
5145 ixgbe_free_pciintr_resources(struct adapter *adapter)
5146 {
5147 struct ix_queue *que = adapter->queues;
5148 int rid;
5149
5150 /*
5151 * Release all msix queue resources:
5152 */
5153 for (int i = 0; i < adapter->num_queues; i++, que++) {
5154 if (que->res != NULL) {
5155 pci_intr_disestablish(adapter->osdep.pc,
5156 adapter->osdep.ihs[i]);
5157 adapter->osdep.ihs[i] = NULL;
5158 }
5159 }
5160
5161 /* Clean the Legacy or Link interrupt last */
5162 if (adapter->vector) /* we are doing MSIX */
5163 rid = adapter->vector;
5164 else
5165 rid = 0;
5166
5167 if (adapter->osdep.ihs[rid] != NULL) {
5168 pci_intr_disestablish(adapter->osdep.pc,
5169 adapter->osdep.ihs[rid]);
5170 adapter->osdep.ihs[rid] = NULL;
5171 }
5172
5173 if (adapter->osdep.intrs != NULL) {
5174 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5175 adapter->osdep.nintrs);
5176 adapter->osdep.intrs = NULL;
5177 }
5178 } /* ixgbe_free_pciintr_resources */
5179
5180 /************************************************************************
5181 * ixgbe_free_pci_resources
5182 ************************************************************************/
5183 static void
5184 ixgbe_free_pci_resources(struct adapter *adapter)
5185 {
5186
5187 ixgbe_free_pciintr_resources(adapter);
5188
5189 if (adapter->osdep.mem_size != 0) {
5190 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5191 adapter->osdep.mem_bus_space_handle,
5192 adapter->osdep.mem_size);
5193 }
5194 } /* ixgbe_free_pci_resources */
5195
5196 /************************************************************************
5197 * ixgbe_sysctl_flowcntl
5198 *
5199 * SYSCTL wrapper around setting Flow Control
5200 ************************************************************************/
5201 static int
5202 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5203 {
5204 struct sysctlnode node = *rnode;
5205 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5206 int error, fc;
5207
5208 if (ixgbe_fw_recovery_mode_swflag(adapter))
5209 return (EPERM);
5210
5211 fc = adapter->hw.fc.current_mode;
5212 node.sysctl_data = &fc;
5213 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5214 if (error != 0 || newp == NULL)
5215 return error;
5216
5217 /* Don't bother if it's not changed */
5218 if (fc == adapter->hw.fc.current_mode)
5219 return (0);
5220
5221 return ixgbe_set_flowcntl(adapter, fc);
5222 } /* ixgbe_sysctl_flowcntl */
5223
5224 /************************************************************************
5225 * ixgbe_set_flowcntl - Set flow control
5226 *
5227 * Flow control values:
5228 * 0 - off
5229 * 1 - rx pause
5230 * 2 - tx pause
5231 * 3 - full
5232 ************************************************************************/
5233 static int
5234 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5235 {
5236 switch (fc) {
5237 case ixgbe_fc_rx_pause:
5238 case ixgbe_fc_tx_pause:
5239 case ixgbe_fc_full:
5240 adapter->hw.fc.requested_mode = fc;
5241 if (adapter->num_queues > 1)
5242 ixgbe_disable_rx_drop(adapter);
5243 break;
5244 case ixgbe_fc_none:
5245 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5246 if (adapter->num_queues > 1)
5247 ixgbe_enable_rx_drop(adapter);
5248 break;
5249 default:
5250 return (EINVAL);
5251 }
5252
5253 #if 0 /* XXX NetBSD */
5254 /* Don't autoneg if forcing a value */
5255 adapter->hw.fc.disable_fc_autoneg = TRUE;
5256 #endif
5257 ixgbe_fc_enable(&adapter->hw);
5258
5259 return (0);
5260 } /* ixgbe_set_flowcntl */
5261
5262 /************************************************************************
5263 * ixgbe_enable_rx_drop
5264 *
5265 * Enable the hardware to drop packets when the buffer is
5266 * full. This is useful with multiqueue, so that no single
5267 * queue being full stalls the entire RX engine. We only
5268 * enable this when Multiqueue is enabled AND Flow Control
5269 * is disabled.
5270 ************************************************************************/
5271 static void
5272 ixgbe_enable_rx_drop(struct adapter *adapter)
5273 {
5274 struct ixgbe_hw *hw = &adapter->hw;
5275 struct rx_ring *rxr;
5276 u32 srrctl;
5277
5278 for (int i = 0; i < adapter->num_queues; i++) {
5279 rxr = &adapter->rx_rings[i];
5280 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5281 srrctl |= IXGBE_SRRCTL_DROP_EN;
5282 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5283 }
5284
5285 /* enable drop for each vf */
5286 for (int i = 0; i < adapter->num_vfs; i++) {
5287 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5288 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5289 IXGBE_QDE_ENABLE));
5290 }
5291 } /* ixgbe_enable_rx_drop */
5292
5293 /************************************************************************
5294 * ixgbe_disable_rx_drop
5295 ************************************************************************/
5296 static void
5297 ixgbe_disable_rx_drop(struct adapter *adapter)
5298 {
5299 struct ixgbe_hw *hw = &adapter->hw;
5300 struct rx_ring *rxr;
5301 u32 srrctl;
5302
5303 for (int i = 0; i < adapter->num_queues; i++) {
5304 rxr = &adapter->rx_rings[i];
5305 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5306 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5307 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5308 }
5309
5310 /* disable drop for each vf */
5311 for (int i = 0; i < adapter->num_vfs; i++) {
5312 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5313 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5314 }
5315 } /* ixgbe_disable_rx_drop */
5316
5317 /************************************************************************
5318 * ixgbe_sysctl_advertise
5319 *
5320 * SYSCTL wrapper around setting advertised speed
5321 ************************************************************************/
5322 static int
5323 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5324 {
5325 struct sysctlnode node = *rnode;
5326 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5327 int error = 0, advertise;
5328
5329 if (ixgbe_fw_recovery_mode_swflag(adapter))
5330 return (EPERM);
5331
5332 advertise = adapter->advertise;
5333 node.sysctl_data = &advertise;
5334 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5335 if (error != 0 || newp == NULL)
5336 return error;
5337
5338 return ixgbe_set_advertise(adapter, advertise);
5339 } /* ixgbe_sysctl_advertise */
5340
5341 /************************************************************************
5342 * ixgbe_set_advertise - Control advertised link speed
5343 *
5344 * Flags:
5345 * 0x00 - Default (all capable link speed)
5346 * 0x1 - advertise 100 Mb
5347 * 0x2 - advertise 1G
5348 * 0x4 - advertise 10G
5349 * 0x8 - advertise 10 Mb (yes, Mb)
5350 * 0x10 - advertise 2.5G
5351 * 0x20 - advertise 5G
5352 ************************************************************************/
5353 static int
5354 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5355 {
5356 device_t dev;
5357 struct ixgbe_hw *hw;
5358 ixgbe_link_speed speed = 0;
5359 ixgbe_link_speed link_caps = 0;
5360 s32 err = IXGBE_NOT_IMPLEMENTED;
5361 bool negotiate = FALSE;
5362
5363 /* Checks to validate new value */
5364 if (adapter->advertise == advertise) /* no change */
5365 return (0);
5366
5367 dev = adapter->dev;
5368 hw = &adapter->hw;
5369
5370 /* No speed changes for backplane media */
5371 if (hw->phy.media_type == ixgbe_media_type_backplane)
5372 return (ENODEV);
5373
5374 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5375 (hw->phy.multispeed_fiber))) {
5376 device_printf(dev,
5377 "Advertised speed can only be set on copper or "
5378 "multispeed fiber media types.\n");
5379 return (EINVAL);
5380 }
5381
5382 if (advertise < 0x0 || advertise > 0x3f) {
5383 device_printf(dev, "Invalid advertised speed; "
5384 "valid modes are 0x0 through 0x3f\n");
5385 return (EINVAL);
5386 }
5387
5388 if (hw->mac.ops.get_link_capabilities) {
5389 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5390 &negotiate);
5391 if (err != IXGBE_SUCCESS) {
5392 device_printf(dev, "Unable to determine supported "
5393 "advertise speeds\n");
5394 return (ENODEV);
5395 }
5396 }
5397
5398 /* Set new value and report new advertised mode */
5399 if (advertise & 0x1) {
5400 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5401 device_printf(dev, "Interface does not support 100Mb "
5402 "advertised speed\n");
5403 return (EINVAL);
5404 }
5405 speed |= IXGBE_LINK_SPEED_100_FULL;
5406 }
5407 if (advertise & 0x2) {
5408 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5409 device_printf(dev, "Interface does not support 1Gb "
5410 "advertised speed\n");
5411 return (EINVAL);
5412 }
5413 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5414 }
5415 if (advertise & 0x4) {
5416 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5417 device_printf(dev, "Interface does not support 10Gb "
5418 "advertised speed\n");
5419 return (EINVAL);
5420 }
5421 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5422 }
5423 if (advertise & 0x8) {
5424 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5425 device_printf(dev, "Interface does not support 10Mb "
5426 "advertised speed\n");
5427 return (EINVAL);
5428 }
5429 speed |= IXGBE_LINK_SPEED_10_FULL;
5430 }
5431 if (advertise & 0x10) {
5432 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5433 device_printf(dev, "Interface does not support 2.5Gb "
5434 "advertised speed\n");
5435 return (EINVAL);
5436 }
5437 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5438 }
5439 if (advertise & 0x20) {
5440 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5441 device_printf(dev, "Interface does not support 5Gb "
5442 "advertised speed\n");
5443 return (EINVAL);
5444 }
5445 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5446 }
5447 if (advertise == 0)
5448 speed = link_caps; /* All capable link speed */
5449
5450 hw->mac.autotry_restart = TRUE;
5451 hw->mac.ops.setup_link(hw, speed, TRUE);
5452 adapter->advertise = advertise;
5453
5454 return (0);
5455 } /* ixgbe_set_advertise */
5456
5457 /************************************************************************
5458 * ixgbe_get_default_advertise - Get default advertised speed settings
5459 *
5460 * Formatted for sysctl usage.
5461 * Flags:
5462 * 0x1 - advertise 100 Mb
5463 * 0x2 - advertise 1G
5464 * 0x4 - advertise 10G
5465 * 0x8 - advertise 10 Mb (yes, Mb)
5466 * 0x10 - advertise 2.5G
5467 * 0x20 - advertise 5G
5468 ************************************************************************/
5469 static int
5470 ixgbe_get_default_advertise(struct adapter *adapter)
5471 {
5472 struct ixgbe_hw *hw = &adapter->hw;
5473 int speed;
5474 ixgbe_link_speed link_caps = 0;
5475 s32 err;
5476 bool negotiate = FALSE;
5477
5478 /*
5479 * Advertised speed means nothing unless it's copper or
5480 * multi-speed fiber
5481 */
5482 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5483 !(hw->phy.multispeed_fiber))
5484 return (0);
5485
5486 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5487 if (err != IXGBE_SUCCESS)
5488 return (0);
5489
5490 speed =
5491 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5492 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5493 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5494 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5495 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5496 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5497
5498 return speed;
5499 } /* ixgbe_get_default_advertise */
5500
5501 /************************************************************************
5502 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5503 *
5504 * Control values:
5505 * 0/1 - off / on (use default value of 1000)
5506 *
5507 * Legal timer values are:
5508 * 50,100,250,500,1000,2000,5000,10000
5509 *
5510 * Turning off interrupt moderation will also turn this off.
5511 ************************************************************************/
5512 static int
5513 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5514 {
5515 struct sysctlnode node = *rnode;
5516 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5517 struct ifnet *ifp = adapter->ifp;
5518 int error;
5519 int newval;
5520
5521 if (ixgbe_fw_recovery_mode_swflag(adapter))
5522 return (EPERM);
5523
5524 newval = adapter->dmac;
5525 node.sysctl_data = &newval;
5526 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5527 if ((error) || (newp == NULL))
5528 return (error);
5529
5530 switch (newval) {
5531 case 0:
5532 /* Disabled */
5533 adapter->dmac = 0;
5534 break;
5535 case 1:
5536 /* Enable and use default */
5537 adapter->dmac = 1000;
5538 break;
5539 case 50:
5540 case 100:
5541 case 250:
5542 case 500:
5543 case 1000:
5544 case 2000:
5545 case 5000:
5546 case 10000:
5547 /* Legal values - allow */
5548 adapter->dmac = newval;
5549 break;
5550 default:
5551 /* Do nothing, illegal value */
5552 return (EINVAL);
5553 }
5554
5555 /* Re-initialize hardware if it's already running */
5556 if (ifp->if_flags & IFF_RUNNING)
5557 ifp->if_init(ifp);
5558
5559 return (0);
5560 }
5561
5562 #ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this branch is never compiled and still carries
	 * FreeBSD-isms -- "req->newp" is FreeBSD's sysctl request field
	 * (NetBSD passes newp directly), and DEVICE_SUSPEND/DEVICE_RESUME
	 * are FreeBSD kobj methods.  It needs porting before the
	 * "notyet" guard can be dropped.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state testing is not implemented on NetBSD yet */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5607 #endif
5608
5609 /************************************************************************
5610 * ixgbe_sysctl_wol_enable
5611 *
5612 * Sysctl to enable/disable the WoL capability,
5613 * if supported by the adapter.
5614 *
5615 * Values:
5616 * 0 - disabled
5617 * 1 - enabled
5618 ************************************************************************/
5619 static int
5620 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5621 {
5622 struct sysctlnode node = *rnode;
5623 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5624 struct ixgbe_hw *hw = &adapter->hw;
5625 bool new_wol_enabled;
5626 int error = 0;
5627
5628 /*
5629 * It's not required to check recovery mode because this function never
5630 * touches hardware.
5631 */
5632 new_wol_enabled = hw->wol_enabled;
5633 node.sysctl_data = &new_wol_enabled;
5634 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5635 if ((error) || (newp == NULL))
5636 return (error);
5637 if (new_wol_enabled == hw->wol_enabled)
5638 return (0);
5639
5640 if (new_wol_enabled && !adapter->wol_support)
5641 return (ENODEV);
5642 else
5643 hw->wol_enabled = new_wol_enabled;
5644
5645 return (0);
5646 } /* ixgbe_sysctl_wol_enable */
5647
5648 /************************************************************************
5649 * ixgbe_sysctl_wufc - Wake Up Filter Control
5650 *
5651 * Sysctl to enable/disable the types of packets that the
5652 * adapter will wake up on upon receipt.
5653 * Flags:
5654 * 0x1 - Link Status Change
5655 * 0x2 - Magic Packet
5656 * 0x4 - Direct Exact
5657 * 0x8 - Directed Multicast
5658 * 0x10 - Broadcast
5659 * 0x20 - ARP/IPv4 Request Packet
5660 * 0x40 - Direct IPv4 Packet
5661 * 0x80 - Direct IPv6 Packet
5662 *
5663 * Settings not listed above will cause the sysctl to return an error.
5664 ************************************************************************/
5665 static int
5666 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5667 {
5668 struct sysctlnode node = *rnode;
5669 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5670 int error = 0;
5671 u32 new_wufc;
5672
5673 /*
5674 * It's not required to check recovery mode because this function never
5675 * touches hardware.
5676 */
5677 new_wufc = adapter->wufc;
5678 node.sysctl_data = &new_wufc;
5679 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5680 if ((error) || (newp == NULL))
5681 return (error);
5682 if (new_wufc == adapter->wufc)
5683 return (0);
5684
5685 if (new_wufc & 0xffffff00)
5686 return (EINVAL);
5687
5688 new_wufc &= 0xff;
5689 new_wufc |= (0xffffff & adapter->wufc);
5690 adapter->wufc = new_wufc;
5691
5692 return (0);
5693 } /* ixgbe_sysctl_wufc */
5694
5695 #ifdef IXGBE_DEBUG
5696 /************************************************************************
5697 * ixgbe_sysctl_print_rss_config
5698 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): unported FreeBSD code.  sbuf_new_for_sysctl() and
	 * "req" are FreeBSD sysctl interfaces; this body cannot compile on
	 * NetBSD and is kept under "notyet" until converted.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 128-entry redirection table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* Entries >= 32 live in the extended (ERETA) registers. */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	/* On NetBSD this sysctl is currently a no-op that reports success. */
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5755 #endif /* IXGBE_DEBUG */
5756
5757 /************************************************************************
5758 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5759 *
5760 * For X552/X557-AT devices using an external PHY
5761 ************************************************************************/
5762 static int
5763 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5764 {
5765 struct sysctlnode node = *rnode;
5766 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5767 struct ixgbe_hw *hw = &adapter->hw;
5768 int val;
5769 u16 reg;
5770 int error;
5771
5772 if (ixgbe_fw_recovery_mode_swflag(adapter))
5773 return (EPERM);
5774
5775 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5776 device_printf(adapter->dev,
5777 "Device has no supported external thermal sensor.\n");
5778 return (ENODEV);
5779 }
5780
5781 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5782 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5783 device_printf(adapter->dev,
5784 "Error reading from PHY's current temperature register\n");
5785 return (EAGAIN);
5786 }
5787
5788 node.sysctl_data = &val;
5789
5790 /* Shift temp for output */
5791 val = reg >> 8;
5792
5793 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5794 if ((error) || (newp == NULL))
5795 return (error);
5796
5797 return (0);
5798 } /* ixgbe_sysctl_phy_temp */
5799
5800 /************************************************************************
5801 * ixgbe_sysctl_phy_overtemp_occurred
5802 *
5803 * Reports (directly from the PHY) whether the current PHY
5804 * temperature is over the overtemp threshold.
5805 ************************************************************************/
5806 static int
5807 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5808 {
5809 struct sysctlnode node = *rnode;
5810 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5811 struct ixgbe_hw *hw = &adapter->hw;
5812 int val, error;
5813 u16 reg;
5814
5815 if (ixgbe_fw_recovery_mode_swflag(adapter))
5816 return (EPERM);
5817
5818 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5819 device_printf(adapter->dev,
5820 "Device has no supported external thermal sensor.\n");
5821 return (ENODEV);
5822 }
5823
5824 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5825 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5826 device_printf(adapter->dev,
5827 "Error reading from PHY's temperature status register\n");
5828 return (EAGAIN);
5829 }
5830
5831 node.sysctl_data = &val;
5832
5833 /* Get occurrence bit */
5834 val = !!(reg & 0x4000);
5835
5836 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5837 if ((error) || (newp == NULL))
5838 return (error);
5839
5840 return (0);
5841 } /* ixgbe_sysctl_phy_overtemp_occurred */
5842
5843 /************************************************************************
5844 * ixgbe_sysctl_eee_state
5845 *
5846 * Sysctl to set EEE power saving feature
5847 * Values:
5848 * 0 - disable EEE
5849 * 1 - enable EEE
5850 * (none) - get current device EEE state
5851 ************************************************************************/
5852 static int
5853 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5854 {
5855 struct sysctlnode node = *rnode;
5856 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5857 struct ifnet *ifp = adapter->ifp;
5858 device_t dev = adapter->dev;
5859 int curr_eee, new_eee, error = 0;
5860 s32 retval;
5861
5862 if (ixgbe_fw_recovery_mode_swflag(adapter))
5863 return (EPERM);
5864
5865 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5866 node.sysctl_data = &new_eee;
5867 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5868 if ((error) || (newp == NULL))
5869 return (error);
5870
5871 /* Nothing to do */
5872 if (new_eee == curr_eee)
5873 return (0);
5874
5875 /* Not supported */
5876 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5877 return (EINVAL);
5878
5879 /* Bounds checking */
5880 if ((new_eee < 0) || (new_eee > 1))
5881 return (EINVAL);
5882
5883 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
5884 if (retval) {
5885 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5886 return (EINVAL);
5887 }
5888
5889 /* Restart auto-neg */
5890 ifp->if_init(ifp);
5891
5892 device_printf(dev, "New EEE state: %d\n", new_eee);
5893
5894 /* Cache new value */
5895 if (new_eee)
5896 adapter->feat_en |= IXGBE_FEATURE_EEE;
5897 else
5898 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5899
5900 return (error);
5901 } /* ixgbe_sysctl_eee_state */
5902
5903 #define PRINTQS(adapter, regname) \
5904 do { \
5905 struct ixgbe_hw *_hw = &(adapter)->hw; \
5906 int _i; \
5907 \
5908 printf("%s: %s", device_xname((adapter)->dev), #regname); \
5909 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
5910 printf((_i == 0) ? "\t" : " "); \
5911 printf("%08x", IXGBE_READ_REG(_hw, \
5912 IXGBE_##regname(_i))); \
5913 } \
5914 printf("\n"); \
5915 } while (0)
5916
5917 /************************************************************************
5918 * ixgbe_print_debug_info
5919 *
5920 * Called only when em_display_debug_stats is enabled.
5921 * Provides a way to take a look at important statistics
5922 * maintained by the driver and hardware.
5923 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/*
	 * X550 family MACs have a 128-entry RSS redirection table
	 * (RETA plus extended ERETA); older MACs have only 32 entries.
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the RSS redirection table; entries >= 32 live in ERETA. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Header row of queue indices, then one row per RX ring register. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* Each RQSMR register covers four queues, hence num_queues / 4. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Per-queue software counter of how often the queue was disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask registers; EIMS_EX exists only on post-82598 MACs. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
	device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
	device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
} /* ixgbe_print_debug_info */
5991
5992 /************************************************************************
5993 * ixgbe_sysctl_debug
5994 ************************************************************************/
5995 static int
5996 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5997 {
5998 struct sysctlnode node = *rnode;
5999 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6000 int error, result = 0;
6001
6002 if (ixgbe_fw_recovery_mode_swflag(adapter))
6003 return (EPERM);
6004
6005 node.sysctl_data = &result;
6006 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6007
6008 if (error || newp == NULL)
6009 return error;
6010
6011 if (result == 1)
6012 ixgbe_print_debug_info(adapter);
6013
6014 return 0;
6015 } /* ixgbe_sysctl_debug */
6016
6017 /************************************************************************
6018 * ixgbe_sysctl_rx_copy_len
6019 ************************************************************************/
6020 static int
6021 ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6022 {
6023 struct sysctlnode node = *rnode;
6024 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6025 int error;
6026 int result = adapter->rx_copy_len;
6027
6028 node.sysctl_data = &result;
6029 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6030
6031 if (error || newp == NULL)
6032 return error;
6033
6034 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6035 return EINVAL;
6036
6037 adapter->rx_copy_len = result;
6038
6039 return 0;
6040 } /* ixgbe_sysctl_rx_copy_len */
6041
6042 /************************************************************************
6043 * ixgbe_sysctl_tx_process_limit
6044 ************************************************************************/
6045 static int
6046 ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6047 {
6048 struct sysctlnode node = *rnode;
6049 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6050 int error;
6051 int result = adapter->tx_process_limit;
6052
6053 node.sysctl_data = &result;
6054 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6055
6056 if (error || newp == NULL)
6057 return error;
6058
6059 if ((result <= 0) || (result > adapter->num_tx_desc))
6060 return EINVAL;
6061
6062 adapter->tx_process_limit = result;
6063
6064 return 0;
6065 } /* ixgbe_sysctl_tx_process_limit */
6066
6067 /************************************************************************
6068 * ixgbe_sysctl_rx_process_limit
6069 ************************************************************************/
6070 static int
6071 ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6072 {
6073 struct sysctlnode node = *rnode;
6074 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6075 int error;
6076 int result = adapter->rx_process_limit;
6077
6078 node.sysctl_data = &result;
6079 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6080
6081 if (error || newp == NULL)
6082 return error;
6083
6084 if ((result <= 0) || (result > adapter->num_rx_desc))
6085 return EINVAL;
6086
6087 adapter->rx_process_limit = result;
6088
6089 return 0;
6090 } /* ixgbe_sysctl_rx_process_limit */
6091
6092 /************************************************************************
6093 * ixgbe_init_device_features
6094 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Features every supported MAC is assumed capable of. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Only the 82598AT variant has the fan-failure pin. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only exposed on function 0 of the bypass SKU. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot use INTx at all. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only exposed on function 0 of the bypass SKU. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* The QSFP SKU cannot use INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support "
			    "Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		/* RSS and SR-IOV both require MSI-X vectors. */
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6213
6214 /************************************************************************
6215 * ixgbe_probe - Device identification routine
6216 *
6217 * Determines if the driver should be loaded on
6218 * adapter based on its PCI vendor/device ID.
6219 *
6220 * return BUS_PROBE_DEFAULT on success, positive on failure
6221 ************************************************************************/
6222 static int
6223 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6224 {
6225 const struct pci_attach_args *pa = aux;
6226
6227 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6228 }
6229
6230 static const ixgbe_vendor_info_t *
6231 ixgbe_lookup(const struct pci_attach_args *pa)
6232 {
6233 const ixgbe_vendor_info_t *ent;
6234 pcireg_t subid;
6235
6236 INIT_DEBUGOUT("ixgbe_lookup: begin");
6237
6238 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6239 return NULL;
6240
6241 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6242
6243 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6244 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6245 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6246 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6247 (ent->subvendor_id == 0)) &&
6248 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6249 (ent->subdevice_id == 0))) {
6250 return ent;
6251 }
6252 }
6253 return NULL;
6254 }
6255
6256 static int
6257 ixgbe_ifflags_cb(struct ethercom *ec)
6258 {
6259 struct ifnet *ifp = &ec->ec_if;
6260 struct adapter *adapter = ifp->if_softc;
6261 u_short change;
6262 int rv = 0;
6263
6264 IXGBE_CORE_LOCK(adapter);
6265
6266 change = ifp->if_flags ^ adapter->if_flags;
6267 if (change != 0)
6268 adapter->if_flags = ifp->if_flags;
6269
6270 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6271 rv = ENETRESET;
6272 goto out;
6273 } else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
6274 ixgbe_set_rxfilter(adapter);
6275
6276 /* Set up VLAN support and filter */
6277 ixgbe_setup_vlan_hw_support(adapter);
6278
6279 out:
6280 IXGBE_CORE_UNLOCK(adapter);
6281
6282 return rv;
6283 }
6284
6285 /************************************************************************
6286 * ixgbe_ioctl - Ioctl entry point
6287 *
6288 * Called when the user wants to configure the interface.
6289 *
6290 * return 0 on success, positive on failure
6291 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int		error = 0;
	int l4csum_en;
	/* The four L4 RX checksum capabilities toggled as one unit below. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* First switch: debug tracing only (SIOCZIFDATA also clears stats). */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both the hardware counters and the event counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command handling. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only SFP EEPROM (0xA0) and diagnostics (0xA2) addresses. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code do the generic work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			/* Interface down: nothing to reprogram. */
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Reinitialize so the new caps/MTU take effect. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6429
6430 /************************************************************************
6431 * ixgbe_check_fan_failure
6432 ************************************************************************/
6433 static void
6434 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6435 {
6436 u32 mask;
6437
6438 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6439 IXGBE_ESDP_SDP1;
6440
6441 if ((reg & mask) == 0)
6442 return;
6443
6444 /*
6445 * Use ratecheck() just in case interrupt occur frequently.
6446 * When EXPX9501AT's fan stopped, interrupt occurred only once,
6447 * an red LED on the board turned on and link never up until
6448 * power off.
6449 */
6450 if (ratecheck(&adapter->lasterr_time, &ixgbe_errlog_intrvl))
6451 device_printf(adapter->dev,
6452 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6453 } /* ixgbe_check_fan_failure */
6454
6455 /************************************************************************
6456 * ixgbe_handle_que
6457 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;

	/* Count every invocation of this deferred handler. */
	IXGBE_EVC_ADD(&que->handleq, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reap completed TX descriptors under the TX lock. */
		IXGBE_TX_LOCK(txr);
		more = ixgbe_txeof(txr);
		/* Drain any backlog queued for multiqueue transmit. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		/* RX reaping happens outside the TX lock. */
		more |= ixgbe_rxeof(que);
	}

	if (more) {
		/* Work remains: reschedule ourselves instead of re-enabling
		 * the interrupt, so we don't starve other activity. */
		IXGBE_EVC_ADD(&que->req, 1);
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* MSIX: Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else {
		/* INTx or MSI */
		ixgbe_enable_queue(adapter, 0);
	}

	return;
} /* ixgbe_handle_que */
6497
6498 /************************************************************************
6499 * ixgbe_handle_que_work
6500 ************************************************************************/
6501 static void
6502 ixgbe_handle_que_work(struct work *wk, void *context)
6503 {
6504 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6505
6506 /*
6507 * "enqueued flag" is not required here.
6508 * See ixgbe_msix_que().
6509 */
6510 ixgbe_handle_que(que);
6511 }
6512
6513 /************************************************************************
6514 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6515 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char		intrbuf[PCI_INTRSTR_LEN];
	char		wqname[MAXCOMLEN];
	const char	*intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/* Establish failed: release and maybe retry with INTx. */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record which interrupt flavor we actually ended up with. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		/* Both MSI and INTx failed (or INTx isn't capable). */
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		/* Softint plus workqueue for deferred multiqueue TX. */
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx",
		    device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	/* Softint plus workqueue for the single TX/RX queue itself. */
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any of the deferred-processing contexts didn't come up. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6622
/************************************************************************
 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
 *
 *   Allocates adapter->num_queues + 1 MSI-X vectors: one per RX/TX
 *   queue pair and a final one for link (admin) events.  For each
 *   queue vector this establishes the hard interrupt handler, binds
 *   the vector to a CPU (round-robin, or the RSS bucket's CPU when
 *   RSS is enabled), and creates the deferred-processing softints.
 *   Also creates the deferred-Tx and Tx/Rx workqueues shared by all
 *   queues, and (for SR-IOV capable parts) the mailbox softint.
 *
 *   Returns 0 on success or an errno; on failure every interrupt,
 *   softint and PCI resource acquired so far is released via err_out.
 ************************************************************************/
static int
ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	pci_chipset_tag_t pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[32];
	char wqname[MAXCOMLEN];
	const char *intrstr = NULL;
	int error, vector = 0;
	int cpu_id = 0;
	kcpuset_t *affinity;
#ifdef RSS
	unsigned int rss_buckets = 0;
	kcpuset_t cpu_mask;
#endif

	pc = adapter->osdep.pc;
#ifdef RSS
	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there's more queues than RSS buckets, we'll end
	 *   up with queues that get no traffic.
	 *
	 * + If there's more RSS buckets than queues, we'll end
	 *   up having multiple RSS buckets map to the same queue,
	 *   so there'll be some contention.
	 */
	rss_buckets = rss_getnumbuckets();
	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
	    (adapter->num_queues != rss_buckets)) {
		device_printf(dev,
		    "%s: number of queues (%d) != number of RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, adapter->num_queues, rss_buckets);
	}
#endif

	/* One vector per queue pair, plus one for link events. */
	adapter->osdep.nintrs = adapter->num_queues + 1;
	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
	    adapter->osdep.nintrs) != 0) {
		aprint_error_dev(dev,
		    "failed to allocate MSI-X interrupt\n");
		/* Caller will fall back to MSI/INTx once MSIX is cleared. */
		adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
		return (ENXIO);
	}

	kcpuset_create(&affinity, false);
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
		    device_xname(dev), i);
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
		    intr_xname);
		if (que->res == NULL) {
			aprint_error_dev(dev,
			    "Failed to register QUE handler\n");
			error = ENXIO;
			goto err_out;
		}
		que->msix = vector;
		/* Record this vector in the EIMS-style active-queue mask. */
		adapter->active_queues |= 1ULL << que->msix;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
#ifdef RSS
			/*
			 * The queue ID is used as the RSS layer bucket ID.
			 * We look up the queue ID -> RSS CPU ID and select
			 * that.
			 */
			cpu_id = rss_getcpu(i % rss_getnumbuckets());
			CPU_SETOF(cpu_id, &cpu_mask);
#endif
		} else {
			/*
			 * Bind the MSI-X vector, and thus the
			 * rings to the corresponding CPU.
			 *
			 * This just happens to match the default RSS
			 * round-robin bucket -> queue -> CPU allocation.
			 */
			if (adapter->num_queues > 1)
				cpu_id = i;
		}
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		/* Best effort: failure only means no explicit binding. */
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0) {
#if 1 /* def IXGBE_DEBUG */
			/*
			 * NOTE(review): permanently enabled; looks like a
			 * debug-only message left on — confirm intent.
			 */
#ifdef RSS
			aprint_normal(", bound RSS bucket %d to CPU %d", i,
			    cpu_id % ncpu);
#else
			aprint_normal(", bound queue %d to cpu %d", i,
			    cpu_id % ncpu);
#endif
#endif /* IXGBE_DEBUG */
		}
		aprint_normal("\n");

		/* Deferred transmit softint (only for multiqueue Tx path). */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			txr->txr_si = softint_establish(
			    SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			    ixgbe_deferred_mq_start, txr);
			if (txr->txr_si == NULL) {
				aprint_error_dev(dev,
				    "couldn't establish software interrupt\n");
				error = ENXIO;
				goto err_out;
			}
		}
		/* Per-queue deferred RX/TX processing softint. */
		que->que_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_handle_que, que);
		if (que->que_si == NULL) {
			aprint_error_dev(dev,
			    "couldn't establish software interrupt\n");
			error = ENXIO;
			goto err_out;
		}
	}
	/* One deferred-Tx workqueue shared by all queues. */
	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
	error = workqueue_create(&adapter->txr_wq, wqname,
	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "couldn't create workqueue for deferred Tx\n");
		goto err_out;
	}
	/* Per-CPU counter of work items enqueued on txr_wq. */
	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));

	/* One Tx/Rx workqueue shared by all queues. */
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
		goto err_out;
	}

	/* and Link: the final vector, one past the last queue vector. */
	cpu_id++;
	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
	adapter->vector = vector;
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
	    true);
#endif
	/* Set the link handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
	    intr_xname);
	if (adapter->osdep.ihs[vector] == NULL) {
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		error = ENXIO;
		goto err_out;
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
	    NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s", intrstr);
	if (error == 0)
		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
	else
		aprint_normal("\n");

	/* SR-IOV parts also need a softint for PF<->VF mailbox events. */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		adapter->mbx_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_handle_mbx, adapter);
		if (adapter->mbx_si == NULL) {
			aprint_error_dev(dev,
			    "could not establish software interrupts\n");

			error = ENXIO;
			goto err_out;
		}
	}

	kcpuset_destroy(affinity);
	aprint_normal_dev(dev,
	    "Using MSI-X interrupts with %d vectors\n", vector + 1);

	return (0);

err_out:
	/* Undo everything established above before reporting failure. */
	kcpuset_destroy(affinity);
	ixgbe_free_softint(adapter);
	ixgbe_free_pciintr_resources(adapter);
	return (error);
} /* ixgbe_allocate_msix */
6839
6840 /************************************************************************
6841 * ixgbe_configure_interrupts
6842 *
6843 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6844 * This will also depend on user settings.
6845 ************************************************************************/
6846 static int
6847 ixgbe_configure_interrupts(struct adapter *adapter)
6848 {
6849 device_t dev = adapter->dev;
6850 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6851 int want, queues, msgs;
6852
6853 /* Default to 1 queue if MSI-X setup fails */
6854 adapter->num_queues = 1;
6855
6856 /* Override by tuneable */
6857 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6858 goto msi;
6859
6860 /*
6861 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6862 * interrupt slot.
6863 */
6864 if (ncpu == 1)
6865 goto msi;
6866
6867 /* First try MSI-X */
6868 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6869 msgs = MIN(msgs, IXG_MAX_NINTR);
6870 if (msgs < 2)
6871 goto msi;
6872
6873 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6874
6875 /* Figure out a reasonable auto config value */
6876 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6877
6878 #ifdef RSS
6879 /* If we're doing RSS, clamp at the number of RSS buckets */
6880 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6881 queues = min(queues, rss_getnumbuckets());
6882 #endif
6883 if (ixgbe_num_queues > queues) {
6884 aprint_error_dev(adapter->dev,
6885 "ixgbe_num_queues (%d) is too large, "
6886 "using reduced amount (%d).\n", ixgbe_num_queues, queues);
6887 ixgbe_num_queues = queues;
6888 }
6889
6890 if (ixgbe_num_queues != 0)
6891 queues = ixgbe_num_queues;
6892 else
6893 queues = min(queues,
6894 min(mac->max_tx_queues, mac->max_rx_queues));
6895
6896 /* reflect correct sysctl value */
6897 ixgbe_num_queues = queues;
6898
6899 /*
6900 * Want one vector (RX/TX pair) per queue
6901 * plus an additional for Link.
6902 */
6903 want = queues + 1;
6904 if (msgs >= want)
6905 msgs = want;
6906 else {
6907 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6908 "%d vectors but %d queues wanted!\n", msgs, want);
6909 goto msi;
6910 }
6911 adapter->num_queues = queues;
6912 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6913 return (0);
6914
6915 /*
6916 * MSI-X allocation failed or provided us with
6917 * less vectors than needed. Free MSI-X resources
6918 * and we'll try enabling MSI.
6919 */
6920 msi:
6921 /* Without MSI-X, some features are no longer supported */
6922 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6923 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6924 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6925 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6926
6927 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6928 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
6929 if (msgs > 1)
6930 msgs = 1;
6931 if (msgs != 0) {
6932 msgs = 1;
6933 adapter->feat_en |= IXGBE_FEATURE_MSI;
6934 return (0);
6935 }
6936
6937 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6938 aprint_error_dev(dev,
6939 "Device does not support legacy interrupts.\n");
6940 return 1;
6941 }
6942
6943 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6944
6945 return (0);
6946 } /* ixgbe_configure_interrupts */
6947
6948
6949 /************************************************************************
6950 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6951 *
6952 * Done outside of interrupt context since the driver might sleep
6953 ************************************************************************/
6954 static void
6955 ixgbe_handle_link(void *context)
6956 {
6957 struct adapter *adapter = context;
6958 struct ixgbe_hw *hw = &adapter->hw;
6959
6960 IXGBE_CORE_LOCK(adapter);
6961 IXGBE_EVC_ADD(&adapter->link_sicount, 1);
6962 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6963 ixgbe_update_link_status(adapter);
6964
6965 /* Re-enable link interrupts */
6966 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6967
6968 IXGBE_CORE_UNLOCK(adapter);
6969 } /* ixgbe_handle_link */
6970
6971 /************************************************************************
6972 * ixgbe_rearm_queues
6973 ************************************************************************/
6974 static __inline void
6975 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6976 {
6977 u32 mask;
6978
6979 switch (adapter->hw.mac.type) {
6980 case ixgbe_mac_82598EB:
6981 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6982 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6983 break;
6984 case ixgbe_mac_82599EB:
6985 case ixgbe_mac_X540:
6986 case ixgbe_mac_X550:
6987 case ixgbe_mac_X550EM_x:
6988 case ixgbe_mac_X550EM_a:
6989 mask = (queues & 0xFFFFFFFF);
6990 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6991 mask = (queues >> 32);
6992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6993 break;
6994 default:
6995 break;
6996 }
6997 } /* ixgbe_rearm_queues */
6998