/* $NetBSD: ixgbe.c,v 1.285 2021/06/29 21:03:36 pgoyette Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.285 2021/06/29 21:03:36 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#include "opt_ixgbe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
        "Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int ixgbe_probe(device_t, cfdata_t, void *);
static void ixgbe_quirks(struct adapter *);
static void ixgbe_attach(device_t, device_t, void *);
static int ixgbe_detach(device_t, int);
#if 0
static int ixgbe_shutdown(device_t);
#endif
static bool ixgbe_suspend(device_t, const pmf_qual_t *);
static bool ixgbe_resume(device_t, const pmf_qual_t *);
static int ixgbe_ifflags_cb(struct ethercom *);
static int ixgbe_ioctl(struct ifnet *, u_long, void *);
static int ixgbe_init(struct ifnet *);
static void ixgbe_init_locked(struct adapter *);
static void ixgbe_ifstop(struct ifnet *, int);
static void ixgbe_stop_locked(void *);
static void ixgbe_init_device_features(struct adapter *);
static int ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(struct adapter *);
static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int ixgbe_media_change(struct ifnet *);
static int ixgbe_allocate_pci_resources(struct adapter *,
    const struct pci_attach_args *);
static void ixgbe_free_deferred_handlers(struct adapter *);
static void ixgbe_get_slot_info(struct adapter *);
static int ixgbe_allocate_msix(struct adapter *,
    const struct pci_attach_args *);
static int ixgbe_allocate_legacy(struct adapter *,
    const struct pci_attach_args *);
static int ixgbe_configure_interrupts(struct adapter *);
static void ixgbe_free_pciintr_resources(struct adapter *);
static void ixgbe_free_pci_resources(struct adapter *);
static void ixgbe_local_timer(void *);
static void ixgbe_handle_timer(struct work *, void *);
static void ixgbe_recovery_mode_timer(void *);
static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int ixgbe_setup_interface(device_t, struct adapter *);
static void ixgbe_config_gpie(struct adapter *);
static void ixgbe_config_dmac(struct adapter *);
static void ixgbe_config_delay_values(struct adapter *);
static void ixgbe_schedule_admin_tasklet(struct adapter *);
static void ixgbe_config_link(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *);
static int ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void ixgbe_initialize_transmit_units(struct adapter *);
static void ixgbe_initialize_receive_units(struct adapter *);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);
static void ixgbe_initialize_rss_mapping(struct adapter *);

static void ixgbe_enable_intr(struct adapter *);
static void ixgbe_disable_intr(struct adapter *);
static void ixgbe_update_stats_counters(struct adapter *);
static void ixgbe_set_rxfilter(struct adapter *);
static void ixgbe_update_link_status(struct adapter *);
static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void ixgbe_configure_ivars(struct adapter *);
static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void ixgbe_setup_vlan_hw_support(struct adapter *);
static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int ixgbe_register_vlan(struct adapter *, u16);
static int ixgbe_unregister_vlan(struct adapter *, u16);

static void ixgbe_add_device_sysctls(struct adapter *);
static void ixgbe_add_hw_stats(struct adapter *);
static void ixgbe_clear_evcnt(struct adapter *);
static int ixgbe_set_flowcntl(struct adapter *, int);
static int ixgbe_set_advertise(struct adapter *, int);
static int ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void ixgbe_set_sysctl_value(struct adapter *, const char *,
    const char *, int *, int);
static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Interrupt functions */
static int ixgbe_msix_que(void *);
static int ixgbe_msix_admin(void *);
static void ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
static int ixgbe_legacy_irq(void *);

/* Event handlers running on workqueue */
static void ixgbe_handle_que(void *);
static void ixgbe_handle_link(void *);
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *, bool);
static void ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void ixgbe_handle_admin(struct work *, void *);
static void ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation, which means that the interrupt
 * rate is varied over time based on the traffic for that interrupt
 * vector.
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
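
/*
 * For reference: with the usual IXGBE_LOW_LATENCY value of 128 (see
 * ixgbe.h), the default above works out to 4000000 / 128 = 31250
 * interrupts per second per vector.
 */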

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses a workqueue or a softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.  This only works as a compile
 * option right now as it's set during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of queues; can be set to 0, in which case it autoconfigures
 * based on the number of CPUs with a maximum of 8.  This can be
 * overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring, set higher than RX as this
 * seems the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Turning this on allows the use of unsupported SFP+ modules;
 * note that doing so means you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE 1
#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS 0
#define IXGBE_SOFTINT_FLAGS 0
#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS 0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 reta = 0, mrqc, rss_key[10];
        int queue_id, table_size, index_mult;
        int i, j;
        u32 rss_hash_config;

        /* Force use of the default RSS key. */
#ifdef __NetBSD__
        rss_getkey((uint8_t *) &rss_key);
#else
        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *) &rss_key);
        } else {
                /* set up random bits */
                cprng_fast(&rss_key, sizeof(rss_key));
        }
#endif

        /* Set multiplier for RETA setup and table size based on MAC */
        index_mult = 0x1;
        table_size = 128;
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                index_mult = 0x11;
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                table_size = 512;
                break;
        default:
                break;
        }

        /* Set up the redirection table */
        for (i = 0, j = 0; i < table_size; i++, j++) {
                if (j == adapter->num_queues)
                        j = 0;

                if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_queues).
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % adapter->num_queues;
                } else
                        queue_id = (j * index_mult);

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 */
                reta = reta >> 8;
                reta = reta | (((uint32_t) queue_id) << 24);
                if ((i & 3) == 3) {
                        if (i < 128)
                                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                        else
                                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                                    reta);
                        reta = 0;
                }
        }
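
        /*
         * Worked example of the packing above: with four queues, RSS
         * disabled and index_mult == 1, the first four entries are the
         * queue ids 0, 1, 2 and 3, so RETA(0) is written as 0x03020100;
         * entry i always ends up in byte (i & 3) of its register.
         */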

        /* Now fill our hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

        /* Perform hash on these packet types */
        if (adapter->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                    | RSS_HASHTYPE_RSS_TCP_IPV4
                    | RSS_HASHTYPE_RSS_IPV6
                    | RSS_HASHTYPE_RSS_TCP_IPV6
                    | RSS_HASHTYPE_RSS_IPV6_EX
                    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
        }

        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
        mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
        struct rx_ring *rxr = adapter->rx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ifnet *ifp = adapter->ifp;
        int i, j;
        u32 bufsz, fctrl, srrctl, rxcsum;
        u32 hlreg;

        /*
         * Make sure receives are disabled while
         * setting up the descriptor ring
         */
        ixgbe_disable_rx(hw);

        /* Enable broadcasts */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                fctrl |= IXGBE_FCTRL_DPF;
                fctrl |= IXGBE_FCTRL_PMCF;
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        /* Set for Jumbo Frames? */
        hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (ifp->if_mtu > ETHERMTU)
                hlreg |= IXGBE_HLREG0_JUMBOEN;
        else
                hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
        /* CRC stripping is conditional in Netmap */
        if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
            (ifp->if_capenable & IFCAP_NETMAP) &&
            !ix_crcstrip)
                hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
        else
#endif /* DEV_NETMAP */
                hlreg |= IXGBE_HLREG0_RXCRCSTRP;

        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

        bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
            IXGBE_SRRCTL_BSIZEPKT_SHIFT;
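        /*
         * For example, with the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10
         * (1 KB granularity), a 2048-byte rx_mbuf_sz yields bufsz == 2.
         */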

        for (i = 0; i < adapter->num_queues; i++, rxr++) {
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg;
                int regnum = i / 4;     /* 1 register per 4 queues */
                int regshift = i % 4;   /* 4 bits per 1 queue */
                j = rxr->me;

                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Set up the SRRCTL register */
                srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
                srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                srrctl |= bufsz;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                /* Set RQSMR (Receive Queue Statistic Mapping) register */
                reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
                reg &= ~(0x000000ffUL << (regshift * 8));
                reg |= i << (regshift * 8);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
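                /*
                 * e.g. queue 5 maps to RQSMR(1) bits 15:8 (regnum 1,
                 * regshift 1), receiving statistic index 5.
                 */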

                /*
                 * Set DROP_EN iff we have no flow control and >1 queue.
                 * Note that srrctl was cleared shortly before, during reset,
                 * so we do not need to clear the bit, but do it just in case
                 * this code is moved elsewhere.
                 */
                if (adapter->num_queues > 1 &&
                    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
                        srrctl |= IXGBE_SRRCTL_DROP_EN;
                } else {
                        srrctl &= ~IXGBE_SRRCTL_DROP_EN;
                }

                IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

                /* Setup the HW Rx Head and Tail Descriptor Pointers */
                IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

                /* Set the driver rx tail address */
                rxr->tail = IXGBE_RDT(rxr->me);
        }

        if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
                u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        ixgbe_initialize_rss_mapping(adapter);

        if (adapter->num_queues > 1) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        /* This is useful for calculating UDP/IP fragment checksums */
        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

        /* Setup the Base and Length of the Tx Descriptor Ring */
        for (i = 0; i < adapter->num_queues; i++, txr++) {
                u64 tdba = txr->txdma.dma_paddr;
                u32 txctrl = 0;
                u32 tqsmreg, reg;
                int regnum = i / 4;     /* 1 register per 4 queues */
                int regshift = i % 4;   /* 4 bits per 1 queue */
                int j = txr->me;

                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
                    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

                /*
                 * Set TQSMR (Transmit Queue Statistic Mapping) register.
                 * Register location is different between 82598 and others.
                 */
                if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                        tqsmreg = IXGBE_TQSMR(regnum);
                else
                        tqsmreg = IXGBE_TQSM(regnum);
                reg = IXGBE_READ_REG(hw, tqsmreg);
                reg &= ~(0x000000ffUL << (regshift * 8));
                reg |= i << (regshift * 8);
                IXGBE_WRITE_REG(hw, tqsmreg, reg);

                /* Setup the HW Tx Head and Tail descriptor pointers */
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

                /* Cache the tail address */
                txr->tail = IXGBE_TDT(j);

                txr->txr_no_space = false;

                /* Disable Head Writeback */
                /*
                 * Note: for X550 series devices, these registers are actually
                 * prefixed with TPH_ instead of DCA_, but the addresses and
                 * fields remain the same.
                 */
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
                        break;
                default:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
                        break;
                }
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
                        break;
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
                        break;
                }

        }

        if (hw->mac.type != ixgbe_mac_82598EB) {
                u32 dmatxctl, rttdcs;

                dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                dmatxctl |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
                /* Disable arbiter to set MTQC */
                rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                rttdcs |= IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
                IXGBE_WRITE_REG(hw, IXGBE_MTQC,
                    ixgbe_get_mtqc(adapter->iov_mode));
                rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        }

        return;
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        const char *vendor, *product;

        if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
                /*
                 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
                 * MA10-ST0.
                 */
                vendor = pmf_get_platform("system-vendor");
                product = pmf_get_platform("system-product");

                if ((vendor == NULL) || (product == NULL))
                        return;

                if ((strcmp(vendor, "GIGABYTE") == 0) &&
                    (strcmp(product, "MA10-ST0") == 0)) {
                        aprint_verbose_dev(dev,
                            "Enable SFP+ MOD_ABS inverse quirk\n");
                        adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
                }
        }
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
        struct adapter *adapter;
        struct ixgbe_hw *hw;
        int error = -1;
        u32 ctrl_ext;
        u16 high, low, nvmreg;
        pcireg_t id, subid;
        const ixgbe_vendor_info_t *ent;
        struct pci_attach_args *pa = aux;
        bool unsupported_sfp = false;
        const char *str;
        char wqname[MAXCOMLEN];
        char buf[256];

        INIT_DEBUGOUT("ixgbe_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        adapter = device_private(dev);
        adapter->hw.back = adapter;
        adapter->dev = dev;
        hw = &adapter->hw;
        adapter->osdep.pc = pa->pa_pc;
        adapter->osdep.tag = pa->pa_tag;
        if (pci_dma64_available(pa))
                adapter->osdep.dmat = pa->pa_dmat64;
        else
                adapter->osdep.dmat = pa->pa_dmat;
        adapter->osdep.attached = false;
        adapter->osdep.detaching = false;

        ent = ixgbe_lookup(pa);

        KASSERT(ent != NULL);

        aprint_normal(": %s, Version - %s\n",
            ixgbe_strings[ent->index], ixgbe_driver_version);

        /* Core Lock Init */
        IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

        /* Set up the timer callout and workqueue */
        callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
        snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
        error = workqueue_create(&adapter->timer_wq, wqname,
            ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
            IXGBE_TASKLET_WQ_FLAGS);
        if (error) {
                aprint_error_dev(dev,
                    "could not create timer workqueue (%d)\n", error);
                goto err_out;
        }

        /* Determine hardware revision */
        id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
        subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

        hw->vendor_id = PCI_VENDOR(id);
        hw->device_id = PCI_PRODUCT(id);
        hw->revision_id =
            PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
        hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
        hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

        /* Set quirk flags */
        ixgbe_quirks(adapter);

        /*
         * Make sure BUSMASTER is set
         */
        ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(adapter, pa)) {
                aprint_error_dev(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        /*
         * Initialize the shared code
         */
        if (ixgbe_init_shared_code(hw) != 0) {
                aprint_error_dev(dev, "Unable to initialize the shared code\n");
                error = ENXIO;
                goto err_out;
        }

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                str = "82598EB";
                break;
        case ixgbe_mac_82599EB:
                str = "82599EB";
                break;
        case ixgbe_mac_X540:
                str = "X540";
                break;
        case ixgbe_mac_X550:
                str = "X550";
                break;
        case ixgbe_mac_X550EM_x:
                str = "X550EM X";
                break;
        case ixgbe_mac_X550EM_a:
                str = "X550EM A";
                break;
        default:
                str = "Unknown";
                break;
        }
        aprint_normal_dev(dev, "device %s\n", str);

        if (hw->mbx.ops.init_params)
                hw->mbx.ops.init_params(hw);

        hw->allow_unsupported_sfp = allow_unsupported_sfp;

        /* Pick up the 82599 settings */
        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw->phy.smart_speed = ixgbe_smart_speed;
                adapter->num_segs = IXGBE_82599_SCATTER;
        } else
                adapter->num_segs = IXGBE_82598_SCATTER;

        /* Ensure SW/FW semaphore is free */
        ixgbe_init_swfw_semaphore(hw);

        hw->mac.ops.set_lan_id(hw);
        ixgbe_init_device_features(adapter);

        if (ixgbe_configure_interrupts(adapter)) {
                error = ENXIO;
                goto err_out;
        }

        /* Allocate multicast array memory. */
        adapter->mta = malloc(sizeof(*adapter->mta) *
            MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

        /* Enable WoL (if supported) */
        ixgbe_check_wol_support(adapter);

        /* Register for VLAN events */
        ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

        /* Verify adapter fan is still functional (if applicable) */
        if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
                u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
                ixgbe_check_fan_failure(adapter, esdp, FALSE);
        }

        /* Set an initial default flow control value */
        hw->fc.requested_mode = ixgbe_flow_control;

        /* Sysctls for limiting the amount of work done in the taskqueues */
        ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
            "max number of rx packets to process",
            &adapter->rx_process_limit, ixgbe_rx_process_limit);

        ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
            "max number of tx packets to process",
            &adapter->tx_process_limit, ixgbe_tx_process_limit);

        /* Do descriptor calc and sanity checks */
        if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
                aprint_error_dev(dev, "TXD config issue, using default!\n");
                adapter->num_tx_desc = DEFAULT_TXD;
        } else
                adapter->num_tx_desc = ixgbe_txd;

        if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
                aprint_error_dev(dev, "RXD config issue, using default!\n");
                adapter->num_rx_desc = DEFAULT_RXD;
        } else
                adapter->num_rx_desc = ixgbe_rxd;

        adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;

        /* Allocate our TX/RX Queues */
        if (ixgbe_allocate_queues(adapter)) {
                error = ENOMEM;
                goto err_out;
        }

        hw->phy.reset_if_overtemp = TRUE;
        error = ixgbe_reset_hw(hw);
        hw->phy.reset_if_overtemp = FALSE;
        if (error == IXGBE_ERR_SFP_NOT_PRESENT)
                error = IXGBE_SUCCESS;
        else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
                unsupported_sfp = true;
                error = IXGBE_SUCCESS;
        } else if (error) {
                aprint_error_dev(dev,
                    "Hardware initialization failed(error = %d)\n", error);
                error = EIO;
                goto err_late;
        }

        /* Make sure we have a good EEPROM before we read from it */
        if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
                aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
                error = EIO;
                goto err_late;
        }

        aprint_normal("%s:", device_xname(dev));
        /* NVM Image Version */
        high = low = 0;
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550EM_a:
                hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
                if (nvmreg == 0xffff)
                        break;
                high = (nvmreg >> 12) & 0x0f;
                low = (nvmreg >> 4) & 0xff;
                id = nvmreg & 0x0f;
                aprint_normal(" NVM Image Version %u.", high);
                if (hw->mac.type == ixgbe_mac_X540)
                        str = "%x";
                else
                        str = "%02x";
                aprint_normal(str, low);
                aprint_normal(" ID 0x%x,", id);
                break;
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550:
                hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
                if (nvmreg == 0xffff)
                        break;
                high = (nvmreg >> 12) & 0x0f;
                low = nvmreg & 0xff;
                aprint_normal(" NVM Image Version %u.%02x,", high, low);
                break;
        default:
                break;
        }
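
        /*
         * Decoding example for the X550 case above: a raw nvmreg of
         * 0x1042 prints as "NVM Image Version 1.42" (high nibble 1,
         * low byte 0x42 rendered with %02x).
         */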
        hw->eeprom.nvm_image_ver_high = high;
        hw->eeprom.nvm_image_ver_low = low;

        /* PHY firmware revision */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
                hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
                if (nvmreg == 0xffff)
                        break;
                high = (nvmreg >> 12) & 0x0f;
                low = (nvmreg >> 4) & 0xff;
                id = nvmreg & 0x000f;
                aprint_normal(" PHY FW Revision %u.", high);
                if (hw->mac.type == ixgbe_mac_X540)
                        str = "%x";
                else
                        str = "%02x";
                aprint_normal(str, low);
                aprint_normal(" ID 0x%x,", id);
                break;
        default:
                break;
        }

        /* NVM Map version & OEM NVM Image version */
        switch (hw->mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
                if (nvmreg != 0xffff) {
                        high = (nvmreg >> 12) & 0x0f;
                        low = nvmreg & 0x00ff;
                        aprint_normal(" NVM Map version %u.%02x,", high, low);
                }
                hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
                if (nvmreg != 0xffff) {
                        high = (nvmreg >> 12) & 0x0f;
                        low = nvmreg & 0x00ff;
                        aprint_verbose(" OEM NVM Image version %u.%02x,", high,
                            low);
                }
                break;
        default:
                break;
        }

        /* Print the ETrackID */
        hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
        hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
        aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

        if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
                error = ixgbe_allocate_msix(adapter, pa);
                if (error) {
                        /* Free allocated queue structures first */
                        ixgbe_free_queues(adapter);

                        /* Fallback to legacy interrupt */
                        adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
                        if (adapter->feat_cap & IXGBE_FEATURE_MSI)
                                adapter->feat_en |= IXGBE_FEATURE_MSI;
                        adapter->num_queues = 1;

                        /* Allocate our TX/RX Queues again */
                        if (ixgbe_allocate_queues(adapter)) {
                                error = ENOMEM;
                                goto err_out;
                        }
                }
        }
        /* Recovery mode */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* >= 2.00 */
                if (hw->eeprom.nvm_image_ver_high >= 2) {
                        adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
                        adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
                }
                break;
        default:
                break;
        }

        if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
                error = ixgbe_allocate_legacy(adapter, pa);
        if (error)
                goto err_late;

        /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
        mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
        snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
        error = workqueue_create(&adapter->admin_wq, wqname,
            ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
            IXGBE_TASKLET_WQ_FLAGS);
        if (error) {
                aprint_error_dev(dev,
                    "could not create admin workqueue (%d)\n", error);
                goto err_out;
        }

        error = ixgbe_start_hw(hw);
        switch (error) {
        case IXGBE_ERR_EEPROM_VERSION:
                aprint_error_dev(dev, "This device is a pre-production adapter/"
                    "LOM. Please be aware there may be issues associated "
                    "with your hardware.\nIf you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.\n");
                break;
        default:
                break;
        }

        /* Setup OS specific network interface */
        if (ixgbe_setup_interface(dev, adapter) != 0)
                goto err_late;

        /*
         * Print the PHY ID only for copper PHYs.  On devices which have an
         * SFP(+) cage with a module inserted, phy.id is not an MII PHY ID
         * but an SFF-8024 ID.
         */
        if (hw->phy.media_type == ixgbe_media_type_copper) {
                uint16_t id1, id2;
                int oui, model, rev;
                char descr[MII_MAX_DESCR_LEN];

                id1 = hw->phy.id >> 16;
                id2 = hw->phy.id & 0xffff;
                oui = MII_OUI(id1, id2);
                model = MII_MODEL(id2);
                rev = MII_REV(id2);
                mii_get_descr(descr, sizeof(descr), oui, model);
                if (descr[0])
                        aprint_normal_dev(dev,
                            "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
                            descr, oui, model, rev);
                else
                        aprint_normal_dev(dev,
                            "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
                            oui, model, rev);
        }

        /* Enable EEE power saving */
        if (adapter->feat_cap & IXGBE_FEATURE_EEE)
                hw->mac.ops.setup_eee(hw,
                    adapter->feat_en & IXGBE_FEATURE_EEE);

        /* Enable power to the phy. */
        if (!unsupported_sfp) {
                /* Enable the optics for 82599 SFP+ fiber */
                ixgbe_enable_tx_laser(hw);

                /*
                 * XXX Currently, ixgbe_set_phy_power() supports only copper
                 * PHYs, so testing with !unsupported_sfp is not required.
                 */
                ixgbe_set_phy_power(hw, TRUE);
        }

        /* Initialize statistics */
        ixgbe_update_stats_counters(adapter);

        /* Check PCIE slot type/speed/width */
        ixgbe_get_slot_info(adapter);

        /*
         * Do time init and sysctl init here, but
         * only on the first port of a bypass adapter.
         */
        ixgbe_bypass_init(adapter);

        /* Set an initial dmac value */
        adapter->dmac = 0;
        /* Set initial advertised speeds (if applicable) */
        adapter->advertise = ixgbe_get_advertise(adapter);

        if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
                ixgbe_define_iov_schemas(dev, &error);

        /* Add sysctls */
        ixgbe_add_device_sysctls(adapter);
        ixgbe_add_hw_stats(adapter);

        /* For Netmap */
        adapter->init_locked = ixgbe_init_locked;
        adapter->stop_locked = ixgbe_stop_locked;

        if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
                ixgbe_netmap_attach(adapter);

        snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
        aprint_verbose_dev(dev, "feature cap %s\n", buf);
        snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
        aprint_verbose_dev(dev, "feature ena %s\n", buf);

        if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
                pmf_class_network_register(dev, adapter->ifp);
        else
                aprint_error_dev(dev, "couldn't establish power handler\n");

        /* Init recovery mode timer and state variable */
        if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
                adapter->recovery_mode = 0;

                /* Set up the timer callout */
                callout_init(&adapter->recovery_mode_timer,
                    IXGBE_CALLOUT_FLAGS);
                snprintf(wqname, sizeof(wqname), "%s-recovery",
                    device_xname(dev));
                error = workqueue_create(&adapter->recovery_mode_timer_wq,
                    wqname, ixgbe_handle_recovery_mode_timer, adapter,
                    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
                if (error) {
                        aprint_error_dev(dev, "could not create "
                            "recovery_mode_timer workqueue (%d)\n", error);
                        goto err_out;
                }

                /* Start the task */
                callout_reset(&adapter->recovery_mode_timer, hz,
                    ixgbe_recovery_mode_timer, adapter);
        }

        INIT_DEBUGOUT("ixgbe_attach: end");
        adapter->osdep.attached = true;

        return;

err_late:
        ixgbe_free_queues(adapter);
err_out:
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
        ixgbe_free_deferred_handlers(adapter);
        ixgbe_free_pci_resources(adapter);
        if (adapter->mta != NULL)
                free(adapter->mta, M_DEVBUF);
        mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
        IXGBE_CORE_LOCK_DESTROY(adapter);

        return;
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u16 dev_caps = 0;

        /* Find out WoL support for port */
        adapter->wol_support = hw->wol_enabled = 0;
        ixgbe_get_device_caps(hw, &dev_caps);
        if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
            ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
             hw->bus.func == 0))
                adapter->wol_support = hw->wol_enabled = 1;

        /* Save initial wake up filter configuration */
        adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

        return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ethercom *ec = &adapter->osdep.ec;
        struct ifnet *ifp;

        INIT_DEBUGOUT("ixgbe_setup_interface: begin");

        ifp = adapter->ifp = &ec->ec_if;
        strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_init = ixgbe_init;
        ifp->if_stop = ixgbe_ifstop;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
        ifp->if_extflags = IFEF_MPSAFE;
#endif
        ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
        /* TSO parameters */
        ifp->if_hw_tsomax = 65518;
        ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
        ifp->if_hw_tsomaxsegsize = 2048;
#endif
        if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
                ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
        } else {
                ifp->if_transmit = ixgbe_mq_start;
#if 0
                ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
        }
        ifp->if_start = ixgbe_legacy_start;
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
        IFQ_SET_READY(&ifp->if_snd);

        if_initialize(ifp);
        adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
        ether_ifattach(ifp, adapter->hw.mac.addr);
        aprint_normal_dev(dev, "Ethernet address %s\n",
            ether_sprintf(adapter->hw.mac.addr));
        /*
         * We use per TX queue softint, so if_deferred_start_init() isn't
         * used.
         */
        ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

        adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
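        /* e.g. with the default MTU of 1500 this is 1518 (14-byte header
         * plus 4-byte CRC). */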

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        /* Set capability flags */
        ifp->if_capabilities |= IFCAP_RXCSUM
            | IFCAP_TXCSUM
            | IFCAP_TSOv4
            | IFCAP_TSOv6;
        ifp->if_capenable = 0;

        ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
            | ETHERCAP_VLAN_HWCSUM
            | ETHERCAP_JUMBO_MTU
            | ETHERCAP_VLAN_MTU;

        /* Enable the above capabilities by default */
        ec->ec_capenable = ec->ec_capabilities;

        /*
         * Don't turn this on by default.  If vlans are created on another
         * pseudo device (e.g. lagg), vlan events are not passed through,
         * breaking operation, but with HW FILTER off it works.  If you are
         * using vlans directly on the ixgbe driver you can enable this and
         * get full hardware tag filtering.
         */
1414 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1415
1416 /*
1417 * Specify the media types supported by this adapter and register
1418 * callbacks to update media and link information
1419 */
1420 ec->ec_ifmedia = &adapter->media;
1421 ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
1422 ixgbe_media_status, &adapter->core_mtx);
1423
1424 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1425 ixgbe_add_media_types(adapter);
1426
1427 /* Set autoselect media by default */
1428 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1429
1430 if_register(ifp);
1431
1432 return (0);
1433 } /* ixgbe_setup_interface */
1434
1435 /************************************************************************
1436 * ixgbe_add_media_types
1437 ************************************************************************/
1438 static void
1439 ixgbe_add_media_types(struct adapter *adapter)
1440 {
1441 struct ixgbe_hw *hw = &adapter->hw;
1442 u64 layer;
1443
1444 layer = adapter->phy_layer;
1445
1446 #define ADD(mm, dd) \
1447 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1448
1449 ADD(IFM_NONE, 0);
1450
1451 /* Media types with matching NetBSD media defines */
1452 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1453 ADD(IFM_10G_T | IFM_FDX, 0);
1454 }
1455 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1456 ADD(IFM_1000_T | IFM_FDX, 0);
1457 }
1458 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1459 ADD(IFM_100_TX | IFM_FDX, 0);
1460 }
1461 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1462 ADD(IFM_10_T | IFM_FDX, 0);
1463 }
1464
1465 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1466 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1467 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1468 }
1469
1470 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1471 ADD(IFM_10G_LR | IFM_FDX, 0);
1472 if (hw->phy.multispeed_fiber) {
1473 ADD(IFM_1000_LX | IFM_FDX, 0);
1474 }
1475 }
1476 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1477 ADD(IFM_10G_SR | IFM_FDX, 0);
1478 if (hw->phy.multispeed_fiber) {
1479 ADD(IFM_1000_SX | IFM_FDX, 0);
1480 }
1481 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1482 ADD(IFM_1000_SX | IFM_FDX, 0);
1483 }
1484 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1485 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1486 }
1487
1488 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1489 ADD(IFM_10G_KR | IFM_FDX, 0);
1490 }
1491 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1492 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1493 }
1494 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1495 ADD(IFM_1000_KX | IFM_FDX, 0);
1496 }
1497 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1498 ADD(IFM_2500_KX | IFM_FDX, 0);
1499 }
1500 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1501 ADD(IFM_2500_T | IFM_FDX, 0);
1502 }
1503 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1504 ADD(IFM_5000_T | IFM_FDX, 0);
1505 }
1506 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1507 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1508 /* XXX no ifmedia_set? */
1509
1510 ADD(IFM_AUTO, 0);
1511
1512 #undef ADD
1513 } /* ixgbe_add_media_types */
1514
1515 /************************************************************************
1516 * ixgbe_is_sfp
1517 ************************************************************************/
1518 static inline bool
1519 ixgbe_is_sfp(struct ixgbe_hw *hw)
1520 {
1521 switch (hw->mac.type) {
1522 case ixgbe_mac_82598EB:
1523 if (hw->phy.type == ixgbe_phy_nl)
1524 return (TRUE);
1525 return (FALSE);
1526 case ixgbe_mac_82599EB:
1527 case ixgbe_mac_X550EM_x:
1528 case ixgbe_mac_X550EM_a:
1529 switch (hw->mac.ops.get_media_type(hw)) {
1530 case ixgbe_media_type_fiber:
1531 case ixgbe_media_type_fiber_qsfp:
1532 return (TRUE);
1533 default:
1534 return (FALSE);
1535 }
1536 default:
1537 return (FALSE);
1538 }
1539 } /* ixgbe_is_sfp */
1540
1541 static void
1542 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1543 {
1544
1545 KASSERT(mutex_owned(&adapter->admin_mtx));
1546
1547 if (__predict_true(adapter->osdep.detaching == false)) {
1548 if (adapter->admin_pending == 0)
1549 workqueue_enqueue(adapter->admin_wq,
1550 &adapter->admin_wc, NULL);
1551 adapter->admin_pending = 1;
1552 }
1553 }
1554
1555 /************************************************************************
1556 * ixgbe_config_link
1557 ************************************************************************/
1558 static void
1559 ixgbe_config_link(struct adapter *adapter)
1560 {
1561 struct ixgbe_hw *hw = &adapter->hw;
1562 u32 autoneg, err = 0;
1563 u32 task_requests = 0;
1564 bool sfp, negotiate = false;
1565
1566 sfp = ixgbe_is_sfp(hw);
1567
1568 if (sfp) {
1569 if (hw->phy.multispeed_fiber) {
1570 ixgbe_enable_tx_laser(hw);
1571 task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1572 }
1573 task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1574
1575 mutex_enter(&adapter->admin_mtx);
1576 adapter->task_requests |= task_requests;
1577 ixgbe_schedule_admin_tasklet(adapter);
1578 mutex_exit(&adapter->admin_mtx);
1579 } else {
1580 struct ifmedia *ifm = &adapter->media;
1581
1582 if (hw->mac.ops.check_link)
1583 err = ixgbe_check_link(hw, &adapter->link_speed,
1584 &adapter->link_up, FALSE);
1585 if (err)
1586 return;
1587
1588 /*
1589 * Check if it's the first call. If it's the first call,
1590 * get value for auto negotiation.
1591 */
1592 autoneg = hw->phy.autoneg_advertised;
1593 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1594 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1595 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1596 &negotiate);
1597 if (err)
1598 return;
1599 if (hw->mac.ops.setup_link)
1600 err = hw->mac.ops.setup_link(hw, autoneg,
1601 adapter->link_up);
1602 }
1603
1604 } /* ixgbe_config_link */
1605
1606 /************************************************************************
1607 * ixgbe_update_stats_counters - Update board statistics counters.
1608 ************************************************************************/
1609 static void
1610 ixgbe_update_stats_counters(struct adapter *adapter)
1611 {
1612 struct ifnet *ifp = adapter->ifp;
1613 struct ixgbe_hw *hw = &adapter->hw;
1614 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1615 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1616 u64 total_missed_rx = 0;
1617 uint64_t crcerrs, rlec;
1618 unsigned int queue_counters;
1619 int i;
1620
1621 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1622 stats->crcerrs.ev_count += crcerrs;
1623 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1624 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1625 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1626 if (hw->mac.type >= ixgbe_mac_X550)
1627 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1628
1629 /* 16 registers exist */
1630 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1631 for (i = 0; i < queue_counters; i++) {
1632 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1633 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1634 if (hw->mac.type >= ixgbe_mac_82599EB) {
1635 stats->qprdc[i].ev_count
1636 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1637 }
1638 }
1639
1640 /* 8 registers exist */
1641 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1642 uint32_t mp;
1643
1644 /* MPC */
1645 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1646 		/* global total per traffic class */
1647 stats->mpc[i].ev_count += mp;
1648 /* running comprehensive total for stats display */
1649 total_missed_rx += mp;
1650
1651 if (hw->mac.type == ixgbe_mac_82598EB)
1652 stats->rnbc[i].ev_count
1653 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1654
1655 stats->pxontxc[i].ev_count
1656 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1657 stats->pxofftxc[i].ev_count
1658 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1659 if (hw->mac.type >= ixgbe_mac_82599EB) {
1660 stats->pxonrxc[i].ev_count
1661 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1662 stats->pxoffrxc[i].ev_count
1663 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1664 stats->pxon2offc[i].ev_count
1665 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1666 } else {
1667 stats->pxonrxc[i].ev_count
1668 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1669 stats->pxoffrxc[i].ev_count
1670 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1671 }
1672 }
1673 stats->mpctotal.ev_count += total_missed_rx;
1674
1675 	/* The datasheet says M[LR]FC are only valid when link is up and 10Gbps */
1676 if ((adapter->link_active == LINK_STATE_UP)
1677 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1678 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1679 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1680 }
1681 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1682 stats->rlec.ev_count += rlec;
1683
1684 /* Hardware workaround, gprc counts missed packets */
1685 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1686
1687 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1688 stats->lxontxc.ev_count += lxon;
1689 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1690 stats->lxofftxc.ev_count += lxoff;
1691 total = lxon + lxoff;
1692
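	/*
	 * PAUSE frames sent for flow control are minimum-length frames
	 * that the hardware counts as good octets transmitted, so back
	 * their octets (total pause frames * ETHER_MIN_LEN) out of GOTC
	 * below.
	 */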
1693 if (hw->mac.type != ixgbe_mac_82598EB) {
1694 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1695 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1696 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1697 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1698 - total * ETHER_MIN_LEN;
1699 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1700 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1701 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1702 stats->lxoffrxc.ev_count
1703 += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1704 } else {
1705 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1706 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1707 /* 82598 only has a counter in the high register */
1708 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1709 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH)
1710 - total * ETHER_MIN_LEN;
1711 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1712 }
1713
1714 /*
1715 * Workaround: mprc hardware is incorrectly counting
1716 * broadcasts, so for now we subtract those.
1717 */
1718 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1719 stats->bprc.ev_count += bprc;
1720 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1721 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1722
1723 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1724 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1725 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1726 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1727 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1728 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1729
1730 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1731 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1732 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1733
1734 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1735 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1736 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1737 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1738 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1739 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1740 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1741 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1742 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1743 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1744 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1745 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1746 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1747 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1748 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1749 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1750 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1751 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1752 /* Only read FCOE on 82599 */
1753 if (hw->mac.type != ixgbe_mac_82598EB) {
1754 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1755 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1756 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1757 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1758 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1759 }
1760
1761 /*
1762 * Fill out the OS statistics structure. Only RX errors are required
1763 * here because all TX counters are incremented in the TX path and
1764 * normal RX counters are prepared in ether_input().
1765 */
1766 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1767 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1768 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1769 IF_STAT_PUTREF(ifp);
1770 } /* ixgbe_update_stats_counters */
1771
1772 /************************************************************************
1773 * ixgbe_add_hw_stats
1774 *
1775 * Add sysctl variables, one per statistic, to the system.
1776 ************************************************************************/
1777 static void
1778 ixgbe_add_hw_stats(struct adapter *adapter)
1779 {
1780 device_t dev = adapter->dev;
1781 const struct sysctlnode *rnode, *cnode;
1782 struct sysctllog **log = &adapter->sysctllog;
1783 struct tx_ring *txr = adapter->tx_rings;
1784 struct rx_ring *rxr = adapter->rx_rings;
1785 struct ixgbe_hw *hw = &adapter->hw;
1786 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1787 const char *xname = device_xname(dev);
1788 int i;
1789
1790 /* Driver Statistics */
1791 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1792 NULL, xname, "Driver tx dma soft fail EFBIG");
1793 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1794 NULL, xname, "m_defrag() failed");
1795 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1796 NULL, xname, "Driver tx dma hard fail EFBIG");
1797 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1798 NULL, xname, "Driver tx dma hard fail EINVAL");
1799 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1800 NULL, xname, "Driver tx dma hard fail other");
1801 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1802 NULL, xname, "Driver tx dma soft fail EAGAIN");
1803 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1804 NULL, xname, "Driver tx dma soft fail ENOMEM");
1805 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1806 NULL, xname, "Watchdog timeouts");
1807 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1808 NULL, xname, "TSO errors");
1809 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1810 NULL, xname, "Admin MSI-X IRQ Handled");
1811 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1812 NULL, xname, "Link event");
1813 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1814 NULL, xname, "SFP+ module event");
1815 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1816 NULL, xname, "Multispeed event");
1817 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1818 NULL, xname, "External PHY event");
1819
1820 	/* The max number of traffic classes is 8 */
1821 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1822 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1823 snprintf(adapter->tcs[i].evnamebuf,
1824 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1825 xname, i);
1826 if (i < __arraycount(stats->mpc)) {
1827 evcnt_attach_dynamic(&stats->mpc[i],
1828 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1829 "RX Missed Packet Count");
1830 if (hw->mac.type == ixgbe_mac_82598EB)
1831 evcnt_attach_dynamic(&stats->rnbc[i],
1832 EVCNT_TYPE_MISC, NULL,
1833 adapter->tcs[i].evnamebuf,
1834 "Receive No Buffers");
1835 }
1836 if (i < __arraycount(stats->pxontxc)) {
1837 evcnt_attach_dynamic(&stats->pxontxc[i],
1838 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1839 "pxontxc");
1840 evcnt_attach_dynamic(&stats->pxonrxc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1842 "pxonrxc");
1843 evcnt_attach_dynamic(&stats->pxofftxc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1845 "pxofftxc");
1846 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1848 "pxoffrxc");
1849 if (hw->mac.type >= ixgbe_mac_82599EB)
1850 evcnt_attach_dynamic(&stats->pxon2offc[i],
1851 EVCNT_TYPE_MISC, NULL,
1852 adapter->tcs[i].evnamebuf,
1853 "pxon2offc");
1854 }
1855 }
1856
1857 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1858 #ifdef LRO
1859 struct lro_ctrl *lro = &rxr->lro;
1860 #endif /* LRO */
1861
1862 snprintf(adapter->queues[i].evnamebuf,
1863 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1864 xname, i);
1865 snprintf(adapter->queues[i].namebuf,
1866 sizeof(adapter->queues[i].namebuf), "q%d", i);
1867
1868 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1869 aprint_error_dev(dev, "could not create sysctl root\n");
1870 break;
1871 }
1872
1873 if (sysctl_createv(log, 0, &rnode, &rnode,
1874 0, CTLTYPE_NODE,
1875 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1876 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1877 break;
1878
1879 if (sysctl_createv(log, 0, &rnode, &cnode,
1880 CTLFLAG_READWRITE, CTLTYPE_INT,
1881 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1882 ixgbe_sysctl_interrupt_rate_handler, 0,
1883 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1884 break;
1885
1886 if (sysctl_createv(log, 0, &rnode, &cnode,
1887 CTLFLAG_READONLY, CTLTYPE_INT,
1888 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1889 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1890 0, CTL_CREATE, CTL_EOL) != 0)
1891 break;
1892
1893 if (sysctl_createv(log, 0, &rnode, &cnode,
1894 CTLFLAG_READONLY, CTLTYPE_INT,
1895 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1896 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1897 0, CTL_CREATE, CTL_EOL) != 0)
1898 break;
1899
1900 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1901 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1902 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1903 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1904 "Handled queue in softint");
1905 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1906 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1907 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1908 NULL, adapter->queues[i].evnamebuf, "TSO");
1909 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1910 NULL, adapter->queues[i].evnamebuf,
1911 "TX Queue No Descriptor Available");
1912 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1913 NULL, adapter->queues[i].evnamebuf,
1914 "Queue Packets Transmitted");
1915 #ifndef IXGBE_LEGACY_TX
1916 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1917 NULL, adapter->queues[i].evnamebuf,
1918 "Packets dropped in pcq");
1919 #endif
1920
1921 if (sysctl_createv(log, 0, &rnode, &cnode,
1922 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1923 SYSCTL_DESCR("Receive Descriptor next to check"),
1924 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1925 CTL_CREATE, CTL_EOL) != 0)
1926 break;
1927
1928 if (sysctl_createv(log, 0, &rnode, &cnode,
1929 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1930 SYSCTL_DESCR("Receive Descriptor Head"),
1931 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1932 CTL_CREATE, CTL_EOL) != 0)
1933 break;
1934
1935 if (sysctl_createv(log, 0, &rnode, &cnode,
1936 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1937 SYSCTL_DESCR("Receive Descriptor Tail"),
1938 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1939 CTL_CREATE, CTL_EOL) != 0)
1940 break;
1941
1942 if (i < __arraycount(stats->qprc)) {
1943 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1944 NULL, adapter->queues[i].evnamebuf, "qprc");
1945 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1946 NULL, adapter->queues[i].evnamebuf, "qptc");
1947 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1948 NULL, adapter->queues[i].evnamebuf, "qbrc");
1949 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1950 NULL, adapter->queues[i].evnamebuf, "qbtc");
1951 if (hw->mac.type >= ixgbe_mac_82599EB)
1952 evcnt_attach_dynamic(&stats->qprdc[i],
1953 EVCNT_TYPE_MISC, NULL,
1954 adapter->queues[i].evnamebuf, "qprdc");
1955 }
1956
1957 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1958 NULL, adapter->queues[i].evnamebuf,
1959 "Queue Packets Received");
1960 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1961 NULL, adapter->queues[i].evnamebuf,
1962 "Queue Bytes Received");
1963 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1964 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1965 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1966 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1967 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1968 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1969 #ifdef LRO
1970 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1971 CTLFLAG_RD, &lro->lro_queued, 0,
1972 "LRO Queued");
1973 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1974 CTLFLAG_RD, &lro->lro_flushed, 0,
1975 "LRO Flushed");
1976 #endif /* LRO */
1977 }
1978
1979 /* MAC stats get their own sub node */
1980
1981 snprintf(stats->namebuf,
1982 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1983
1984 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "rx csum offload - IP");
1986 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "rx csum offload - L4");
1988 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "rx csum offload - IP bad");
1990 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "rx csum offload - L4 bad");
1992 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Interrupt conditions zero");
1994 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Legacy interrupts");
1996
1997 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "CRC Errors");
1999 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "Illegal Byte Errors");
2001 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "Byte Errors");
2003 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "MAC Short Packets Discarded");
2005 if (hw->mac.type >= ixgbe_mac_X550)
2006 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Bad SFD");
2008 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Total Packets Missed");
2010 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "MAC Local Faults");
2012 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "MAC Remote Faults");
2014 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Receive Length Errors");
2016 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Link XON Transmitted");
2018 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Link XON Received");
2020 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Link XOFF Transmitted");
2022 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Link XOFF Received");
2024
2025 /* Packet Reception Stats */
2026 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Total Octets Received");
2028 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Good Octets Received");
2030 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Total Packets Received");
2032 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Good Packets Received");
2034 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Multicast Packets Received");
2036 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Broadcast Packets Received");
2038 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "64 byte frames received ");
2040 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "65-127 byte frames received");
2042 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "128-255 byte frames received");
2044 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "256-511 byte frames received");
2046 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "512-1023 byte frames received");
2048 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "1023-1522 byte frames received");
2050 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Receive Undersized");
2052 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Fragmented Packets Received ");
2054 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Oversized Packets Received");
2056 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Received Jabber");
2058 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Management Packets Received");
2060 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Management Packets Dropped");
2062 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Checksum Errors");
2064
2065 /* Packet Transmission Stats */
2066 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "Good Octets Transmitted");
2068 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "Total Packets Transmitted");
2070 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "Good Packets Transmitted");
2072 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "Broadcast Packets Transmitted");
2074 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "Multicast Packets Transmitted");
2076 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "Management Packets Transmitted");
2078 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2079 stats->namebuf, "64 byte frames transmitted ");
2080 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2081 stats->namebuf, "65-127 byte frames transmitted");
2082 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2083 stats->namebuf, "128-255 byte frames transmitted");
2084 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2085 stats->namebuf, "256-511 byte frames transmitted");
2086 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2087 stats->namebuf, "512-1023 byte frames transmitted");
2088 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2089 stats->namebuf, "1024-1522 byte frames transmitted");
2090 } /* ixgbe_add_hw_stats */
2091
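/************************************************************************
 * ixgbe_clear_evcnt
 *
 * Reset all of the driver's event counters (driver, per-TC, per-queue
 * and MAC statistics) so that the counts restart from zero.
 ************************************************************************/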
2092 static void
2093 ixgbe_clear_evcnt(struct adapter *adapter)
2094 {
2095 struct tx_ring *txr = adapter->tx_rings;
2096 struct rx_ring *rxr = adapter->rx_rings;
2097 struct ixgbe_hw *hw = &adapter->hw;
2098 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2099 int i;
2100
2101 adapter->efbig_tx_dma_setup.ev_count = 0;
2102 adapter->mbuf_defrag_failed.ev_count = 0;
2103 adapter->efbig2_tx_dma_setup.ev_count = 0;
2104 adapter->einval_tx_dma_setup.ev_count = 0;
2105 adapter->other_tx_dma_setup.ev_count = 0;
2106 adapter->eagain_tx_dma_setup.ev_count = 0;
2107 adapter->enomem_tx_dma_setup.ev_count = 0;
2108 adapter->tso_err.ev_count = 0;
2109 adapter->watchdog_events.ev_count = 0;
2110 adapter->admin_irqev.ev_count = 0;
2111 adapter->link_workev.ev_count = 0;
2112 adapter->mod_workev.ev_count = 0;
2113 adapter->msf_workev.ev_count = 0;
2114 adapter->phy_workev.ev_count = 0;
2115
2116 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2117 if (i < __arraycount(stats->mpc)) {
2118 stats->mpc[i].ev_count = 0;
2119 if (hw->mac.type == ixgbe_mac_82598EB)
2120 stats->rnbc[i].ev_count = 0;
2121 }
2122 if (i < __arraycount(stats->pxontxc)) {
2123 stats->pxontxc[i].ev_count = 0;
2124 stats->pxonrxc[i].ev_count = 0;
2125 stats->pxofftxc[i].ev_count = 0;
2126 stats->pxoffrxc[i].ev_count = 0;
2127 if (hw->mac.type >= ixgbe_mac_82599EB)
2128 stats->pxon2offc[i].ev_count = 0;
2129 }
2130 }
2131
2132 txr = adapter->tx_rings;
2133 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2134 adapter->queues[i].irqs.ev_count = 0;
2135 adapter->queues[i].handleq.ev_count = 0;
2136 adapter->queues[i].req.ev_count = 0;
2137 txr->no_desc_avail.ev_count = 0;
2138 txr->total_packets.ev_count = 0;
2139 txr->tso_tx.ev_count = 0;
2140 #ifndef IXGBE_LEGACY_TX
2141 txr->pcq_drops.ev_count = 0;
2142 #endif
2143 txr->q_efbig_tx_dma_setup = 0;
2144 txr->q_mbuf_defrag_failed = 0;
2145 txr->q_efbig2_tx_dma_setup = 0;
2146 txr->q_einval_tx_dma_setup = 0;
2147 txr->q_other_tx_dma_setup = 0;
2148 txr->q_eagain_tx_dma_setup = 0;
2149 txr->q_enomem_tx_dma_setup = 0;
2150 txr->q_tso_err = 0;
2151
2152 if (i < __arraycount(stats->qprc)) {
2153 stats->qprc[i].ev_count = 0;
2154 stats->qptc[i].ev_count = 0;
2155 stats->qbrc[i].ev_count = 0;
2156 stats->qbtc[i].ev_count = 0;
2157 if (hw->mac.type >= ixgbe_mac_82599EB)
2158 stats->qprdc[i].ev_count = 0;
2159 }
2160
2161 rxr->rx_packets.ev_count = 0;
2162 rxr->rx_bytes.ev_count = 0;
2163 rxr->rx_copies.ev_count = 0;
2164 rxr->no_jmbuf.ev_count = 0;
2165 rxr->rx_discarded.ev_count = 0;
2166 }
2167 stats->ipcs.ev_count = 0;
2168 stats->l4cs.ev_count = 0;
2169 stats->ipcs_bad.ev_count = 0;
2170 stats->l4cs_bad.ev_count = 0;
2171 stats->intzero.ev_count = 0;
2172 stats->legint.ev_count = 0;
2173 stats->crcerrs.ev_count = 0;
2174 stats->illerrc.ev_count = 0;
2175 stats->errbc.ev_count = 0;
2176 stats->mspdc.ev_count = 0;
2177 if (hw->mac.type >= ixgbe_mac_X550)
2178 stats->mbsdc.ev_count = 0;
2179 stats->mpctotal.ev_count = 0;
2180 stats->mlfc.ev_count = 0;
2181 stats->mrfc.ev_count = 0;
2182 stats->rlec.ev_count = 0;
2183 stats->lxontxc.ev_count = 0;
2184 stats->lxonrxc.ev_count = 0;
2185 stats->lxofftxc.ev_count = 0;
2186 stats->lxoffrxc.ev_count = 0;
2187
2188 /* Packet Reception Stats */
2189 stats->tor.ev_count = 0;
2190 stats->gorc.ev_count = 0;
2191 stats->tpr.ev_count = 0;
2192 stats->gprc.ev_count = 0;
2193 stats->mprc.ev_count = 0;
2194 stats->bprc.ev_count = 0;
2195 stats->prc64.ev_count = 0;
2196 stats->prc127.ev_count = 0;
2197 stats->prc255.ev_count = 0;
2198 stats->prc511.ev_count = 0;
2199 stats->prc1023.ev_count = 0;
2200 stats->prc1522.ev_count = 0;
2201 stats->ruc.ev_count = 0;
2202 stats->rfc.ev_count = 0;
2203 stats->roc.ev_count = 0;
2204 stats->rjc.ev_count = 0;
2205 stats->mngprc.ev_count = 0;
2206 stats->mngpdc.ev_count = 0;
2207 stats->xec.ev_count = 0;
2208
2209 /* Packet Transmission Stats */
2210 stats->gotc.ev_count = 0;
2211 stats->tpt.ev_count = 0;
2212 stats->gptc.ev_count = 0;
2213 stats->bptc.ev_count = 0;
2214 stats->mptc.ev_count = 0;
2215 stats->mngptc.ev_count = 0;
2216 stats->ptc64.ev_count = 0;
2217 stats->ptc127.ev_count = 0;
2218 stats->ptc255.ev_count = 0;
2219 stats->ptc511.ev_count = 0;
2220 stats->ptc1023.ev_count = 0;
2221 stats->ptc1522.ev_count = 0;
2222 } /* ixgbe_clear_evcnt */
2223
2224 /************************************************************************
2225 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2226 *
2227 * Retrieves the TDH value from the hardware
2228 ************************************************************************/
2229 static int
2230 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2231 {
2232 struct sysctlnode node = *rnode;
2233 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2234 struct adapter *adapter;
2235 uint32_t val;
2236
2237 if (!txr)
2238 return (0);
2239
2240 adapter = txr->adapter;
2241 if (ixgbe_fw_recovery_mode_swflag(adapter))
2242 return (EPERM);
2243
2244 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2245 node.sysctl_data = &val;
2246 return sysctl_lookup(SYSCTLFN_CALL(&node));
2247 } /* ixgbe_sysctl_tdh_handler */
2248
2249 /************************************************************************
2250 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2251 *
2252 * Retrieves the TDT value from the hardware
2253 ************************************************************************/
2254 static int
2255 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2256 {
2257 struct sysctlnode node = *rnode;
2258 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2259 struct adapter *adapter;
2260 uint32_t val;
2261
2262 if (!txr)
2263 return (0);
2264
2265 adapter = txr->adapter;
2266 if (ixgbe_fw_recovery_mode_swflag(adapter))
2267 return (EPERM);
2268
2269 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2270 node.sysctl_data = &val;
2271 return sysctl_lookup(SYSCTLFN_CALL(&node));
2272 } /* ixgbe_sysctl_tdt_handler */
2273
2274 /************************************************************************
2275 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2276 * handler function
2277 *
2278 * Retrieves the next_to_check value
2279 ************************************************************************/
2280 static int
2281 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2282 {
2283 struct sysctlnode node = *rnode;
2284 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2285 struct adapter *adapter;
2286 uint32_t val;
2287
2288 if (!rxr)
2289 return (0);
2290
2291 adapter = rxr->adapter;
2292 if (ixgbe_fw_recovery_mode_swflag(adapter))
2293 return (EPERM);
2294
2295 val = rxr->next_to_check;
2296 node.sysctl_data = &val;
2297 return sysctl_lookup(SYSCTLFN_CALL(&node));
2298 } /* ixgbe_sysctl_next_to_check_handler */
2299
2300 /************************************************************************
2301 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2302 *
2303 * Retrieves the RDH value from the hardware
2304 ************************************************************************/
2305 static int
2306 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2307 {
2308 struct sysctlnode node = *rnode;
2309 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2310 struct adapter *adapter;
2311 uint32_t val;
2312
2313 if (!rxr)
2314 return (0);
2315
2316 adapter = rxr->adapter;
2317 if (ixgbe_fw_recovery_mode_swflag(adapter))
2318 return (EPERM);
2319
2320 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2321 node.sysctl_data = &val;
2322 return sysctl_lookup(SYSCTLFN_CALL(&node));
2323 } /* ixgbe_sysctl_rdh_handler */
2324
2325 /************************************************************************
2326 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2327 *
2328 * Retrieves the RDT value from the hardware
2329 ************************************************************************/
2330 static int
2331 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2332 {
2333 struct sysctlnode node = *rnode;
2334 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2335 struct adapter *adapter;
2336 uint32_t val;
2337
2338 if (!rxr)
2339 return (0);
2340
2341 adapter = rxr->adapter;
2342 if (ixgbe_fw_recovery_mode_swflag(adapter))
2343 return (EPERM);
2344
2345 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2346 node.sysctl_data = &val;
2347 return sysctl_lookup(SYSCTLFN_CALL(&node));
2348 } /* ixgbe_sysctl_rdt_handler */
2349
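/************************************************************************
 * ixgbe_vlan_cb
 *
 * VLAN registration callback from the ethercom layer. Registers or
 * unregisters the given vlan id in the HW filter and, when the first
 * VLAN is attached or the last one is detached, reconfigures HW
 * tagging.
 ************************************************************************/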
2350 static int
2351 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2352 {
2353 struct ifnet *ifp = &ec->ec_if;
2354 struct adapter *adapter = ifp->if_softc;
2355 int rv;
2356
2357 if (set)
2358 rv = ixgbe_register_vlan(adapter, vid);
2359 else
2360 rv = ixgbe_unregister_vlan(adapter, vid);
2361
2362 if (rv != 0)
2363 return rv;
2364
2365 	/*
2366 	 * Reconfigure VLAN HW tagging when ec_nvlans changes from 1 to 0
2367 	 * or from 0 to 1 (i.e. on first attach or last detach).
2368 	 */
2369 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2370 ixgbe_setup_vlan_hw_tagging(adapter);
2371
2372 return rv;
2373 } /* ixgbe_vlan_cb */
2374
2375 /************************************************************************
2376 * ixgbe_register_vlan
2377 *
2378  * Run via the vlan config EVENT. Since the event supplies the
2379  * vlan id, it enables use of the HW Filter table. This just
2380  * creates the entry in the soft version of the VFTA; init will
2381  * repopulate the real table.
2382 ************************************************************************/
2383 static int
2384 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2385 {
2386 u16 index, bit;
2387 int error;
2388
2389 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2390 return EINVAL;
2391
2392 IXGBE_CORE_LOCK(adapter);
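	/*
	 * The 4096-bit VFTA is held in 128 32-bit registers: bits 11:5
	 * of the tag select the register and bits 4:0 select the bit.
	 */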
2393 index = (vtag >> 5) & 0x7F;
2394 bit = vtag & 0x1F;
2395 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2396 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2397 true);
2398 IXGBE_CORE_UNLOCK(adapter);
2399 if (error != 0)
2400 error = EACCES;
2401
2402 return error;
2403 } /* ixgbe_register_vlan */
2404
2405 /************************************************************************
2406 * ixgbe_unregister_vlan
2407 *
2408 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2409 ************************************************************************/
2410 static int
2411 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2412 {
2413 u16 index, bit;
2414 int error;
2415
2416 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2417 return EINVAL;
2418
2419 IXGBE_CORE_LOCK(adapter);
2420 index = (vtag >> 5) & 0x7F;
2421 bit = vtag & 0x1F;
2422 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2423 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2424 true);
2425 IXGBE_CORE_UNLOCK(adapter);
2426 if (error != 0)
2427 error = EACCES;
2428
2429 return error;
2430 } /* ixgbe_unregister_vlan */
2431
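/************************************************************************
 * ixgbe_setup_vlan_hw_tagging
 *
 * Enable or disable HW VLAN tag stripping: per-queue via RXDCTL on
 * 82599 and later, globally via VLNCTRL on 82598. Stripping is
 * enabled only while the capability is on and a VLAN is attached.
 ************************************************************************/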
2432 static void
2433 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2434 {
2435 struct ethercom *ec = &adapter->osdep.ec;
2436 struct ixgbe_hw *hw = &adapter->hw;
2437 struct rx_ring *rxr;
2438 u32 ctrl;
2439 int i;
2440 bool hwtagging;
2441
2442 /* Enable HW tagging only if any vlan is attached */
2443 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2444 && VLAN_ATTACHED(ec);
2445
2446 /* Setup the queues for vlans */
2447 for (i = 0; i < adapter->num_queues; i++) {
2448 rxr = &adapter->rx_rings[i];
2449 /*
2450 		 * On 82599 and later, the VLAN enable is per-queue in RXDCTL.
2451 */
2452 if (hw->mac.type != ixgbe_mac_82598EB) {
2453 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2454 if (hwtagging)
2455 ctrl |= IXGBE_RXDCTL_VME;
2456 else
2457 ctrl &= ~IXGBE_RXDCTL_VME;
2458 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2459 }
2460 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2461 }
2462
2463 /* VLAN hw tagging for 82598 */
2464 if (hw->mac.type == ixgbe_mac_82598EB) {
2465 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2466 if (hwtagging)
2467 ctrl |= IXGBE_VLNCTRL_VME;
2468 else
2469 ctrl &= ~IXGBE_VLNCTRL_VME;
2470 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2471 }
2472 } /* ixgbe_setup_vlan_hw_tagging */
2473
2474 static void
2475 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2476 {
2477 struct ethercom *ec = &adapter->osdep.ec;
2478 struct ixgbe_hw *hw = &adapter->hw;
2479 int i;
2480 u32 ctrl;
2481 struct vlanid_list *vlanidp;
2482
2483 /*
2484 * This function is called from both if_init and ifflags_cb()
2485 * on NetBSD.
2486 */
2487
2488 /*
2489 * Part 1:
2490 * Setup VLAN HW tagging
2491 */
2492 ixgbe_setup_vlan_hw_tagging(adapter);
2493
2494 /*
2495 * Part 2:
2496 * Setup VLAN HW filter
2497 */
2498 /* Cleanup shadow_vfta */
2499 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2500 adapter->shadow_vfta[i] = 0;
2501 /* Generate shadow_vfta from ec_vids */
2502 ETHER_LOCK(ec);
2503 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2504 uint32_t idx;
2505
2506 idx = vlanidp->vid / 32;
2507 KASSERT(idx < IXGBE_VFTA_SIZE);
2508 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2509 }
2510 ETHER_UNLOCK(ec);
2511 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2512 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2513
2514 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2515 /* Enable the Filter Table if enabled */
2516 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2517 ctrl |= IXGBE_VLNCTRL_VFE;
2518 else
2519 ctrl &= ~IXGBE_VLNCTRL_VFE;
2520 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2521 } /* ixgbe_setup_vlan_hw_support */
2522
2523 /************************************************************************
2524 * ixgbe_get_slot_info
2525 *
2526 * Get the width and transaction speed of
2527 * the slot this adapter is plugged into.
2528 ************************************************************************/
2529 static void
2530 ixgbe_get_slot_info(struct adapter *adapter)
2531 {
2532 device_t dev = adapter->dev;
2533 struct ixgbe_hw *hw = &adapter->hw;
2534 u32 offset;
2535 u16 link;
2536 int bus_info_valid = TRUE;
2537
2538 /* Some devices are behind an internal bridge */
2539 switch (hw->device_id) {
2540 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2541 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2542 goto get_parent_info;
2543 default:
2544 break;
2545 }
2546
2547 ixgbe_get_bus_info(hw);
2548
2549 	/*
2550 	 * Some devices don't use PCI-E, so displaying "Unknown" for
2551 	 * bus speed and width would be pointless; just return.
2552 	 */
2553 switch (hw->mac.type) {
2554 case ixgbe_mac_X550EM_x:
2555 case ixgbe_mac_X550EM_a:
2556 return;
2557 default:
2558 goto display;
2559 }
2560
2561 get_parent_info:
2562 	/*
2563 	 * For the Quad port adapter we need to walk back up
2564 	 * the PCI tree to find the speed of the expansion
2565 	 * slot into which this adapter is plugged. A bit more work.
2566 	 */
2567 dev = device_parent(device_parent(dev));
2568 #if 0
2569 #ifdef IXGBE_DEBUG
2570 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2571 pci_get_slot(dev), pci_get_function(dev));
2572 #endif
2573 dev = device_parent(device_parent(dev));
2574 #ifdef IXGBE_DEBUG
2575 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2576 pci_get_slot(dev), pci_get_function(dev));
2577 #endif
2578 #endif
2579 /* Now get the PCI Express Capabilities offset */
2580 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2581 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2582 /*
2583 * Hmm...can't get PCI-Express capabilities.
2584 * Falling back to default method.
2585 */
2586 bus_info_valid = FALSE;
2587 ixgbe_get_bus_info(hw);
2588 goto display;
2589 }
2590 /* ...and read the Link Status Register */
2591 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2592 offset + PCIE_LCSR) >> 16;
2593 ixgbe_set_pci_config_data_generic(hw, link);
2594
2595 display:
2596 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2597 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2598 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2599 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2600 "Unknown"),
2601 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2602 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2603 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2604 "Unknown"));
2605
2606 if (bus_info_valid) {
2607 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2608 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2609 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2610 device_printf(dev, "PCI-Express bandwidth available"
2611 " for this card\n is not sufficient for"
2612 " optimal performance.\n");
2613 device_printf(dev, "For optimal performance a x8 "
2614 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2615 }
2616 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2617 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2618 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2619 device_printf(dev, "PCI-Express bandwidth available"
2620 " for this card\n is not sufficient for"
2621 " optimal performance.\n");
2622 device_printf(dev, "For optimal performance a x8 "
2623 "PCIE Gen3 slot is required.\n");
2624 }
2625 } else
2626 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2627
2628 return;
2629 } /* ixgbe_get_slot_info */
2630
2631 /************************************************************************
2632 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2633 ************************************************************************/
2634 static inline void
2635 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2636 {
2637 struct ixgbe_hw *hw = &adapter->hw;
2638 struct ix_queue *que = &adapter->queues[vector];
2639 u64 queue = 1ULL << vector;
2640 u32 mask;
2641
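	/*
	 * ixgbe_disable_queue() calls may nest; only re-enable the
	 * interrupt when the last outstanding disable has been undone.
	 */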
2642 mutex_enter(&que->dc_mtx);
2643 if (que->disabled_count > 0 && --que->disabled_count > 0)
2644 goto out;
2645
2646 if (hw->mac.type == ixgbe_mac_82598EB) {
2647 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2648 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2649 } else {
2650 mask = (queue & 0xFFFFFFFF);
2651 if (mask)
2652 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2653 mask = (queue >> 32);
2654 if (mask)
2655 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2656 }
2657 out:
2658 mutex_exit(&que->dc_mtx);
2659 } /* ixgbe_enable_queue */
2660
2661 /************************************************************************
2662 * ixgbe_disable_queue_internal
2663 ************************************************************************/
2664 static inline void
2665 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2666 {
2667 struct ixgbe_hw *hw = &adapter->hw;
2668 struct ix_queue *que = &adapter->queues[vector];
2669 u64 queue = 1ULL << vector;
2670 u32 mask;
2671
2672 mutex_enter(&que->dc_mtx);
2673
2674 if (que->disabled_count > 0) {
2675 if (nestok)
2676 que->disabled_count++;
2677 goto out;
2678 }
2679 que->disabled_count++;
2680
2681 if (hw->mac.type == ixgbe_mac_82598EB) {
2682 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2683 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2684 } else {
2685 mask = (queue & 0xFFFFFFFF);
2686 if (mask)
2687 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2688 mask = (queue >> 32);
2689 if (mask)
2690 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2691 }
2692 out:
2693 mutex_exit(&que->dc_mtx);
2694 } /* ixgbe_disable_queue_internal */
2695
2696 /************************************************************************
2697 * ixgbe_disable_queue
2698 ************************************************************************/
2699 static inline void
2700 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2701 {
2702
2703 ixgbe_disable_queue_internal(adapter, vector, true);
2704 } /* ixgbe_disable_queue */
2705
2706 /************************************************************************
2707 * ixgbe_sched_handle_que - schedule deferred packet processing
2708 ************************************************************************/
2709 static inline void
2710 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2711 {
2712
2713 if (que->txrx_use_workqueue) {
2714 		/*
2715 		 * adapter->que_wq is bound to each CPU instead of
2716 		 * each NIC queue to reduce the number of workqueue
2717 		 * kthreads. Because interrupt affinity matters here,
2718 		 * the workqueue must be created with WQ_PERCPU. If a
2719 		 * WQ_PERCPU workqueue were created for each NIC queue
2720 		 * instead, the number of kthreads would be
2721 		 * (number of used NIC queues) * (number of CPUs),
2722 		 * i.e. (number of CPUs) ^ 2 in the common case.
2723 		 *
2724 		 * Re-entry for the same NIC queue is prevented by
2725 		 * masking the queue's interrupt, and different NIC
2726 		 * queues use different struct work instances
2727 		 * (que->wq_cookie), so an "enqueued" flag to avoid
2728 		 * calling workqueue_enqueue() twice is not required.
2729 		 */
2730 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2731 } else {
2732 softint_schedule(que->que_si);
2733 }
2734 } /* ixgbe_sched_handle_que */
2735
2736 /************************************************************************
2737 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2738 ************************************************************************/
2739 static int
2740 ixgbe_msix_que(void *arg)
2741 {
2742 struct ix_queue *que = arg;
2743 struct adapter *adapter = que->adapter;
2744 struct ifnet *ifp = adapter->ifp;
2745 struct tx_ring *txr = que->txr;
2746 struct rx_ring *rxr = que->rxr;
2747 bool more;
2748 u32 newitr = 0;
2749
2750 /* Protect against spurious interrupts */
2751 if ((ifp->if_flags & IFF_RUNNING) == 0)
2752 return 0;
2753
2754 ixgbe_disable_queue(adapter, que->msix);
2755 ++que->irqs.ev_count;
2756
2757 	/*
2758 	 * Latch "que->txrx_use_workqueue" here so the mode cannot flip-flop
2759 	 * between softint and workqueue within one deferred processing run.
2760 	 */
2761 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2762
2763 #ifdef __NetBSD__
2764 /* Don't run ixgbe_rxeof in interrupt context */
2765 more = true;
2766 #else
2767 more = ixgbe_rxeof(que);
2768 #endif
2769
2770 IXGBE_TX_LOCK(txr);
2771 ixgbe_txeof(txr);
2772 IXGBE_TX_UNLOCK(txr);
2773
2774 /* Do AIM now? */
2775
2776 if (adapter->enable_aim == false)
2777 goto no_calc;
2778 /*
2779 * Do Adaptive Interrupt Moderation:
2780 * - Write out last calculated setting
2781 * - Calculate based on average size over
2782 * the last interval.
2783 */
2784 if (que->eitr_setting)
2785 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2786
2787 que->eitr_setting = 0;
2788
2789 /* Idle, do nothing */
2790 if ((txr->bytes == 0) && (rxr->bytes == 0))
2791 goto no_calc;
2792
2793 if ((txr->bytes) && (txr->packets))
2794 newitr = txr->bytes/txr->packets;
2795 if ((rxr->bytes) && (rxr->packets))
2796 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2797 newitr += 24; /* account for hardware frame, crc */
2798
2799 /* set an upper boundary */
2800 newitr = uimin(newitr, 3000);
2801
2802 /* Be nice to the mid range */
2803 if ((newitr > 300) && (newitr < 1200))
2804 newitr = (newitr / 3);
2805 else
2806 newitr = (newitr / 2);
2807
2808 	/*
2809 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY
2810 	 * (currently 2us). The minimum interval always exceeds 2us at 100M
2811 	 * (and presumably at 10M, though that is not documented), but not
2812 	 * at 1G and higher, so enforce a floor there.
2813 	 */
2814 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2815 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2816 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2817 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2818 }
2819
2820 /* save for next interrupt */
2821 que->eitr_setting = newitr;
2822
2823 /* Reset state */
2824 txr->bytes = 0;
2825 txr->packets = 0;
2826 rxr->bytes = 0;
2827 rxr->packets = 0;
2828
2829 no_calc:
2830 if (more)
2831 ixgbe_sched_handle_que(adapter, que);
2832 else
2833 ixgbe_enable_queue(adapter, que->msix);
2834
2835 return 1;
2836 } /* ixgbe_msix_que */
2837
2838 /************************************************************************
2839 * ixgbe_media_status - Media Ioctl callback
2840 *
2841 * Called whenever the user queries the status of
2842 * the interface using ifconfig.
2843 ************************************************************************/
2844 static void
2845 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2846 {
2847 struct adapter *adapter = ifp->if_softc;
2848 struct ixgbe_hw *hw = &adapter->hw;
2849 int layer;
2850
2851 INIT_DEBUGOUT("ixgbe_media_status: begin");
2852 ixgbe_update_link_status(adapter);
2853
2854 ifmr->ifm_status = IFM_AVALID;
2855 ifmr->ifm_active = IFM_ETHER;
2856
2857 if (adapter->link_active != LINK_STATE_UP) {
2858 ifmr->ifm_active |= IFM_NONE;
2859 return;
2860 }
2861
2862 ifmr->ifm_status |= IFM_ACTIVE;
2863 layer = adapter->phy_layer;
2864
2865 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2866 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2867 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2868 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2869 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2870 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2871 switch (adapter->link_speed) {
2872 case IXGBE_LINK_SPEED_10GB_FULL:
2873 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2874 break;
2875 case IXGBE_LINK_SPEED_5GB_FULL:
2876 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2877 break;
2878 case IXGBE_LINK_SPEED_2_5GB_FULL:
2879 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2880 break;
2881 case IXGBE_LINK_SPEED_1GB_FULL:
2882 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2883 break;
2884 case IXGBE_LINK_SPEED_100_FULL:
2885 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2886 break;
2887 case IXGBE_LINK_SPEED_10_FULL:
2888 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2889 break;
2890 }
2891 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2892 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2893 switch (adapter->link_speed) {
2894 case IXGBE_LINK_SPEED_10GB_FULL:
2895 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2896 break;
2897 }
2898 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2899 switch (adapter->link_speed) {
2900 case IXGBE_LINK_SPEED_10GB_FULL:
2901 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2902 break;
2903 case IXGBE_LINK_SPEED_1GB_FULL:
2904 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2905 break;
2906 }
2907 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2908 switch (adapter->link_speed) {
2909 case IXGBE_LINK_SPEED_10GB_FULL:
2910 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2911 break;
2912 case IXGBE_LINK_SPEED_1GB_FULL:
2913 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2914 break;
2915 }
2916 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2917 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2918 switch (adapter->link_speed) {
2919 case IXGBE_LINK_SPEED_10GB_FULL:
2920 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2921 break;
2922 case IXGBE_LINK_SPEED_1GB_FULL:
2923 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2924 break;
2925 }
2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2927 switch (adapter->link_speed) {
2928 case IXGBE_LINK_SPEED_10GB_FULL:
2929 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2930 break;
2931 }
2932 /*
2933 * XXX: These need to use the proper media types once
2934 * they're added.
2935 */
2936 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2937 switch (adapter->link_speed) {
2938 case IXGBE_LINK_SPEED_10GB_FULL:
2939 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2940 break;
2941 case IXGBE_LINK_SPEED_2_5GB_FULL:
2942 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2943 break;
2944 case IXGBE_LINK_SPEED_1GB_FULL:
2945 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2946 break;
2947 }
2948 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2949 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2950 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2951 switch (adapter->link_speed) {
2952 case IXGBE_LINK_SPEED_10GB_FULL:
2953 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2954 break;
2955 case IXGBE_LINK_SPEED_2_5GB_FULL:
2956 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2957 break;
2958 case IXGBE_LINK_SPEED_1GB_FULL:
2959 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2960 break;
2961 }
2962
2963 /* If nothing is recognized... */
2964 #if 0
2965 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2966 ifmr->ifm_active |= IFM_UNKNOWN;
2967 #endif
2968
2969 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2970
2971 /* Display current flow control setting used on link */
2972 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2973 hw->fc.current_mode == ixgbe_fc_full)
2974 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2975 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2976 hw->fc.current_mode == ixgbe_fc_full)
2977 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2978
2979 return;
2980 } /* ixgbe_media_status */
2981
2982 /************************************************************************
2983 * ixgbe_media_change - Media Ioctl callback
2984 *
2985 * Called when the user changes speed/duplex using
2986  * media/mediaopt options with ifconfig.
2987 ************************************************************************/
2988 static int
2989 ixgbe_media_change(struct ifnet *ifp)
2990 {
2991 struct adapter *adapter = ifp->if_softc;
2992 struct ifmedia *ifm = &adapter->media;
2993 struct ixgbe_hw *hw = &adapter->hw;
2994 ixgbe_link_speed speed = 0;
2995 ixgbe_link_speed link_caps = 0;
2996 bool negotiate = false;
2997 s32 err = IXGBE_NOT_IMPLEMENTED;
2998
2999 INIT_DEBUGOUT("ixgbe_media_change: begin");
3000
3001 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3002 return (EINVAL);
3003
3004 if (hw->phy.media_type == ixgbe_media_type_backplane)
3005 return (EPERM);
3006
3007 /*
3008 * We don't actually need to check against the supported
3009 * media types of the adapter; ifmedia will take care of
3010 * that for us.
3011 */
3012 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3013 case IFM_AUTO:
3014 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3015 &negotiate);
3016 if (err != IXGBE_SUCCESS) {
3017 device_printf(adapter->dev, "Unable to determine "
3018 "supported advertise speeds\n");
3019 return (ENODEV);
3020 }
3021 speed |= link_caps;
3022 break;
3023 case IFM_10G_T:
3024 case IFM_10G_LRM:
3025 case IFM_10G_LR:
3026 case IFM_10G_TWINAX:
3027 case IFM_10G_SR:
3028 case IFM_10G_CX4:
3029 case IFM_10G_KR:
3030 case IFM_10G_KX4:
3031 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3032 break;
3033 case IFM_5000_T:
3034 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3035 break;
3036 case IFM_2500_T:
3037 case IFM_2500_KX:
3038 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3039 break;
3040 case IFM_1000_T:
3041 case IFM_1000_LX:
3042 case IFM_1000_SX:
3043 case IFM_1000_KX:
3044 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3045 break;
3046 case IFM_100_TX:
3047 speed |= IXGBE_LINK_SPEED_100_FULL;
3048 break;
3049 case IFM_10_T:
3050 speed |= IXGBE_LINK_SPEED_10_FULL;
3051 break;
3052 case IFM_NONE:
3053 break;
3054 default:
3055 goto invalid;
3056 }
3057
3058 hw->mac.autotry_restart = TRUE;
3059 hw->mac.ops.setup_link(hw, speed, TRUE);
3060 adapter->advertise = 0;
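	/*
	 * Record the selection in the encoding used by the driver's
	 * advertise setting: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
	 * 0x8 = 10M, 0x10 = 2.5G and 0x20 = 5G.
	 */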
3061 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3062 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3063 adapter->advertise |= 1 << 2;
3064 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3065 adapter->advertise |= 1 << 1;
3066 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3067 adapter->advertise |= 1 << 0;
3068 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3069 adapter->advertise |= 1 << 3;
3070 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3071 adapter->advertise |= 1 << 4;
3072 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3073 adapter->advertise |= 1 << 5;
3074 }
3075
3076 return (0);
3077
3078 invalid:
3079 device_printf(adapter->dev, "Invalid media type!\n");
3080
3081 return (EINVAL);
3082 } /* ixgbe_media_change */
3083
3084 /************************************************************************
3085 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3086 ************************************************************************/
3087 static int
3088 ixgbe_msix_admin(void *arg)
3089 {
3090 struct adapter *adapter = arg;
3091 struct ixgbe_hw *hw = &adapter->hw;
3092 u32 eicr;
3093 u32 eims_orig;
3094 u32 eims_disable = 0;
3095
3096 ++adapter->admin_irqev.ev_count;
3097
3098 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3099 /* Pause other interrupts */
3100 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3101
3102 	/*
3103 	 * First get the cause.
3104 	 *
3105 	 * The 82598, 82599, X540 and X550 specifications say the EICS
3106 	 * register is write only. However, Linux reads EICS instead of
3107 	 * EICR to get the interrupt cause, as a workaround for silicon
3108 	 * errata. At least on 82598, reading EICR clears the lower 16 bits of EIMS.
3109 	 */
3110 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3111 /* Be sure the queue bits are not cleared */
3112 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3113 /* Clear all OTHER interrupts with write */
3114 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3115
3116 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
3117
3118 /* Re-enable some OTHER interrupts */
3119 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3120
3121 return 1;
3122 } /* ixgbe_msix_admin */
3123
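/************************************************************************
 * ixgbe_intr_admin_common
 *
 * Interrupt-cause decoding shared by the admin interrupt paths.
 * Translates EICR cause bits into task_requests for the admin
 * workqueue and collects in *eims_disable the causes that must stay
 * masked until the corresponding task has run.
 ************************************************************************/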
3124 static void
3125 ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
3126 {
3127 struct ixgbe_hw *hw = &adapter->hw;
3128 u32 eicr_mask;
3129 u32 task_requests = 0;
3130 s32 retval;
3131
3132 /* Link status change */
3133 if (eicr & IXGBE_EICR_LSC) {
3134 task_requests |= IXGBE_REQUEST_TASK_LSC;
3135 *eims_disable |= IXGBE_EIMS_LSC;
3136 }
3137
3138 if (ixgbe_is_sfp(hw)) {
3139 /* Pluggable optics-related interrupt */
3140 if (hw->mac.type >= ixgbe_mac_X540)
3141 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3142 else
3143 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3144
3145 		/*
3146 		 * An interrupt might not arrive when a module is inserted.
3147 		 * When a link status change interrupt occurs and the driver
3148 		 * still regards the SFP as unplugged, run the module task
3149 		 * first and then process the LSC interrupt.
3150 		 */
3151 if ((eicr & eicr_mask)
3152 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3153 && (eicr & IXGBE_EICR_LSC))) {
3154 task_requests |= IXGBE_REQUEST_TASK_MOD;
3155 *eims_disable |= IXGBE_EIMS_LSC;
3156 }
3157
3158 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3159 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3160 task_requests |= IXGBE_REQUEST_TASK_MSF;
3161 *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3162 }
3163 }
3164
3165 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3166 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3167 (eicr & IXGBE_EICR_FLOW_DIR)) {
3168 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
3169 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3170 /* Disable the interrupt */
3171 *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3172 }
3173 }
3174
3175 if (eicr & IXGBE_EICR_ECC) {
3176 device_printf(adapter->dev,
3177 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3178 /* Disable interrupt to prevent log spam */
3179 *eims_disable |= IXGBE_EICR_ECC;
3180 }
3181
3182 /* Check for over temp condition */
3183 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3184 switch (adapter->hw.mac.type) {
3185 case ixgbe_mac_X550EM_a:
3186 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3187 break;
3188 /* Disable interrupt to prevent log spam */
3189 *eims_disable |= IXGBE_EICR_GPI_SDP0_X550EM_a;
3190
3191 retval = hw->phy.ops.check_overtemp(hw);
3192 if (retval != IXGBE_ERR_OVERTEMP)
3193 break;
3194 device_printf(adapter->dev,
3195 "CRITICAL: OVER TEMP!! "
3196 "PHY IS SHUT DOWN!!\n");
3197 device_printf(adapter->dev,
3198 "System shutdown required!\n");
3199 break;
3200 default:
3201 if (!(eicr & IXGBE_EICR_TS))
3202 break;
3203 /* Disable interrupt to prevent log spam */
3204 *eims_disable |= IXGBE_EIMS_TS;
3205
3206 retval = hw->phy.ops.check_overtemp(hw);
3207 if (retval != IXGBE_ERR_OVERTEMP)
3208 break;
3209 device_printf(adapter->dev,
3210 "CRITICAL: OVER TEMP!! "
3211 "PHY IS SHUT DOWN!!\n");
3212 device_printf(adapter->dev,
3213 "System shutdown required!\n");
3214 break;
3215 }
3216 }
3217
3218 /* Check for VF message */
3219 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3220 (eicr & IXGBE_EICR_MAILBOX)) {
3221 task_requests |= IXGBE_REQUEST_TASK_MBX;
3222 *eims_disable |= IXGBE_EIMS_MAILBOX;
3223 }
3224 }
3225
3226 /* Check for fan failure */
3227 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3228 retval = ixgbe_check_fan_failure(adapter, eicr, true);
3229 if (retval == IXGBE_ERR_FAN_FAILURE) {
3230 /* Disable interrupt to prevent log spam */
3231 *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3232 }
3233 }
3234
3235 /* External PHY interrupt */
3236 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3237 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3238 task_requests |= IXGBE_REQUEST_TASK_PHY;
3239 *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3240 }
3241
3242 if (task_requests != 0) {
3243 mutex_enter(&adapter->admin_mtx);
3244 adapter->task_requests |= task_requests;
3245 ixgbe_schedule_admin_tasklet(adapter);
3246 mutex_exit(&adapter->admin_mtx);
3247 }
3248
3249 }
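
/*
 * Sketch of the admin interrupt hand-off used above (IXGBE_EICR_FOO,
 * IXGBE_EIMS_FOO, IXGBE_REQUEST_TASK_FOO and ixgbe_handle_foo() are
 * hypothetical names, shown only to illustrate the pattern): a cause is
 * kept masked via *eims_disable when its work is deferred, and re-enabled
 * in ixgbe_handle_admin() once that work has run.
 */
#if 0
	/* In ixgbe_intr_admin_common(): detect, request, keep masked. */
	if (eicr & IXGBE_EICR_FOO) {
		task_requests |= IXGBE_REQUEST_TASK_FOO;
		*eims_disable |= IXGBE_EIMS_FOO;
	}

	/* In ixgbe_handle_admin(): do the work, then re-arm the cause. */
	if ((task_requests & IXGBE_REQUEST_TASK_FOO) != 0) {
		ixgbe_handle_foo(adapter);
		eims_enable |= IXGBE_EIMS_FOO;
	}
#endif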
3250
3251 static void
3252 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3253 {
3254
3255 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3256 itr |= itr << 16;
3257 else
3258 itr |= IXGBE_EITR_CNT_WDIS;
3259
3260 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3261 }
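
/*
 * Encoding sketch: the driver's math here and in the interrupt-rate
 * sysctl handler below assumes EITR bits [11:3] hold the interrupt
 * interval in units of 2us (82598 instead mirrors the value into the
 * upper 16 bits). For example, limiting a queue to roughly 10000
 * interrupts/s ("rate" and "que" are illustrative locals):
 */
#if 0
	uint32_t rate = 10000;			/* target interrupts/sec */
	uint32_t itr = (4000000 / rate) & 0xff8; /* 400 -> 50 * 2us = 100us */

	ixgbe_eitr_write(adapter, que->msix, itr);
#endif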
3262
3263
3264 /************************************************************************
3265 * ixgbe_sysctl_interrupt_rate_handler
3266 ************************************************************************/
3267 static int
3268 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3269 {
3270 struct sysctlnode node = *rnode;
3271 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3272 struct adapter *adapter;
3273 uint32_t reg, usec, rate;
3274 int error;
3275
3276 if (que == NULL)
3277 return 0;
3278
3279 adapter = que->adapter;
3280 if (ixgbe_fw_recovery_mode_swflag(adapter))
3281 return (EPERM);
3282
3283 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3284 usec = ((reg & 0x0FF8) >> 3);
3285 if (usec > 0)
3286 rate = 500000 / usec;
3287 else
3288 rate = 0;
3289 node.sysctl_data = &rate;
3290 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3291 if (error || newp == NULL)
3292 return error;
3293 reg &= ~0xfff; /* default, no limitation */
3294 if (rate > 0 && rate < 500000) {
3295 if (rate < 1000)
3296 rate = 1000;
3297 reg |= ((4000000 / rate) & 0xff8);
3298 		/*
3299 		 * When RSC is used, the ITR interval must be larger than
3300 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3301 		 * The minimum value is always greater than 2us at 100M (and
3302 		 * perhaps 10M; not documented), but not at 1G and higher.
3303 		 */
3304 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3305 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3306 if ((adapter->num_queues > 1)
3307 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3308 return EINVAL;
3309 }
3310 ixgbe_max_interrupt_rate = rate;
3311 } else
3312 ixgbe_max_interrupt_rate = 0;
3313 ixgbe_eitr_write(adapter, que->msix, reg);
3314
3315 return (0);
3316 } /* ixgbe_sysctl_interrupt_rate_handler */
3317
3318 const struct sysctlnode *
3319 ixgbe_sysctl_instance(struct adapter *adapter)
3320 {
3321 const char *dvname;
3322 struct sysctllog **log;
3323 int rc;
3324 const struct sysctlnode *rnode;
3325
3326 if (adapter->sysctltop != NULL)
3327 return adapter->sysctltop;
3328
3329 log = &adapter->sysctllog;
3330 dvname = device_xname(adapter->dev);
3331
3332 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3333 0, CTLTYPE_NODE, dvname,
3334 SYSCTL_DESCR("ixgbe information and settings"),
3335 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3336 goto err;
3337
3338 return rnode;
3339 err:
3340 device_printf(adapter->dev,
3341 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3342 return NULL;
3343 }
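
/*
 * The node created here lives directly under "hw", named after the
 * device instance, so with a first adapter attached as ixg0 (instance
 * name shown for illustration; it depends on attachment order) the
 * per-device tree can be inspected with e.g.:
 *
 *	sysctl hw.ixg0
 *	sysctl -w hw.ixg0.debug=1
 */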
3344
3345 /************************************************************************
3346 * ixgbe_add_device_sysctls
3347 ************************************************************************/
3348 static void
3349 ixgbe_add_device_sysctls(struct adapter *adapter)
3350 {
3351 device_t dev = adapter->dev;
3352 struct ixgbe_hw *hw = &adapter->hw;
3353 struct sysctllog **log;
3354 const struct sysctlnode *rnode, *cnode;
3355
3356 log = &adapter->sysctllog;
3357
3358 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3359 aprint_error_dev(dev, "could not create sysctl root\n");
3360 return;
3361 }
3362
3363 if (sysctl_createv(log, 0, &rnode, &cnode,
3364 CTLFLAG_READWRITE, CTLTYPE_INT,
3365 "debug", SYSCTL_DESCR("Debug Info"),
3366 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL)
3367 != 0)
3368 aprint_error_dev(dev, "could not create sysctl\n");
3369
3370 if (sysctl_createv(log, 0, &rnode, &cnode,
3371 CTLFLAG_READONLY, CTLTYPE_INT,
3372 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3373 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3374 aprint_error_dev(dev, "could not create sysctl\n");
3375
3376 if (sysctl_createv(log, 0, &rnode, &cnode,
3377 CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
3378 SYSCTL_DESCR("Number of jumbo buffers per queue"),
3379 NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
3380 CTL_EOL) != 0)
3381 aprint_error_dev(dev, "could not create sysctl\n");
3382
3383 if (sysctl_createv(log, 0, &rnode, &cnode,
3384 CTLFLAG_READONLY, CTLTYPE_INT,
3385 "num_queues", SYSCTL_DESCR("Number of queues"),
3386 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3387 aprint_error_dev(dev, "could not create sysctl\n");
3388
3389 /* Sysctls for all devices */
3390 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3391 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3392 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3393 CTL_EOL) != 0)
3394 aprint_error_dev(dev, "could not create sysctl\n");
3395
3396 adapter->enable_aim = ixgbe_enable_aim;
3397 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3398 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3399 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3400 aprint_error_dev(dev, "could not create sysctl\n");
3401
3402 if (sysctl_createv(log, 0, &rnode, &cnode,
3403 CTLFLAG_READWRITE, CTLTYPE_INT,
3404 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3405 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3406 CTL_EOL) != 0)
3407 aprint_error_dev(dev, "could not create sysctl\n");
3408
3409 	/*
3410 	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3411 	 * handler, the softint/workqueue mode could flip-flop within one
3412 	 * deferred processing run. Preventing that would require
3413 	 * preempt_disable()/preempt_enable() in ixgbe_sched_handle_que()
3414 	 * to avoid tripping the KASSERT in softint_schedule().
3415 	 * Updating "que->txrx_use_workqueue" in the interrupt handler
3416 	 * instead is lighter than doing preempt_disable()/preempt_enable()
3417 	 * in every ixgbe_sched_handle_que().
3418 	 */
3419 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3420 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3421 CTLTYPE_BOOL, "txrx_workqueue",
3422 SYSCTL_DESCR("Use workqueue for packet processing"),
3423 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE,
3424 CTL_EOL) != 0)
3425 aprint_error_dev(dev, "could not create sysctl\n");
3426
3427 #ifdef IXGBE_DEBUG
3428 /* testing sysctls (for all devices) */
3429 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3430 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3431 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3432 CTL_EOL) != 0)
3433 aprint_error_dev(dev, "could not create sysctl\n");
3434
3435 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3436 CTLTYPE_STRING, "print_rss_config",
3437 SYSCTL_DESCR("Prints RSS Configuration"),
3438 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3439 CTL_EOL) != 0)
3440 aprint_error_dev(dev, "could not create sysctl\n");
3441 #endif
3442 /* for X550 series devices */
3443 if (hw->mac.type >= ixgbe_mac_X550)
3444 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3445 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3446 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3447 CTL_EOL) != 0)
3448 aprint_error_dev(dev, "could not create sysctl\n");
3449
3450 /* for WoL-capable devices */
3451 if (adapter->wol_support) {
3452 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3453 CTLTYPE_BOOL, "wol_enable",
3454 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3455 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3456 CTL_EOL) != 0)
3457 aprint_error_dev(dev, "could not create sysctl\n");
3458
3459 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3460 CTLTYPE_INT, "wufc",
3461 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3462 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3463 CTL_EOL) != 0)
3464 aprint_error_dev(dev, "could not create sysctl\n");
3465 }
3466
3467 /* for X552/X557-AT devices */
3468 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3469 const struct sysctlnode *phy_node;
3470
3471 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3472 "phy", SYSCTL_DESCR("External PHY sysctls"),
3473 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3474 aprint_error_dev(dev, "could not create sysctl\n");
3475 return;
3476 }
3477
3478 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3479 CTLTYPE_INT, "temp",
3480 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3481 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3482 CTL_EOL) != 0)
3483 aprint_error_dev(dev, "could not create sysctl\n");
3484
3485 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3486 CTLTYPE_INT, "overtemp_occurred",
3487 SYSCTL_DESCR(
3488 "External PHY High Temperature Event Occurred"),
3489 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3490 CTL_CREATE, CTL_EOL) != 0)
3491 aprint_error_dev(dev, "could not create sysctl\n");
3492 }
3493
3494 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3495 && (hw->phy.type == ixgbe_phy_fw))
3496 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3497 CTLTYPE_BOOL, "force_10_100_autonego",
3498 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3499 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3500 CTL_CREATE, CTL_EOL) != 0)
3501 aprint_error_dev(dev, "could not create sysctl\n");
3502
3503 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3504 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3505 CTLTYPE_INT, "eee_state",
3506 SYSCTL_DESCR("EEE Power Save State"),
3507 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3508 CTL_EOL) != 0)
3509 aprint_error_dev(dev, "could not create sysctl\n");
3510 }
3511 } /* ixgbe_add_device_sysctls */
3512
3513 /************************************************************************
3514 * ixgbe_allocate_pci_resources
3515 ************************************************************************/
3516 static int
3517 ixgbe_allocate_pci_resources(struct adapter *adapter,
3518 const struct pci_attach_args *pa)
3519 {
3520 pcireg_t memtype, csr;
3521 device_t dev = adapter->dev;
3522 bus_addr_t addr;
3523 int flags;
3524
3525 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3526 switch (memtype) {
3527 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3528 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3529 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3530 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3531 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3532 goto map_err;
3533 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3534 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3535 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3536 }
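		/*
		 * General PCI note (not ixgbe-specific): BAR0 maps device
		 * registers, so the mapping must stay non-prefetchable;
		 * register reads may have side effects and writes must not
		 * be merged or reordered by the bus.
		 */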
3537 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3538 adapter->osdep.mem_size, flags,
3539 &adapter->osdep.mem_bus_space_handle) != 0) {
3540 map_err:
3541 adapter->osdep.mem_size = 0;
3542 aprint_error_dev(dev, "unable to map BAR0\n");
3543 return ENXIO;
3544 }
3545 		/*
3546 		 * Enable address decoding for the memory range in case
3547 		 * the BIOS or UEFI didn't set it.
3548 		 */
3549 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3550 PCI_COMMAND_STATUS_REG);
3551 csr |= PCI_COMMAND_MEM_ENABLE;
3552 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3553 csr);
3554 break;
3555 default:
3556 aprint_error_dev(dev, "unexpected type on BAR0\n");
3557 return ENXIO;
3558 }
3559
3560 return (0);
3561 } /* ixgbe_allocate_pci_resources */
3562
3563 static void
3564 ixgbe_free_deferred_handlers(struct adapter *adapter)
3565 {
3566 struct ix_queue *que = adapter->queues;
3567 struct tx_ring *txr = adapter->tx_rings;
3568 int i;
3569
3570 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3571 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3572 if (txr->txr_si != NULL)
3573 softint_disestablish(txr->txr_si);
3574 }
3575 if (que->que_si != NULL)
3576 softint_disestablish(que->que_si);
3577 }
3578 if (adapter->txr_wq != NULL)
3579 workqueue_destroy(adapter->txr_wq);
3580 if (adapter->txr_wq_enqueued != NULL)
3581 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3582 if (adapter->que_wq != NULL)
3583 workqueue_destroy(adapter->que_wq);
3584
3585 if (adapter->admin_wq != NULL) {
3586 workqueue_destroy(adapter->admin_wq);
3587 adapter->admin_wq = NULL;
3588 }
3589 if (adapter->timer_wq != NULL) {
3590 workqueue_destroy(adapter->timer_wq);
3591 adapter->timer_wq = NULL;
3592 }
3593 if (adapter->recovery_mode_timer_wq != NULL) {
3594 /*
3595 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3596 * the recovery_mode_timer workqueue, so call it here.
3597 */
3598 workqueue_wait(adapter->recovery_mode_timer_wq,
3599 &adapter->recovery_mode_timer_wc);
3600 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3601 workqueue_destroy(adapter->recovery_mode_timer_wq);
3602 adapter->recovery_mode_timer_wq = NULL;
3603 }
3604 } /* ixgbe_free_deferred_handlers */
3605
3606 /************************************************************************
3607 * ixgbe_detach - Device removal routine
3608 *
3609 * Called when the driver is being removed.
3610 * Stops the adapter and deallocates all the resources
3611 * that were allocated for driver operation.
3612 *
3613 * return 0 on success, positive on failure
3614 ************************************************************************/
3615 static int
3616 ixgbe_detach(device_t dev, int flags)
3617 {
3618 struct adapter *adapter = device_private(dev);
3619 struct rx_ring *rxr = adapter->rx_rings;
3620 struct tx_ring *txr = adapter->tx_rings;
3621 struct ixgbe_hw *hw = &adapter->hw;
3622 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3623 u32 ctrl_ext;
3624 int i;
3625
3626 INIT_DEBUGOUT("ixgbe_detach: begin");
3627 if (adapter->osdep.attached == false)
3628 return 0;
3629
3630 if (ixgbe_pci_iov_detach(dev) != 0) {
3631 device_printf(dev, "SR-IOV in use; detach first.\n");
3632 return (EBUSY);
3633 }
3634
3635 #if NVLAN > 0
3636 /* Make sure VLANs are not using driver */
3637 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3638 ; /* nothing to do: no VLANs */
3639 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3640 vlan_ifdetach(adapter->ifp);
3641 else {
3642 aprint_error_dev(dev, "VLANs in use, detach first\n");
3643 return (EBUSY);
3644 }
3645 #endif
3646
3647 adapter->osdep.detaching = true;
3648 /*
3649 * Stop the interface. ixgbe_setup_low_power_mode() calls
3650 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3651 * directly.
3652 */
3653 ixgbe_setup_low_power_mode(adapter);
3654
3655 callout_halt(&adapter->timer, NULL);
3656 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3657 callout_halt(&adapter->recovery_mode_timer, NULL);
3658
3659 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3660 atomic_store_relaxed(&adapter->admin_pending, 0);
3661 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3662 atomic_store_relaxed(&adapter->timer_pending, 0);
3663
3664 pmf_device_deregister(dev);
3665
3666 ether_ifdetach(adapter->ifp);
3667
3668 ixgbe_free_deferred_handlers(adapter);
3669
3670 /* let hardware know driver is unloading */
3671 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3672 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3674
3675 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3676 netmap_detach(adapter->ifp);
3677
3678 ixgbe_free_pci_resources(adapter);
3679 #if 0 /* XXX the NetBSD port is probably missing something here */
3680 bus_generic_detach(dev);
3681 #endif
3682 if_detach(adapter->ifp);
3683 ifmedia_fini(&adapter->media);
3684 if_percpuq_destroy(adapter->ipq);
3685
3686 sysctl_teardown(&adapter->sysctllog);
3687 evcnt_detach(&adapter->efbig_tx_dma_setup);
3688 evcnt_detach(&adapter->mbuf_defrag_failed);
3689 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3690 evcnt_detach(&adapter->einval_tx_dma_setup);
3691 evcnt_detach(&adapter->other_tx_dma_setup);
3692 evcnt_detach(&adapter->eagain_tx_dma_setup);
3693 evcnt_detach(&adapter->enomem_tx_dma_setup);
3694 evcnt_detach(&adapter->watchdog_events);
3695 evcnt_detach(&adapter->tso_err);
3696 evcnt_detach(&adapter->admin_irqev);
3697 evcnt_detach(&adapter->link_workev);
3698 evcnt_detach(&adapter->mod_workev);
3699 evcnt_detach(&adapter->msf_workev);
3700 evcnt_detach(&adapter->phy_workev);
3701
3702 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3703 if (i < __arraycount(stats->mpc)) {
3704 evcnt_detach(&stats->mpc[i]);
3705 if (hw->mac.type == ixgbe_mac_82598EB)
3706 evcnt_detach(&stats->rnbc[i]);
3707 }
3708 if (i < __arraycount(stats->pxontxc)) {
3709 evcnt_detach(&stats->pxontxc[i]);
3710 evcnt_detach(&stats->pxonrxc[i]);
3711 evcnt_detach(&stats->pxofftxc[i]);
3712 evcnt_detach(&stats->pxoffrxc[i]);
3713 if (hw->mac.type >= ixgbe_mac_82599EB)
3714 evcnt_detach(&stats->pxon2offc[i]);
3715 }
3716 }
3717
3718 txr = adapter->tx_rings;
3719 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3720 evcnt_detach(&adapter->queues[i].irqs);
3721 evcnt_detach(&adapter->queues[i].handleq);
3722 evcnt_detach(&adapter->queues[i].req);
3723 evcnt_detach(&txr->no_desc_avail);
3724 evcnt_detach(&txr->total_packets);
3725 evcnt_detach(&txr->tso_tx);
3726 #ifndef IXGBE_LEGACY_TX
3727 evcnt_detach(&txr->pcq_drops);
3728 #endif
3729
3730 if (i < __arraycount(stats->qprc)) {
3731 evcnt_detach(&stats->qprc[i]);
3732 evcnt_detach(&stats->qptc[i]);
3733 evcnt_detach(&stats->qbrc[i]);
3734 evcnt_detach(&stats->qbtc[i]);
3735 if (hw->mac.type >= ixgbe_mac_82599EB)
3736 evcnt_detach(&stats->qprdc[i]);
3737 }
3738
3739 evcnt_detach(&rxr->rx_packets);
3740 evcnt_detach(&rxr->rx_bytes);
3741 evcnt_detach(&rxr->rx_copies);
3742 evcnt_detach(&rxr->no_jmbuf);
3743 evcnt_detach(&rxr->rx_discarded);
3744 }
3745 evcnt_detach(&stats->ipcs);
3746 evcnt_detach(&stats->l4cs);
3747 evcnt_detach(&stats->ipcs_bad);
3748 evcnt_detach(&stats->l4cs_bad);
3749 evcnt_detach(&stats->intzero);
3750 evcnt_detach(&stats->legint);
3751 evcnt_detach(&stats->crcerrs);
3752 evcnt_detach(&stats->illerrc);
3753 evcnt_detach(&stats->errbc);
3754 evcnt_detach(&stats->mspdc);
3755 if (hw->mac.type >= ixgbe_mac_X550)
3756 evcnt_detach(&stats->mbsdc);
3757 evcnt_detach(&stats->mpctotal);
3758 evcnt_detach(&stats->mlfc);
3759 evcnt_detach(&stats->mrfc);
3760 evcnt_detach(&stats->rlec);
3761 evcnt_detach(&stats->lxontxc);
3762 evcnt_detach(&stats->lxonrxc);
3763 evcnt_detach(&stats->lxofftxc);
3764 evcnt_detach(&stats->lxoffrxc);
3765
3766 /* Packet Reception Stats */
3767 evcnt_detach(&stats->tor);
3768 evcnt_detach(&stats->gorc);
3769 evcnt_detach(&stats->tpr);
3770 evcnt_detach(&stats->gprc);
3771 evcnt_detach(&stats->mprc);
3772 evcnt_detach(&stats->bprc);
3773 evcnt_detach(&stats->prc64);
3774 evcnt_detach(&stats->prc127);
3775 evcnt_detach(&stats->prc255);
3776 evcnt_detach(&stats->prc511);
3777 evcnt_detach(&stats->prc1023);
3778 evcnt_detach(&stats->prc1522);
3779 evcnt_detach(&stats->ruc);
3780 evcnt_detach(&stats->rfc);
3781 evcnt_detach(&stats->roc);
3782 evcnt_detach(&stats->rjc);
3783 evcnt_detach(&stats->mngprc);
3784 evcnt_detach(&stats->mngpdc);
3785 evcnt_detach(&stats->xec);
3786
3787 /* Packet Transmission Stats */
3788 evcnt_detach(&stats->gotc);
3789 evcnt_detach(&stats->tpt);
3790 evcnt_detach(&stats->gptc);
3791 evcnt_detach(&stats->bptc);
3792 evcnt_detach(&stats->mptc);
3793 evcnt_detach(&stats->mngptc);
3794 evcnt_detach(&stats->ptc64);
3795 evcnt_detach(&stats->ptc127);
3796 evcnt_detach(&stats->ptc255);
3797 evcnt_detach(&stats->ptc511);
3798 evcnt_detach(&stats->ptc1023);
3799 evcnt_detach(&stats->ptc1522);
3800
3801 ixgbe_free_queues(adapter);
3802 free(adapter->mta, M_DEVBUF);
3803
3804 mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
3805 IXGBE_CORE_LOCK_DESTROY(adapter);
3806
3807 return (0);
3808 } /* ixgbe_detach */
3809
3810 /************************************************************************
3811 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3812 *
3813 * Prepare the adapter/port for LPLU and/or WoL
3814 ************************************************************************/
3815 static int
3816 ixgbe_setup_low_power_mode(struct adapter *adapter)
3817 {
3818 struct ixgbe_hw *hw = &adapter->hw;
3819 device_t dev = adapter->dev;
3820 struct ifnet *ifp = adapter->ifp;
3821 s32 error = 0;
3822
3823 /* Limit power management flow to X550EM baseT */
3824 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3825 hw->phy.ops.enter_lplu) {
3826 /* X550EM baseT adapters need a special LPLU flow */
3827 hw->phy.reset_disable = true;
3828 ixgbe_ifstop(ifp, 1);
3829 error = hw->phy.ops.enter_lplu(hw);
3830 if (error)
3831 device_printf(dev,
3832 "Error entering LPLU: %d\n", error);
3833 hw->phy.reset_disable = false;
3834 } else {
3835 /* Just stop for other adapters */
3836 ixgbe_ifstop(ifp, 1);
3837 }
3838
3839 IXGBE_CORE_LOCK(adapter);
3840
3841 if (!hw->wol_enabled) {
3842 ixgbe_set_phy_power(hw, FALSE);
3843 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3844 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3845 } else {
3846 /* Turn off support for APM wakeup. (Using ACPI instead) */
3847 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3848 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3849
3850 /*
3851 * Clear Wake Up Status register to prevent any previous wakeup
3852 * events from waking us up immediately after we suspend.
3853 */
3854 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3855
3856 /*
3857 * Program the Wakeup Filter Control register with user filter
3858 * settings
3859 */
3860 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3861
3862 /* Enable wakeups and power management in Wakeup Control */
3863 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3864 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3865
3866 }
3867
3868 IXGBE_CORE_UNLOCK(adapter);
3869
3870 return error;
3871 } /* ixgbe_setup_low_power_mode */
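
/*
 * Wake filter sketch: adapter->wufc is written verbatim to the WUFC
 * register above, so a configuration that wakes the system on link
 * change or a magic packet would look like this (illustrative only;
 * normally wufc is set through the "wufc" sysctl):
 */
#if 0
	adapter->wufc = IXGBE_WUFC_LNKC | IXGBE_WUFC_MAG;
	(void)ixgbe_setup_low_power_mode(adapter);	/* arms WUFC/WUC */
#endif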
3872
3873 /************************************************************************
3874 * ixgbe_shutdown - Shutdown entry point
3875 ************************************************************************/
3876 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3877 static int
3878 ixgbe_shutdown(device_t dev)
3879 {
3880 struct adapter *adapter = device_private(dev);
3881 int error = 0;
3882
3883 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3884
3885 error = ixgbe_setup_low_power_mode(adapter);
3886
3887 return (error);
3888 } /* ixgbe_shutdown */
3889 #endif
3890
3891 /************************************************************************
3892 * ixgbe_suspend
3893 *
3894 * From D0 to D3
3895 ************************************************************************/
3896 static bool
3897 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3898 {
3899 struct adapter *adapter = device_private(dev);
3900 int error = 0;
3901
3902 INIT_DEBUGOUT("ixgbe_suspend: begin");
3903
3904 error = ixgbe_setup_low_power_mode(adapter);
3905
3906 	return (error == 0);	/* pmf(9) expects true on success */
3907 } /* ixgbe_suspend */
3908
3909 /************************************************************************
3910 * ixgbe_resume
3911 *
3912 * From D3 to D0
3913 ************************************************************************/
3914 static bool
3915 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3916 {
3917 struct adapter *adapter = device_private(dev);
3918 struct ifnet *ifp = adapter->ifp;
3919 struct ixgbe_hw *hw = &adapter->hw;
3920 u32 wus;
3921
3922 INIT_DEBUGOUT("ixgbe_resume: begin");
3923
3924 IXGBE_CORE_LOCK(adapter);
3925
3926 /* Read & clear WUS register */
3927 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3928 if (wus)
3929 		device_printf(dev, "Woken up by (WUS): %#010x\n",
3930 		    wus);
3931 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3932 /* And clear WUFC until next low-power transition */
3933 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3934
3935 /*
3936 * Required after D3->D0 transition;
3937 * will re-advertise all previous advertised speeds
3938 */
3939 if (ifp->if_flags & IFF_UP)
3940 ixgbe_init_locked(adapter);
3941
3942 IXGBE_CORE_UNLOCK(adapter);
3943
3944 return true;
3945 } /* ixgbe_resume */
3946
3947 /*
3948 * Set the various hardware offload abilities.
3949 *
3950 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3951 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3952 * mbuf offload flags the driver will understand.
3953 */
3954 static void
3955 ixgbe_set_if_hwassist(struct adapter *adapter)
3956 {
3957 /* XXX */
3958 }
3959
3960 /************************************************************************
3961 * ixgbe_init_locked - Init entry point
3962 *
3963  * Used in two ways: It is used by the stack as an init
3964  * entry point in the network interface structure. It is
3965  * also used by the driver as a hw/sw initialization
3966  * routine to get to a consistent state.
3967  *
3968  * Returns nothing; on failure it stops the adapter itself.
3969 ************************************************************************/
3970 static void
3971 ixgbe_init_locked(struct adapter *adapter)
3972 {
3973 struct ifnet *ifp = adapter->ifp;
3974 device_t dev = adapter->dev;
3975 struct ixgbe_hw *hw = &adapter->hw;
3976 struct ix_queue *que;
3977 struct tx_ring *txr;
3978 struct rx_ring *rxr;
3979 u32 txdctl, mhadd;
3980 u32 rxdctl, rxctrl;
3981 u32 ctrl_ext;
3982 bool unsupported_sfp = false;
3983 int i, j, error;
3984
3985 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3986
3987 KASSERT(mutex_owned(&adapter->core_mtx));
3988 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3989
3990 hw->need_unsupported_sfp_recovery = false;
3991 hw->adapter_stopped = FALSE;
3992 ixgbe_stop_adapter(hw);
3993 callout_stop(&adapter->timer);
3994 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3995 callout_stop(&adapter->recovery_mode_timer);
3996 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3997 que->disabled_count = 0;
3998
3999 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
4000 adapter->max_frame_size =
4001 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4002
4003 /* Queue indices may change with IOV mode */
4004 ixgbe_align_all_queue_indices(adapter);
4005
4006 /* reprogram the RAR[0] in case user changed it. */
4007 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
4008
4009 /* Get the latest mac address, User can use a LAA */
4010 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
4011 IXGBE_ETH_LENGTH_OF_ADDRESS);
4012 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
4013 hw->addr_ctrl.rar_used_count = 1;
4014
4015 /* Set hardware offload abilities from ifnet flags */
4016 ixgbe_set_if_hwassist(adapter);
4017
4018 /* Prepare transmit descriptors and buffers */
4019 if (ixgbe_setup_transmit_structures(adapter)) {
4020 device_printf(dev, "Could not setup transmit structures\n");
4021 ixgbe_stop_locked(adapter);
4022 return;
4023 }
4024
4025 ixgbe_init_hw(hw);
4026
4027 ixgbe_initialize_iov(adapter);
4028
4029 ixgbe_initialize_transmit_units(adapter);
4030
4031 /* Setup Multicast table */
4032 ixgbe_set_rxfilter(adapter);
4033
4034 /* Determine the correct mbuf pool, based on frame size */
4035 if (adapter->max_frame_size <= MCLBYTES)
4036 adapter->rx_mbuf_sz = MCLBYTES;
4037 else
4038 adapter->rx_mbuf_sz = MJUMPAGESIZE;
4039
4040 /* Prepare receive descriptors and buffers */
4041 error = ixgbe_setup_receive_structures(adapter);
4042 if (error) {
4043 device_printf(dev,
4044 "Could not setup receive structures (err = %d)\n", error);
4045 ixgbe_stop_locked(adapter);
4046 return;
4047 }
4048
4049 /* Configure RX settings */
4050 ixgbe_initialize_receive_units(adapter);
4051
4052 	/* Initialize the variable holding task enqueue requests */
4053 adapter->task_requests = 0;
4054
4055 /* Enable SDP & MSI-X interrupts based on adapter */
4056 ixgbe_config_gpie(adapter);
4057
4058 /* Set MTU size */
4059 if (ifp->if_mtu > ETHERMTU) {
4060 /* aka IXGBE_MAXFRS on 82599 and newer */
4061 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4062 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4063 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4064 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4065 }
4066
4067 /* Now enable all the queues */
4068 for (i = 0; i < adapter->num_queues; i++) {
4069 txr = &adapter->tx_rings[i];
4070 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4071 txdctl |= IXGBE_TXDCTL_ENABLE;
4072 /* Set WTHRESH to 8, burst writeback */
4073 txdctl |= (8 << 16);
4074 /*
4075 * When the internal queue falls below PTHRESH (32),
4076 * start prefetching as long as there are at least
4077 		 * HTHRESH (1) buffers ready. The values are taken
4078 		 * from the Intel Linux driver 3.8.21.
4079 * Prefetching enables tx line rate even with 1 queue.
4080 */
4081 txdctl |= (32 << 0) | (1 << 8);
4082 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4083 }
4084
4085 for (i = 0; i < adapter->num_queues; i++) {
4086 rxr = &adapter->rx_rings[i];
4087 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4088 if (hw->mac.type == ixgbe_mac_82598EB) {
4089 /*
4090 * PTHRESH = 21
4091 * HTHRESH = 4
4092 * WTHRESH = 8
4093 */
4094 rxdctl &= ~0x3FFFFF;
4095 rxdctl |= 0x080420;
4096 }
4097 rxdctl |= IXGBE_RXDCTL_ENABLE;
4098 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4099 for (j = 0; j < 10; j++) {
4100 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4101 IXGBE_RXDCTL_ENABLE)
4102 break;
4103 else
4104 msec_delay(1);
4105 }
4106 IXGBE_WRITE_BARRIER(hw);
4107
4108 /*
4109 * In netmap mode, we must preserve the buffers made
4110 * available to userspace before the if_init()
4111 * (this is true by default on the TX side, because
4112 * init makes all buffers available to userspace).
4113 *
4114 * netmap_reset() and the device specific routines
4115 * (e.g. ixgbe_setup_receive_rings()) map these
4116 * buffers at the end of the NIC ring, so here we
4117 * must set the RDT (tail) register to make sure
4118 * they are not overwritten.
4119 *
4120 * In this driver the NIC ring starts at RDH = 0,
4121 * RDT points to the last slot available for reception (?),
4122 * so RDT = num_rx_desc - 1 means the whole ring is available.
4123 */
4124 #ifdef DEV_NETMAP
4125 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4126 (ifp->if_capenable & IFCAP_NETMAP)) {
4127 struct netmap_adapter *na = NA(adapter->ifp);
4128 struct netmap_kring *kring = na->rx_rings[i];
4129 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4130
4131 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4132 } else
4133 #endif /* DEV_NETMAP */
4134 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4135 adapter->num_rx_desc - 1);
4136 }
4137
4138 /* Enable Receive engine */
4139 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4140 if (hw->mac.type == ixgbe_mac_82598EB)
4141 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4142 rxctrl |= IXGBE_RXCTRL_RXEN;
4143 ixgbe_enable_rx_dma(hw, rxctrl);
4144
4145 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4146 atomic_store_relaxed(&adapter->timer_pending, 0);
4147 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4148 callout_reset(&adapter->recovery_mode_timer, hz,
4149 ixgbe_recovery_mode_timer, adapter);
4150
4151 /* Set up MSI/MSI-X routing */
4152 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4153 ixgbe_configure_ivars(adapter);
4154 /* Set up auto-mask */
4155 if (hw->mac.type == ixgbe_mac_82598EB)
4156 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4157 else {
4158 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4159 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4160 }
4161 } else { /* Simple settings for Legacy/MSI */
4162 ixgbe_set_ivar(adapter, 0, 0, 0);
4163 ixgbe_set_ivar(adapter, 0, 0, 1);
4164 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4165 }
4166
4167 ixgbe_init_fdir(adapter);
4168
4169 /*
4170 * Check on any SFP devices that
4171 * need to be kick-started
4172 */
4173 if (hw->phy.type == ixgbe_phy_none) {
4174 error = hw->phy.ops.identify(hw);
4175 if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
4176 unsupported_sfp = true;
4177 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4178 unsupported_sfp = true;
4179
4180 if (unsupported_sfp)
4181 device_printf(dev,
4182 "Unsupported SFP+ module type was detected.\n");
4183
4184 /* Set moderation on the Link interrupt */
4185 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4186
4187 /* Enable EEE power saving */
4188 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4189 hw->mac.ops.setup_eee(hw,
4190 adapter->feat_en & IXGBE_FEATURE_EEE);
4191
4192 /* Enable power to the phy. */
4193 if (!unsupported_sfp) {
4194 ixgbe_set_phy_power(hw, TRUE);
4195
4196 /* Config/Enable Link */
4197 ixgbe_config_link(adapter);
4198 }
4199
4200 /* Hardware Packet Buffer & Flow Control setup */
4201 ixgbe_config_delay_values(adapter);
4202
4203 /* Initialize the FC settings */
4204 ixgbe_start_hw(hw);
4205
4206 /* Set up VLAN support and filter */
4207 ixgbe_setup_vlan_hw_support(adapter);
4208
4209 /* Setup DMA Coalescing */
4210 ixgbe_config_dmac(adapter);
4211
4212 /* OK to schedule workqueues. */
4213 adapter->schedule_wqs_ok = true;
4214
4215 /* And now turn on interrupts */
4216 ixgbe_enable_intr(adapter);
4217
4218 /* Enable the use of the MBX by the VF's */
4219 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4220 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4221 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4222 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4223 }
4224
4225 /* Update saved flags. See ixgbe_ifflags_cb() */
4226 adapter->if_flags = ifp->if_flags;
4227 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4228
4229 /* Now inform the stack we're ready */
4230 ifp->if_flags |= IFF_RUNNING;
4231
4232 return;
4233 } /* ixgbe_init_locked */
4234
4235 /************************************************************************
4236 * ixgbe_init
4237 ************************************************************************/
4238 static int
4239 ixgbe_init(struct ifnet *ifp)
4240 {
4241 struct adapter *adapter = ifp->if_softc;
4242
4243 IXGBE_CORE_LOCK(adapter);
4244 ixgbe_init_locked(adapter);
4245 IXGBE_CORE_UNLOCK(adapter);
4246
4247 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4248 } /* ixgbe_init */
4249
4250 /************************************************************************
4251 * ixgbe_set_ivar
4252 *
4253 * Setup the correct IVAR register for a particular MSI-X interrupt
4254 * (yes this is all very magic and confusing :)
4255 * - entry is the register array entry
4256 * - vector is the MSI-X vector for this queue
4257 * - type is RX/TX/MISC
4258 ************************************************************************/
4259 static void
4260 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4261 {
4262 struct ixgbe_hw *hw = &adapter->hw;
4263 u32 ivar, index;
4264
4265 vector |= IXGBE_IVAR_ALLOC_VAL;
4266
4267 switch (hw->mac.type) {
4268 case ixgbe_mac_82598EB:
4269 if (type == -1)
4270 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4271 else
4272 entry += (type * 64);
4273 index = (entry >> 2) & 0x1F;
4274 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4275 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4276 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4277 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4278 break;
4279 case ixgbe_mac_82599EB:
4280 case ixgbe_mac_X540:
4281 case ixgbe_mac_X550:
4282 case ixgbe_mac_X550EM_x:
4283 case ixgbe_mac_X550EM_a:
4284 if (type == -1) { /* MISC IVAR */
4285 index = (entry & 1) * 8;
4286 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4287 ivar &= ~(0xffUL << index);
4288 ivar |= ((u32)vector << index);
4289 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4290 } else { /* RX/TX IVARS */
4291 index = (16 * (entry & 1)) + (8 * type);
4292 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4293 ivar &= ~(0xffUL << index);
4294 ivar |= ((u32)vector << index);
4295 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4296 }
4297 break;
4298 default:
4299 break;
4300 }
4301 } /* ixgbe_set_ivar */
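
/*
 * Layout sketch for 82599 and newer: each IVAR(n) packs four 8-bit
 * entries covering two queues (RX in bytes 0 and 2, TX in bytes 1 and
 * 3). E.g. binding RX queue 3 to MSI-X vector 5 works out to:
 *
 *	entry = 3, type = 0 (RX)
 *	index = 16 * (3 & 1) + 8 * 0 = 16	-> bits 23:16
 *	register = IVAR(3 >> 1) = IVAR(1)
 *	field    = 5 | IXGBE_IVAR_ALLOC_VAL
 */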
4302
4303 /************************************************************************
4304 * ixgbe_configure_ivars
4305 ************************************************************************/
4306 static void
4307 ixgbe_configure_ivars(struct adapter *adapter)
4308 {
4309 struct ix_queue *que = adapter->queues;
4310 u32 newitr;
4311
4312 if (ixgbe_max_interrupt_rate > 0)
4313 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4314 else {
4315 /*
4316 * Disable DMA coalescing if interrupt moderation is
4317 * disabled.
4318 */
4319 adapter->dmac = 0;
4320 newitr = 0;
4321 }
4322
4323 for (int i = 0; i < adapter->num_queues; i++, que++) {
4324 struct rx_ring *rxr = &adapter->rx_rings[i];
4325 struct tx_ring *txr = &adapter->tx_rings[i];
4326 /* First the RX queue entry */
4327 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4328 /* ... and the TX */
4329 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4330 /* Set an Initial EITR value */
4331 ixgbe_eitr_write(adapter, que->msix, newitr);
4332 		/*
4333 		 * Clear this to eliminate the influence of the
4334 		 * previous state. At this point the Tx/Rx interrupt
4335 		 * handler (ixgbe_msix_que()) cannot be called, so
4336 		 * neither IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4337 		 */
4338 que->eitr_setting = 0;
4339 }
4340
4341 /* For the Link interrupt */
4342 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4343 } /* ixgbe_configure_ivars */
4344
4345 /************************************************************************
4346 * ixgbe_config_gpie
4347 ************************************************************************/
4348 static void
4349 ixgbe_config_gpie(struct adapter *adapter)
4350 {
4351 struct ixgbe_hw *hw = &adapter->hw;
4352 u32 gpie;
4353
4354 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4355
4356 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4357 /* Enable Enhanced MSI-X mode */
4358 gpie |= IXGBE_GPIE_MSIX_MODE
4359 | IXGBE_GPIE_EIAME
4360 | IXGBE_GPIE_PBA_SUPPORT
4361 | IXGBE_GPIE_OCD;
4362 }
4363
4364 /* Fan Failure Interrupt */
4365 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4366 gpie |= IXGBE_SDP1_GPIEN;
4367
4368 /* Thermal Sensor Interrupt */
4369 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4370 gpie |= IXGBE_SDP0_GPIEN_X540;
4371
4372 /* Link detection */
4373 switch (hw->mac.type) {
4374 case ixgbe_mac_82599EB:
4375 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4376 break;
4377 case ixgbe_mac_X550EM_x:
4378 case ixgbe_mac_X550EM_a:
4379 gpie |= IXGBE_SDP0_GPIEN_X540;
4380 break;
4381 default:
4382 break;
4383 }
4384
4385 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4386
4387 } /* ixgbe_config_gpie */
4388
4389 /************************************************************************
4390 * ixgbe_config_delay_values
4391 *
4392 * Requires adapter->max_frame_size to be set.
4393 ************************************************************************/
4394 static void
4395 ixgbe_config_delay_values(struct adapter *adapter)
4396 {
4397 struct ixgbe_hw *hw = &adapter->hw;
4398 u32 rxpb, frame, size, tmp;
4399
4400 frame = adapter->max_frame_size;
4401
4402 /* Calculate High Water */
4403 switch (hw->mac.type) {
4404 case ixgbe_mac_X540:
4405 case ixgbe_mac_X550:
4406 case ixgbe_mac_X550EM_x:
4407 case ixgbe_mac_X550EM_a:
4408 tmp = IXGBE_DV_X540(frame, frame);
4409 break;
4410 default:
4411 tmp = IXGBE_DV(frame, frame);
4412 break;
4413 }
4414 size = IXGBE_BT2KB(tmp);
4415 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4416 hw->fc.high_water[0] = rxpb - size;
4417
4418 /* Now calculate Low Water */
4419 switch (hw->mac.type) {
4420 case ixgbe_mac_X540:
4421 case ixgbe_mac_X550:
4422 case ixgbe_mac_X550EM_x:
4423 case ixgbe_mac_X550EM_a:
4424 tmp = IXGBE_LOW_DV_X540(frame);
4425 break;
4426 default:
4427 tmp = IXGBE_LOW_DV(frame);
4428 break;
4429 }
4430 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4431
4432 hw->fc.pause_time = IXGBE_FC_PAUSE;
4433 hw->fc.send_xon = TRUE;
4434 } /* ixgbe_config_delay_values */
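
/*
 * Units sketch (numbers are illustrative, not from a datasheet): the
 * DV macros yield a delay value that IXGBE_BT2KB() converts to KB, and
 * RXPBSIZE(0) >> 10 is the RX packet buffer size in KB, so high_water[0]
 * ends up as "buffer size minus delay headroom". E.g. a 512KB buffer
 * with a 40KB delay value gives a high water mark of 512 - 40 = 472KB.
 */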
4435
4436 /************************************************************************
4437 * ixgbe_set_rxfilter - Multicast Update
4438 *
4439 * Called whenever multicast address list is updated.
4440 ************************************************************************/
4441 static void
4442 ixgbe_set_rxfilter(struct adapter *adapter)
4443 {
4444 struct ixgbe_mc_addr *mta;
4445 struct ifnet *ifp = adapter->ifp;
4446 u8 *update_ptr;
4447 int mcnt = 0;
4448 u32 fctrl;
4449 struct ethercom *ec = &adapter->osdep.ec;
4450 struct ether_multi *enm;
4451 struct ether_multistep step;
4452
4453 KASSERT(mutex_owned(&adapter->core_mtx));
4454 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4455
4456 mta = adapter->mta;
4457 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4458
4459 ETHER_LOCK(ec);
4460 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4461 ETHER_FIRST_MULTI(step, ec, enm);
4462 while (enm != NULL) {
4463 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4464 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4465 ETHER_ADDR_LEN) != 0)) {
4466 ec->ec_flags |= ETHER_F_ALLMULTI;
4467 break;
4468 }
4469 bcopy(enm->enm_addrlo,
4470 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4471 mta[mcnt].vmdq = adapter->pool;
4472 mcnt++;
4473 ETHER_NEXT_MULTI(step, enm);
4474 }
4475
4476 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4477 if (ifp->if_flags & IFF_PROMISC)
4478 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4479 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4480 fctrl |= IXGBE_FCTRL_MPE;
4481 fctrl &= ~IXGBE_FCTRL_UPE;
4482 } else
4483 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4484
4485 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4486
4487 /* Update multicast filter entries only when it's not ALLMULTI */
4488 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4489 ETHER_UNLOCK(ec);
4490 update_ptr = (u8 *)mta;
4491 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4492 ixgbe_mc_array_itr, TRUE);
4493 } else
4494 ETHER_UNLOCK(ec);
4495 } /* ixgbe_set_rxfilter */
4496
4497 /************************************************************************
4498 * ixgbe_mc_array_itr
4499 *
4500 * An iterator function needed by the multicast shared code.
4501 * It feeds the shared code routine the addresses in the
4502 * array of ixgbe_set_rxfilter() one by one.
4503 ************************************************************************/
4504 static u8 *
4505 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4506 {
4507 struct ixgbe_mc_addr *mta;
4508
4509 mta = (struct ixgbe_mc_addr *)*update_ptr;
4510 *vmdq = mta->vmdq;
4511
4512 *update_ptr = (u8*)(mta + 1);
4513
4514 return (mta->addr);
4515 } /* ixgbe_mc_array_itr */
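
/*
 * Iteration sketch: ixgbe_update_mc_addr_list() invokes this iterator
 * once per entry; each call returns the current address and advances
 * the cursor by one struct ixgbe_mc_addr ("n"/"addr" are illustrative):
 */
#if 0
	u8 *update_ptr = (u8 *)mta;
	u32 vmdq;

	for (int n = 0; n < mcnt; n++) {
		u8 *addr = ixgbe_mc_array_itr(hw, &update_ptr, &vmdq);
		/* program addr/vmdq into the n-th filter slot */
	}
#endif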
4516
4517 /************************************************************************
4518 * ixgbe_local_timer - Timer routine
4519 *
4520 * Checks for link status, updates statistics,
4521 * and runs the watchdog check.
4522 ************************************************************************/
4523 static void
4524 ixgbe_local_timer(void *arg)
4525 {
4526 struct adapter *adapter = arg;
4527
4528 if (adapter->schedule_wqs_ok) {
4529 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4530 workqueue_enqueue(adapter->timer_wq,
4531 &adapter->timer_wc, NULL);
4532 }
4533 }
4534
4535 static void
4536 ixgbe_handle_timer(struct work *wk, void *context)
4537 {
4538 struct adapter *adapter = context;
4539 struct ixgbe_hw *hw = &adapter->hw;
4540 device_t dev = adapter->dev;
4541 struct ix_queue *que = adapter->queues;
4542 u64 queues = 0;
4543 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4544 int hung = 0;
4545 int i;
4546
4547 IXGBE_CORE_LOCK(adapter);
4548
4549 /* Check for pluggable optics */
4550 if (ixgbe_is_sfp(hw)) {
4551 bool sched_mod_task = false;
4552
4553 if (hw->mac.type == ixgbe_mac_82598EB) {
4554 /*
4555 			 * On 82598EB, the SFP+'s MOD_ABS pin is not connected
4556 			 * to any GPIO (SDP), so just schedule TASK_MOD.
4557 */
4558 sched_mod_task = true;
4559 } else {
4560 bool was_full, is_full;
4561
4562 was_full =
4563 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4564 is_full = ixgbe_sfp_cage_full(hw);
4565
4566 /* Do probe if cage state changed */
4567 if (was_full ^ is_full)
4568 sched_mod_task = true;
4569 }
4570 if (sched_mod_task) {
4571 mutex_enter(&adapter->admin_mtx);
4572 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4573 ixgbe_schedule_admin_tasklet(adapter);
4574 mutex_exit(&adapter->admin_mtx);
4575 }
4576 }
4577
4578 ixgbe_update_link_status(adapter);
4579 ixgbe_update_stats_counters(adapter);
4580
4581 /* Update some event counters */
4582 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4583 que = adapter->queues;
4584 for (i = 0; i < adapter->num_queues; i++, que++) {
4585 struct tx_ring *txr = que->txr;
4586
4587 v0 += txr->q_efbig_tx_dma_setup;
4588 v1 += txr->q_mbuf_defrag_failed;
4589 v2 += txr->q_efbig2_tx_dma_setup;
4590 v3 += txr->q_einval_tx_dma_setup;
4591 v4 += txr->q_other_tx_dma_setup;
4592 v5 += txr->q_eagain_tx_dma_setup;
4593 v6 += txr->q_enomem_tx_dma_setup;
4594 v7 += txr->q_tso_err;
4595 }
4596 adapter->efbig_tx_dma_setup.ev_count = v0;
4597 adapter->mbuf_defrag_failed.ev_count = v1;
4598 adapter->efbig2_tx_dma_setup.ev_count = v2;
4599 adapter->einval_tx_dma_setup.ev_count = v3;
4600 adapter->other_tx_dma_setup.ev_count = v4;
4601 adapter->eagain_tx_dma_setup.ev_count = v5;
4602 adapter->enomem_tx_dma_setup.ev_count = v6;
4603 adapter->tso_err.ev_count = v7;
4604
4605 /*
4606 * Check the TX queues status
4607 * - mark hung queues so we don't schedule on them
4608 * - watchdog only if all queues show hung
4609 */
4610 que = adapter->queues;
4611 for (i = 0; i < adapter->num_queues; i++, que++) {
4612 /* Keep track of queues with work for soft irq */
4613 if (que->txr->busy)
4614 queues |= 1ULL << que->me;
4615 /*
4616 		 * Each time txeof runs without cleaning while there
4617 		 * are uncleaned descriptors, it increments busy. If
4618 		 * we reach the MAX we declare the queue hung.
4619 */
4620 if (que->busy == IXGBE_QUEUE_HUNG) {
4621 ++hung;
4622 /* Mark the queue as inactive */
4623 adapter->active_queues &= ~(1ULL << que->me);
4624 continue;
4625 } else {
4626 /* Check if we've come back from hung */
4627 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4628 adapter->active_queues |= 1ULL << que->me;
4629 }
4630 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4631 device_printf(dev,
4632 "Warning queue %d appears to be hung!\n", i);
4633 que->txr->busy = IXGBE_QUEUE_HUNG;
4634 ++hung;
4635 }
4636 }
4637
4638 /* Only truly watchdog if all queues show hung */
4639 if (hung == adapter->num_queues)
4640 goto watchdog;
4641 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4642 else if (queues != 0) { /* Force an IRQ on queues with work */
4643 que = adapter->queues;
4644 for (i = 0; i < adapter->num_queues; i++, que++) {
4645 mutex_enter(&que->dc_mtx);
4646 if (que->disabled_count == 0)
4647 ixgbe_rearm_queues(adapter,
4648 queues & ((u64)1 << i));
4649 mutex_exit(&que->dc_mtx);
4650 }
4651 }
4652 #endif
4653
4654 atomic_store_relaxed(&adapter->timer_pending, 0);
4655 IXGBE_CORE_UNLOCK(adapter);
4656 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4657 return;
4658
4659 watchdog:
4660 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4661 adapter->ifp->if_flags &= ~IFF_RUNNING;
4662 adapter->watchdog_events.ev_count++;
4663 ixgbe_init_locked(adapter);
4664 IXGBE_CORE_UNLOCK(adapter);
4665 } /* ixgbe_handle_timer */
4666
4667 /************************************************************************
4668 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4669 ************************************************************************/
4670 static void
4671 ixgbe_recovery_mode_timer(void *arg)
4672 {
4673 struct adapter *adapter = arg;
4674
4675 if (__predict_true(adapter->osdep.detaching == false)) {
4676 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4677 0, 1) == 0) {
4678 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4679 &adapter->recovery_mode_timer_wc, NULL);
4680 }
4681 }
4682 }
4683
4684 static void
4685 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4686 {
4687 struct adapter *adapter = context;
4688 struct ixgbe_hw *hw = &adapter->hw;
4689
4690 IXGBE_CORE_LOCK(adapter);
4691 if (ixgbe_fw_recovery_mode(hw)) {
4692 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1) == 0) {
4693 /* Firmware error detected, entering recovery mode */
4694 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4695
4696 if (hw->adapter_stopped == FALSE)
4697 ixgbe_stop_locked(adapter);
4698 }
4699 } else
4700 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4701
4702 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4703 callout_reset(&adapter->recovery_mode_timer, hz,
4704 ixgbe_recovery_mode_timer, adapter);
4705 IXGBE_CORE_UNLOCK(adapter);
4706 } /* ixgbe_handle_recovery_mode_timer */
4707
4708 /************************************************************************
4709 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4710 * bool int_en: true if it's called when the interrupt is enabled.
4711 ************************************************************************/
4712 static void
4713 ixgbe_handle_mod(void *context, bool int_en)
4714 {
4715 struct adapter *adapter = context;
4716 struct ixgbe_hw *hw = &adapter->hw;
4717 device_t dev = adapter->dev;
4718 enum ixgbe_sfp_type last_sfp_type;
4719 u32 err;
4720 bool last_unsupported_sfp_recovery;
4721
4722 KASSERT(mutex_owned(&adapter->core_mtx));
4723
4724 last_sfp_type = hw->phy.sfp_type;
4725 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4726 ++adapter->mod_workev.ev_count;
4727 if (adapter->hw.need_crosstalk_fix) {
4728 if ((hw->mac.type != ixgbe_mac_82598EB) &&
4729 !ixgbe_sfp_cage_full(hw))
4730 goto out;
4731 }
4732
4733 err = hw->phy.ops.identify_sfp(hw);
4734 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4735 if (last_unsupported_sfp_recovery == false)
4736 device_printf(dev,
4737 "Unsupported SFP+ module type was detected.\n");
4738 goto out;
4739 }
4740
4741 if (hw->need_unsupported_sfp_recovery) {
4742 device_printf(dev, "Recovering from unsupported SFP\n");
4743 /*
4744 		 * We could recover the status by calling setup_sfp(),
4745 		 * setup_link() and some others, but that's complex and
4746 		 * might not work correctly in some unknown cases. To avoid
4747 		 * that kind of problem, call ixgbe_init_locked(). It's a
4748 		 * simple and safe approach.
4749 */
4750 ixgbe_init_locked(adapter);
4751 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4752 (hw->phy.sfp_type != last_sfp_type)) {
4753 /* A module is inserted and changed. */
4754
4755 if (hw->mac.type == ixgbe_mac_82598EB)
4756 err = hw->phy.ops.reset(hw);
4757 else {
4758 err = hw->mac.ops.setup_sfp(hw);
4759 hw->phy.sfp_setup_needed = FALSE;
4760 }
4761 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4762 device_printf(dev,
4763 "Setup failure - unsupported SFP+ module type.\n");
4764 goto out;
4765 }
4766 }
4767
4768 out:
4769 	/* ixgbe_get_supported_physical_layer() calls hw->phy.ops.identify_sfp() */
4770 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4771
4772 /* Adjust media types shown in ifconfig */
4773 IXGBE_CORE_UNLOCK(adapter);
4774 ifmedia_removeall(&adapter->media);
4775 ixgbe_add_media_types(adapter);
4776 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4777 IXGBE_CORE_LOCK(adapter);
4778
4779 /*
4780 	 * Don't schedule an MSF event if the chip is 82598, which doesn't
4781 	 * support MSF. At the least, calling ixgbe_handle_msf on 82598 DA
4782 	 * makes the link flap because the function calls setup_link().
4783 */
4784 if (hw->mac.type != ixgbe_mac_82598EB) {
4785 mutex_enter(&adapter->admin_mtx);
4786 if (int_en)
4787 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
4788 else
4789 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4790 mutex_exit(&adapter->admin_mtx);
4791 }
4792
4793 /*
4794 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4795 * the workqueue now.
4796 */
4797 } /* ixgbe_handle_mod */
4798
4799
4800 /************************************************************************
4801 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4802 ************************************************************************/
4803 static void
4804 ixgbe_handle_msf(void *context)
4805 {
4806 struct adapter *adapter = context;
4807 struct ixgbe_hw *hw = &adapter->hw;
4808 u32 autoneg;
4809 bool negotiate;
4810
4811 KASSERT(mutex_owned(&adapter->core_mtx));
4812
4813 ++adapter->msf_workev.ev_count;
4814
4815 autoneg = hw->phy.autoneg_advertised;
4816 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4817 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4818 if (hw->mac.ops.setup_link)
4819 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4820 } /* ixgbe_handle_msf */
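
/*
 * Advertising sketch: hw->phy.autoneg_advertised is a bitmask of
 * IXGBE_LINK_SPEED_* values; when it's empty, everything reported by
 * get_link_capabilities() is advertised instead. Forcing 10G/1G only
 * would look like this (illustrative; normally this is driven by the
 * advertise_speed sysctl):
 */
#if 0
	u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL;

	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);
#endif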
4821
4822 /************************************************************************
4823 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4824 ************************************************************************/
4825 static void
4826 ixgbe_handle_phy(void *context)
4827 {
4828 struct adapter *adapter = context;
4829 struct ixgbe_hw *hw = &adapter->hw;
4830 int error;
4831
4832 KASSERT(mutex_owned(&adapter->core_mtx));
4833
4834 ++adapter->phy_workev.ev_count;
4835 error = hw->phy.ops.handle_lasi(hw);
4836 if (error == IXGBE_ERR_OVERTEMP)
4837 device_printf(adapter->dev,
4838 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4839 		    "PHY will downshift to lower power state!\n");
4840 else if (error)
4841 device_printf(adapter->dev,
4842 "Error handling LASI interrupt: %d\n", error);
4843 } /* ixgbe_handle_phy */
4844
4845 static void
4846 ixgbe_handle_admin(struct work *wk, void *context)
4847 {
4848 struct adapter *adapter = context;
4849 struct ifnet *ifp = adapter->ifp;
4850 struct ixgbe_hw *hw = &adapter->hw;
4851 u32 task_requests;
4852 u32 eims_enable = 0;
4853
4854 mutex_enter(&adapter->admin_mtx);
4855 adapter->admin_pending = 0;
4856 task_requests = adapter->task_requests;
4857 adapter->task_requests = 0;
4858 mutex_exit(&adapter->admin_mtx);
4859
4860 /*
4861 * Hold the IFNET_LOCK across this entire call. This will
4862 * prevent additional changes to adapter->phy_layer
4863 * and serialize calls to this tasklet. We cannot hold the
4864 	 * CORE_LOCK while calling into the ifmedia functions, as
4865 	 * they call ifmedia_lock() and that lock is the CORE_LOCK.
4866 */
4867 IFNET_LOCK(ifp);
4868 IXGBE_CORE_LOCK(adapter);
4869 if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4870 ixgbe_handle_link(adapter);
4871 eims_enable |= IXGBE_EIMS_LSC;
4872 }
4873 if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) {
4874 ixgbe_handle_mod(adapter, false);
4875 }
4876 if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4877 ixgbe_handle_mod(adapter, true);
4878 if (hw->mac.type >= ixgbe_mac_X540)
4879 eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4880 else
4881 eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4882 }
4883 if ((task_requests
4884 & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4885 ixgbe_handle_msf(adapter);
4886 if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4887 (hw->mac.type == ixgbe_mac_82599EB))
4888 eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4889 }
4890 if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4891 ixgbe_handle_phy(adapter);
4892 eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4893 }
4894 if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4895 ixgbe_reinit_fdir(adapter);
4896 eims_enable |= IXGBE_EIMS_FLOW_DIR;
4897 }
4898 #if 0 /* notyet */
4899 if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4900 ixgbe_handle_mbx(adapter);
4901 eims_enable |= IXGBE_EIMS_MAILBOX;
4902 }
4903 #endif
4904 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
4905
4906 IXGBE_CORE_UNLOCK(adapter);
4907 IFNET_UNLOCK(ifp);
4908 } /* ixgbe_handle_admin */
4909
4910 static void
4911 ixgbe_ifstop(struct ifnet *ifp, int disable)
4912 {
4913 struct adapter *adapter = ifp->if_softc;
4914
4915 IXGBE_CORE_LOCK(adapter);
4916 ixgbe_stop_locked(adapter);
4917 IXGBE_CORE_UNLOCK(adapter);
4918
4919 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4920 atomic_store_relaxed(&adapter->timer_pending, 0);
4921 }
4922
4923 /************************************************************************
4924 * ixgbe_stop_locked - Stop the hardware
4925 *
4926 * Disables all traffic on the adapter by issuing a
4927 * global reset on the MAC and deallocates TX/RX buffers.
4928 ************************************************************************/
4929 static void
4930 ixgbe_stop_locked(void *arg)
4931 {
4932 struct ifnet *ifp;
4933 struct adapter *adapter = arg;
4934 struct ixgbe_hw *hw = &adapter->hw;
4935
4936 ifp = adapter->ifp;
4937
4938 KASSERT(mutex_owned(&adapter->core_mtx));
4939
4940 INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
4941 ixgbe_disable_intr(adapter);
4942 callout_stop(&adapter->timer);
4943
4944 /* Don't schedule workqueues. */
4945 adapter->schedule_wqs_ok = false;
4946
4947 /* Let the stack know...*/
4948 ifp->if_flags &= ~IFF_RUNNING;
4949
4950 ixgbe_reset_hw(hw);
4951 hw->adapter_stopped = FALSE;
4952 ixgbe_stop_adapter(hw);
4953 if (hw->mac.type == ixgbe_mac_82599EB)
4954 ixgbe_stop_mac_link_on_d3_82599(hw);
4955 /* Turn off the laser - noop with no optics */
4956 ixgbe_disable_tx_laser(hw);
4957
4958 /* Update the stack */
4959 adapter->link_up = FALSE;
4960 ixgbe_update_link_status(adapter);
4961
4962 /* reprogram the RAR[0] in case user changed it. */
4963 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4964
4965 return;
4966 } /* ixgbe_stop_locked */
4967
4968 /************************************************************************
4969 * ixgbe_update_link_status - Update OS on link state
4970 *
4971 * Note: Only updates the OS on the cached link state.
4972 * The real check of the hardware only happens with
4973 * a link interrupt.
4974 ************************************************************************/
4975 static void
4976 ixgbe_update_link_status(struct adapter *adapter)
4977 {
4978 struct ifnet *ifp = adapter->ifp;
4979 device_t dev = adapter->dev;
4980 struct ixgbe_hw *hw = &adapter->hw;
4981
4982 KASSERT(mutex_owned(&adapter->core_mtx));
4983
4984 if (adapter->link_up) {
4985 if (adapter->link_active != LINK_STATE_UP) {
4986 /*
4987 			 * Clear the EITR state left over from the previous
4988 			 * link session, in the same way as ixgbe_init_locked().
4989 */
4990 struct ix_queue *que = adapter->queues;
4991 for (int i = 0; i < adapter->num_queues; i++, que++)
4992 que->eitr_setting = 0;
4993
4994 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4995 /*
4996 				 * Discard the counts for both MAC Local
4997 				 * Fault and Remote Fault because those
4998 				 * registers are valid only while the link
4999 				 * is up at 10Gbps.
5000 */
5001 IXGBE_READ_REG(hw, IXGBE_MLFC);
5002 IXGBE_READ_REG(hw, IXGBE_MRFC);
5003 }
5004
5005 if (bootverbose) {
5006 const char *bpsmsg;
5007
5008 switch (adapter->link_speed) {
5009 case IXGBE_LINK_SPEED_10GB_FULL:
5010 bpsmsg = "10 Gbps";
5011 break;
5012 case IXGBE_LINK_SPEED_5GB_FULL:
5013 bpsmsg = "5 Gbps";
5014 break;
5015 case IXGBE_LINK_SPEED_2_5GB_FULL:
5016 bpsmsg = "2.5 Gbps";
5017 break;
5018 case IXGBE_LINK_SPEED_1GB_FULL:
5019 bpsmsg = "1 Gbps";
5020 break;
5021 case IXGBE_LINK_SPEED_100_FULL:
5022 bpsmsg = "100 Mbps";
5023 break;
5024 case IXGBE_LINK_SPEED_10_FULL:
5025 bpsmsg = "10 Mbps";
5026 break;
5027 default:
5028 bpsmsg = "unknown speed";
5029 break;
5030 }
5031 device_printf(dev, "Link is up %s %s \n",
5032 bpsmsg, "Full Duplex");
5033 }
5034 adapter->link_active = LINK_STATE_UP;
5035 /* Update any Flow Control changes */
5036 ixgbe_fc_enable(&adapter->hw);
5037 /* Update DMA coalescing config */
5038 ixgbe_config_dmac(adapter);
5039 if_link_state_change(ifp, LINK_STATE_UP);
5040
5041 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5042 ixgbe_ping_all_vfs(adapter);
5043 }
5044 } else {
5045 /*
5046 		 * Do this when the link state changes to DOWN, i.e.
5047 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5048 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5049 */
5050 if (adapter->link_active != LINK_STATE_DOWN) {
5051 if (bootverbose)
5052 device_printf(dev, "Link is Down\n");
5053 if_link_state_change(ifp, LINK_STATE_DOWN);
5054 adapter->link_active = LINK_STATE_DOWN;
5055 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5056 ixgbe_ping_all_vfs(adapter);
5057 ixgbe_drain_all(adapter);
5058 }
5059 }
5060 } /* ixgbe_update_link_status */
5061
5062 /************************************************************************
5063 * ixgbe_config_dmac - Configure DMA Coalescing
5064 ************************************************************************/
5065 static void
5066 ixgbe_config_dmac(struct adapter *adapter)
5067 {
5068 struct ixgbe_hw *hw = &adapter->hw;
5069 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5070
5071 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5072 return;
5073
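	/*
	 * XOR is used as a cheap "differs" test: reconfigure only when
	 * the watchdog value or the link speed has changed.
	 */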
5074 if (dcfg->watchdog_timer ^ adapter->dmac ||
5075 dcfg->link_speed ^ adapter->link_speed) {
5076 dcfg->watchdog_timer = adapter->dmac;
5077 dcfg->fcoe_en = false;
5078 dcfg->link_speed = adapter->link_speed;
5079 dcfg->num_tcs = 1;
5080
5081 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5082 dcfg->watchdog_timer, dcfg->link_speed);
5083
5084 hw->mac.ops.dmac_config(hw);
5085 }
5086 } /* ixgbe_config_dmac */
5087
5088 /************************************************************************
5089 * ixgbe_enable_intr
5090 ************************************************************************/
5091 static void
5092 ixgbe_enable_intr(struct adapter *adapter)
5093 {
5094 struct ixgbe_hw *hw = &adapter->hw;
5095 struct ix_queue *que = adapter->queues;
5096 u32 mask, fwsm;
5097
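	/*
	 * Start from the full cause mask minus the per-queue bits;
	 * the queue vectors are enabled one by one further below.
	 */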
5098 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5099
5100 switch (adapter->hw.mac.type) {
5101 case ixgbe_mac_82599EB:
5102 mask |= IXGBE_EIMS_ECC;
5103 /* Temperature sensor on some adapters */
5104 mask |= IXGBE_EIMS_GPI_SDP0;
5105 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5106 mask |= IXGBE_EIMS_GPI_SDP1;
5107 mask |= IXGBE_EIMS_GPI_SDP2;
5108 break;
5109 case ixgbe_mac_X540:
5110 /* Detect if Thermal Sensor is enabled */
5111 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5112 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5113 mask |= IXGBE_EIMS_TS;
5114 mask |= IXGBE_EIMS_ECC;
5115 break;
5116 case ixgbe_mac_X550:
5117 /* MAC thermal sensor is automatically enabled */
5118 mask |= IXGBE_EIMS_TS;
5119 mask |= IXGBE_EIMS_ECC;
5120 break;
5121 case ixgbe_mac_X550EM_x:
5122 case ixgbe_mac_X550EM_a:
5123 /* Some devices use SDP0 for important information */
5124 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5125 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5126 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5127 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5128 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5129 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5130 mask |= IXGBE_EICR_GPI_SDP0_X540;
5131 mask |= IXGBE_EIMS_ECC;
5132 break;
5133 default:
5134 break;
5135 }
5136
5137 /* Enable Fan Failure detection */
5138 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5139 mask |= IXGBE_EIMS_GPI_SDP1;
5140 /* Enable SR-IOV */
5141 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5142 mask |= IXGBE_EIMS_MAILBOX;
5143 /* Enable Flow Director */
5144 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5145 mask |= IXGBE_EIMS_FLOW_DIR;
5146
5147 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5148
5149 /* With MSI-X we use auto clear */
5150 if (adapter->msix_mem) {
5151 /*
5152 * It's not required to set TCP_TIMER because we don't use
5153 * it.
5154 */
5155 IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5156 }
5157
5158 /*
5159 	 * Now enable all queues.  This is done separately to
5160 	 * allow for handling the extended (beyond 32) MSI-X
5161 	 * vectors that can be used by the 82599.
5162 */
5163 for (int i = 0; i < adapter->num_queues; i++, que++)
5164 ixgbe_enable_queue(adapter, que->msix);
5165
5166 IXGBE_WRITE_FLUSH(hw);
5167
5168 } /* ixgbe_enable_intr */
5169
5170 /************************************************************************
5171 * ixgbe_disable_intr_internal
5172 ************************************************************************/
5173 static void
5174 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5175 {
5176 struct ix_queue *que = adapter->queues;
5177
5178 /* disable interrupts other than queues */
5179 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5180
5181 if (adapter->msix_mem)
5182 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5183
5184 for (int i = 0; i < adapter->num_queues; i++, que++)
5185 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5186
5187 IXGBE_WRITE_FLUSH(&adapter->hw);
5188
5189 } /* ixgbe_disable_intr_internal */
5190
5191 /************************************************************************
5192 * ixgbe_disable_intr
5193 ************************************************************************/
5194 static void
5195 ixgbe_disable_intr(struct adapter *adapter)
5196 {
5197
5198 ixgbe_disable_intr_internal(adapter, true);
5199 } /* ixgbe_disable_intr */
5200
5201 /************************************************************************
5202 * ixgbe_ensure_disabled_intr
5203 ************************************************************************/
5204 void
5205 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5206 {
5207
5208 ixgbe_disable_intr_internal(adapter, false);
5209 } /* ixgbe_ensure_disabled_intr */
5210
5211 /************************************************************************
5212 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5213 ************************************************************************/
5214 static int
5215 ixgbe_legacy_irq(void *arg)
5216 {
5217 struct ix_queue *que = arg;
5218 struct adapter *adapter = que->adapter;
5219 struct ixgbe_hw *hw = &adapter->hw;
5220 struct tx_ring *txr = adapter->tx_rings;
5221 u32 eicr;
5222 u32 eims_orig;
5223 u32 eims_enable = 0;
5224 u32 eims_disable = 0;
5225
5226 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5227 /*
5228 * Silicon errata #26 on 82598. Disable all interrupts before reading
5229 * EICR.
5230 */
5231 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5232
5233 /* Read and clear EICR */
5234 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
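	/*
	 * In legacy (INTx/MSI) mode the EICR read above also acknowledges
	 * and clears the asserted cause bits.
	 */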
5235
5236 adapter->stats.pf.legint.ev_count++;
5237 if (eicr == 0) {
5238 adapter->stats.pf.intzero.ev_count++;
5239 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5240 return 0;
5241 }
5242
5243 /* Queue (0) intr */
5244 if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5245 ++que->irqs.ev_count;
5246
5247 /*
5248 * The same as ixgbe_msix_que() about
5249 * "que->txrx_use_workqueue".
5250 */
5251 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5252
5253 IXGBE_TX_LOCK(txr);
5254 ixgbe_txeof(txr);
5255 #ifdef notyet
5256 if (!ixgbe_ring_empty(ifp, txr->br))
5257 ixgbe_start_locked(ifp, txr);
5258 #endif
5259 IXGBE_TX_UNLOCK(txr);
5260
5261 que->req.ev_count++;
5262 ixgbe_sched_handle_que(adapter, que);
5263 /* Disable queue 0 interrupt */
5264 eims_disable |= 1UL << 0;
5265
5266 } else
5267 eims_enable |= IXGBE_EIMC_RTX_QUEUE;
5268
5269 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
5270
5271 /* Re-enable some interrupts */
5272 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5273 (eims_orig & ~eims_disable) | eims_enable);
5274
5275 return 1;
5276 } /* ixgbe_legacy_irq */
5277
5278 /************************************************************************
5279 * ixgbe_free_pciintr_resources
5280 ************************************************************************/
5281 static void
5282 ixgbe_free_pciintr_resources(struct adapter *adapter)
5283 {
5284 struct ix_queue *que = adapter->queues;
5285 int rid;
5286
5287 /*
5288 	 * Release all MSI-X queue resources.
5289 */
5290 for (int i = 0; i < adapter->num_queues; i++, que++) {
5291 if (que->res != NULL) {
5292 pci_intr_disestablish(adapter->osdep.pc,
5293 adapter->osdep.ihs[i]);
5294 adapter->osdep.ihs[i] = NULL;
5295 }
5296 }
5297
5298 /* Clean the Legacy or Link interrupt last */
5299 if (adapter->vector) /* we are doing MSIX */
5300 rid = adapter->vector;
5301 else
5302 rid = 0;
5303
5304 if (adapter->osdep.ihs[rid] != NULL) {
5305 pci_intr_disestablish(adapter->osdep.pc,
5306 adapter->osdep.ihs[rid]);
5307 adapter->osdep.ihs[rid] = NULL;
5308 }
5309
5310 if (adapter->osdep.intrs != NULL) {
5311 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5312 adapter->osdep.nintrs);
5313 adapter->osdep.intrs = NULL;
5314 }
5315 } /* ixgbe_free_pciintr_resources */
5316
5317 /************************************************************************
5318 * ixgbe_free_pci_resources
5319 ************************************************************************/
5320 static void
5321 ixgbe_free_pci_resources(struct adapter *adapter)
5322 {
5323
5324 ixgbe_free_pciintr_resources(adapter);
5325
5326 if (adapter->osdep.mem_size != 0) {
5327 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5328 adapter->osdep.mem_bus_space_handle,
5329 adapter->osdep.mem_size);
5330 }
5331
5332 } /* ixgbe_free_pci_resources */
5333
5334 /************************************************************************
5335 * ixgbe_set_sysctl_value
5336 ************************************************************************/
5337 static void
5338 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5339 const char *description, int *limit, int value)
5340 {
5341 device_t dev = adapter->dev;
5342 struct sysctllog **log;
5343 const struct sysctlnode *rnode, *cnode;
5344
5345 /*
5346 * It's not required to check recovery mode because this function never
5347 * touches hardware.
5348 */
5349
5350 log = &adapter->sysctllog;
5351 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5352 aprint_error_dev(dev, "could not create sysctl root\n");
5353 return;
5354 }
5355 if (sysctl_createv(log, 0, &rnode, &cnode,
5356 CTLFLAG_READWRITE, CTLTYPE_INT,
5357 name, SYSCTL_DESCR(description),
5358 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5359 aprint_error_dev(dev, "could not create sysctl\n");
5360 *limit = value;
5361 } /* ixgbe_set_sysctl_value */
5362
5363 /************************************************************************
5364 * ixgbe_sysctl_flowcntl
5365 *
5366 * SYSCTL wrapper around setting Flow Control
5367 ************************************************************************/
5368 static int
5369 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5370 {
5371 struct sysctlnode node = *rnode;
5372 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5373 int error, fc;
5374
5375 if (ixgbe_fw_recovery_mode_swflag(adapter))
5376 return (EPERM);
5377
5378 fc = adapter->hw.fc.current_mode;
5379 node.sysctl_data = &fc;
5380 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5381 if (error != 0 || newp == NULL)
5382 return error;
5383
5384 /* Don't bother if it's not changed */
5385 if (fc == adapter->hw.fc.current_mode)
5386 return (0);
5387
5388 return ixgbe_set_flowcntl(adapter, fc);
5389 } /* ixgbe_sysctl_flowcntl */
5390
5391 /************************************************************************
5392 * ixgbe_set_flowcntl - Set flow control
5393 *
5394 * Flow control values:
5395 * 0 - off
5396 * 1 - rx pause
5397 * 2 - tx pause
5398 * 3 - full
5399 ************************************************************************/
5400 static int
5401 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5402 {
5403 switch (fc) {
5404 case ixgbe_fc_rx_pause:
5405 case ixgbe_fc_tx_pause:
5406 case ixgbe_fc_full:
5407 adapter->hw.fc.requested_mode = fc;
5408 if (adapter->num_queues > 1)
5409 ixgbe_disable_rx_drop(adapter);
5410 break;
5411 case ixgbe_fc_none:
5412 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5413 if (adapter->num_queues > 1)
5414 ixgbe_enable_rx_drop(adapter);
5415 break;
5416 default:
5417 return (EINVAL);
5418 }
5419
5420 #if 0 /* XXX NetBSD */
5421 /* Don't autoneg if forcing a value */
5422 adapter->hw.fc.disable_fc_autoneg = TRUE;
5423 #endif
5424 ixgbe_fc_enable(&adapter->hw);
5425
5426 return (0);
5427 } /* ixgbe_set_flowcntl */
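/*
 * Usage sketch (the sysctl leaf name below is illustrative; the actual
 * node is whatever this handler was registered under):
 *	sysctl -w hw.ixg0.fc=3		# request full rx+tx pause
 *	sysctl -w hw.ixg0.fc=0		# disable flow control
 */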
5428
5429 /************************************************************************
5430 * ixgbe_enable_rx_drop
5431 *
5432 * Enable the hardware to drop packets when the buffer is
5433 * full. This is useful with multiqueue, so that no single
5434 * queue being full stalls the entire RX engine. We only
5435 * enable this when Multiqueue is enabled AND Flow Control
5436 * is disabled.
5437 ************************************************************************/
5438 static void
5439 ixgbe_enable_rx_drop(struct adapter *adapter)
5440 {
5441 struct ixgbe_hw *hw = &adapter->hw;
5442 struct rx_ring *rxr;
5443 u32 srrctl;
5444
5445 for (int i = 0; i < adapter->num_queues; i++) {
5446 rxr = &adapter->rx_rings[i];
5447 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5448 srrctl |= IXGBE_SRRCTL_DROP_EN;
5449 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5450 }
5451
5452 /* enable drop for each vf */
5453 for (int i = 0; i < adapter->num_vfs; i++) {
5454 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5455 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5456 IXGBE_QDE_ENABLE));
5457 }
5458 } /* ixgbe_enable_rx_drop */
5459
5460 /************************************************************************
5461 * ixgbe_disable_rx_drop
5462 ************************************************************************/
5463 static void
5464 ixgbe_disable_rx_drop(struct adapter *adapter)
5465 {
5466 struct ixgbe_hw *hw = &adapter->hw;
5467 struct rx_ring *rxr;
5468 u32 srrctl;
5469
5470 for (int i = 0; i < adapter->num_queues; i++) {
5471 rxr = &adapter->rx_rings[i];
5472 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5473 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5474 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5475 }
5476
5477 /* disable drop for each vf */
5478 for (int i = 0; i < adapter->num_vfs; i++) {
5479 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5480 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5481 }
5482 } /* ixgbe_disable_rx_drop */
5483
5484 /************************************************************************
5485 * ixgbe_sysctl_advertise
5486 *
5487 * SYSCTL wrapper around setting advertised speed
5488 ************************************************************************/
5489 static int
5490 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5491 {
5492 struct sysctlnode node = *rnode;
5493 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5494 int error = 0, advertise;
5495
5496 if (ixgbe_fw_recovery_mode_swflag(adapter))
5497 return (EPERM);
5498
5499 advertise = adapter->advertise;
5500 node.sysctl_data = &advertise;
5501 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5502 if (error != 0 || newp == NULL)
5503 return error;
5504
5505 return ixgbe_set_advertise(adapter, advertise);
5506 } /* ixgbe_sysctl_advertise */
5507
5508 /************************************************************************
5509 * ixgbe_set_advertise - Control advertised link speed
5510 *
5511 * Flags:
5512 * 0x00 - Default (all capable link speed)
5513 * 0x01 - advertise 100 Mb
5514 * 0x02 - advertise 1G
5515 * 0x04 - advertise 10G
5516 * 0x08 - advertise 10 Mb
5517 * 0x10 - advertise 2.5G
5518 * 0x20 - advertise 5G
5519 ************************************************************************/
5520 static int
5521 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5522 {
5523 device_t dev;
5524 struct ixgbe_hw *hw;
5525 ixgbe_link_speed speed = 0;
5526 ixgbe_link_speed link_caps = 0;
5527 s32 err = IXGBE_NOT_IMPLEMENTED;
5528 bool negotiate = FALSE;
5529
5530 /* Checks to validate new value */
5531 if (adapter->advertise == advertise) /* no change */
5532 return (0);
5533
5534 dev = adapter->dev;
5535 hw = &adapter->hw;
5536
5537 /* No speed changes for backplane media */
5538 if (hw->phy.media_type == ixgbe_media_type_backplane)
5539 return (ENODEV);
5540
5541 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5542 (hw->phy.multispeed_fiber))) {
5543 device_printf(dev,
5544 "Advertised speed can only be set on copper or "
5545 "multispeed fiber media types.\n");
5546 return (EINVAL);
5547 }
5548
5549 if (advertise < 0x0 || advertise > 0x3f) {
5550 device_printf(dev, "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5551 return (EINVAL);
5552 }
5553
5554 if (hw->mac.ops.get_link_capabilities) {
5555 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5556 &negotiate);
5557 if (err != IXGBE_SUCCESS) {
5558 device_printf(dev, "Unable to determine supported advertise speeds\n");
5559 return (ENODEV);
5560 }
5561 }
5562
5563 /* Set new value and report new advertised mode */
5564 if (advertise & 0x1) {
5565 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5566 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5567 return (EINVAL);
5568 }
5569 speed |= IXGBE_LINK_SPEED_100_FULL;
5570 }
5571 if (advertise & 0x2) {
5572 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5573 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5574 return (EINVAL);
5575 }
5576 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5577 }
5578 if (advertise & 0x4) {
5579 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5580 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5581 return (EINVAL);
5582 }
5583 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5584 }
5585 if (advertise & 0x8) {
5586 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5587 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5588 return (EINVAL);
5589 }
5590 speed |= IXGBE_LINK_SPEED_10_FULL;
5591 }
5592 if (advertise & 0x10) {
5593 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5594 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5595 return (EINVAL);
5596 }
5597 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5598 }
5599 if (advertise & 0x20) {
5600 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5601 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5602 return (EINVAL);
5603 }
5604 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5605 }
5606 if (advertise == 0)
5607 speed = link_caps; /* All capable link speed */
5608
5609 hw->mac.autotry_restart = TRUE;
5610 hw->mac.ops.setup_link(hw, speed, TRUE);
5611 adapter->advertise = advertise;
5612
5613 return (0);
5614 } /* ixgbe_set_advertise */
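/*
 * Worked example of the flag encoding above: advertise = 0x6
 * (0x2 | 0x4) requests that only 1G and 10G be advertised, while
 * advertise = 0x0 restores every speed the link supports.
 */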
5615
5616 /************************************************************************
5617 * ixgbe_get_advertise - Get current advertised speed settings
5618 *
5619 * Formatted for sysctl usage.
5620 * Flags:
5621 * 0x01 - advertise 100 Mb
5622 * 0x02 - advertise 1G
5623 * 0x04 - advertise 10G
5624 * 0x08 - advertise 10 Mb (yes, Mb)
5625 * 0x10 - advertise 2.5G
5626 * 0x20 - advertise 5G
5627 ************************************************************************/
5628 static int
5629 ixgbe_get_advertise(struct adapter *adapter)
5630 {
5631 struct ixgbe_hw *hw = &adapter->hw;
5632 int speed;
5633 ixgbe_link_speed link_caps = 0;
5634 s32 err;
5635 bool negotiate = FALSE;
5636
5637 /*
5638 * Advertised speed means nothing unless it's copper or
5639 * multi-speed fiber
5640 */
5641 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5642 !(hw->phy.multispeed_fiber))
5643 return (0);
5644
5645 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5646 if (err != IXGBE_SUCCESS)
5647 return (0);
5648
5649 speed =
5650 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5651 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5652 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5653 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5654 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5655 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5656
5657 return speed;
5658 } /* ixgbe_get_advertise */
5659
5660 /************************************************************************
5661 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5662 *
5663 * Control values:
5664 * 0/1 - off / on (use default value of 1000)
5665 *
5666 * Legal timer values are:
5667 * 50,100,250,500,1000,2000,5000,10000
5668 *
5669 * Turning off interrupt moderation will also turn this off.
5670 ************************************************************************/
5671 static int
5672 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5673 {
5674 struct sysctlnode node = *rnode;
5675 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5676 struct ifnet *ifp = adapter->ifp;
5677 int error;
5678 int newval;
5679
5680 if (ixgbe_fw_recovery_mode_swflag(adapter))
5681 return (EPERM);
5682
5683 newval = adapter->dmac;
5684 node.sysctl_data = &newval;
5685 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5686 if ((error) || (newp == NULL))
5687 return (error);
5688
5689 switch (newval) {
5690 case 0:
5691 /* Disabled */
5692 adapter->dmac = 0;
5693 break;
5694 case 1:
5695 /* Enable and use default */
5696 adapter->dmac = 1000;
5697 break;
5698 case 50:
5699 case 100:
5700 case 250:
5701 case 500:
5702 case 1000:
5703 case 2000:
5704 case 5000:
5705 case 10000:
5706 /* Legal values - allow */
5707 adapter->dmac = newval;
5708 break;
5709 default:
5710 /* Do nothing, illegal value */
5711 return (EINVAL);
5712 }
5713
5714 /* Re-initialize hardware if it's already running */
5715 if (ifp->if_flags & IFF_RUNNING)
5716 ifp->if_init(ifp);
5717
5718 return (0);
5719 }
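/*
 * Example: writing 1 enables DMA coalescing with the default timer
 * value of 1000, writing 250 selects that timer value explicitly,
 * and writing 0 disables coalescing again.
 */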
5720
5721 #ifdef IXGBE_DEBUG
5722 /************************************************************************
5723 * ixgbe_sysctl_power_state
5724 *
5725 * Sysctl to test power states
5726 * Values:
5727 * 0 - set device to D0
5728 * 3 - set device to D3
5729 * (none) - get current device power state
5730 ************************************************************************/
5731 static int
5732 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5733 {
5734 #ifdef notyet
5735 struct sysctlnode node = *rnode;
5736 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5737 device_t dev = adapter->dev;
5738 int curr_ps, new_ps, error = 0;
5739
5740 if (ixgbe_fw_recovery_mode_swflag(adapter))
5741 return (EPERM);
5742
5743 curr_ps = new_ps = pci_get_powerstate(dev);
5744
5745 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5746 	if ((error) || (newp == NULL))
5747 return (error);
5748
5749 if (new_ps == curr_ps)
5750 return (0);
5751
5752 if (new_ps == 3 && curr_ps == 0)
5753 error = DEVICE_SUSPEND(dev);
5754 else if (new_ps == 0 && curr_ps == 3)
5755 error = DEVICE_RESUME(dev);
5756 else
5757 return (EINVAL);
5758
5759 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5760
5761 return (error);
5762 #else
5763 return 0;
5764 #endif
5765 } /* ixgbe_sysctl_power_state */
5766 #endif
5767
5768 /************************************************************************
5769 * ixgbe_sysctl_wol_enable
5770 *
5771 * Sysctl to enable/disable the WoL capability,
5772 * if supported by the adapter.
5773 *
5774 * Values:
5775 * 0 - disabled
5776 * 1 - enabled
5777 ************************************************************************/
5778 static int
5779 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5780 {
5781 struct sysctlnode node = *rnode;
5782 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5783 struct ixgbe_hw *hw = &adapter->hw;
5784 bool new_wol_enabled;
5785 int error = 0;
5786
5787 /*
5788 * It's not required to check recovery mode because this function never
5789 * touches hardware.
5790 */
5791 new_wol_enabled = hw->wol_enabled;
5792 node.sysctl_data = &new_wol_enabled;
5793 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5794 if ((error) || (newp == NULL))
5795 return (error);
5796 if (new_wol_enabled == hw->wol_enabled)
5797 return (0);
5798
5799 if (new_wol_enabled && !adapter->wol_support)
5800 return (ENODEV);
5801 else
5802 hw->wol_enabled = new_wol_enabled;
5803
5804 return (0);
5805 } /* ixgbe_sysctl_wol_enable */
5806
5807 /************************************************************************
5808 * ixgbe_sysctl_wufc - Wake Up Filter Control
5809 *
5810 * Sysctl to enable/disable the types of packets that the
5811 * adapter will wake up on upon receipt.
5812 * Flags:
5813 * 0x1 - Link Status Change
5814 * 0x2 - Magic Packet
5815 * 0x4 - Direct Exact
5816 * 0x8 - Directed Multicast
5817 * 0x10 - Broadcast
5818 * 0x20 - ARP/IPv4 Request Packet
5819 * 0x40 - Direct IPv4 Packet
5820 * 0x80 - Direct IPv6 Packet
5821 *
5822 * Settings not listed above will cause the sysctl to return an error.
5823 ************************************************************************/
5824 static int
5825 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5826 {
5827 struct sysctlnode node = *rnode;
5828 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5829 int error = 0;
5830 u32 new_wufc;
5831
5832 /*
5833 * It's not required to check recovery mode because this function never
5834 * touches hardware.
5835 */
5836 new_wufc = adapter->wufc;
5837 node.sysctl_data = &new_wufc;
5838 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5839 if ((error) || (newp == NULL))
5840 return (error);
5841 if (new_wufc == adapter->wufc)
5842 return (0);
5843
5844 if (new_wufc & 0xffffff00)
5845 return (EINVAL);
5846
5847 new_wufc &= 0xff;
5848 	new_wufc |= (0xffffff00 & adapter->wufc); /* keep the non-settable bits */
5849 adapter->wufc = new_wufc;
5850
5851 return (0);
5852 } /* ixgbe_sysctl_wufc */
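/*
 * Worked example of the flag encoding above: wufc = 0x2 wakes on
 * Magic Packet only; wufc = 0x12 (0x2 | 0x10) also wakes on broadcast.
 */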
5853
5854 #ifdef IXGBE_DEBUG
5855 /************************************************************************
5856 * ixgbe_sysctl_print_rss_config
5857 ************************************************************************/
5858 static int
5859 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5860 {
5861 #ifdef notyet
5862 struct sysctlnode node = *rnode;
5863 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5864 struct ixgbe_hw *hw = &adapter->hw;
5865 device_t dev = adapter->dev;
5866 struct sbuf *buf;
5867 int error = 0, reta_size;
5868 u32 reg;
5869
5870 if (ixgbe_fw_recovery_mode_swflag(adapter))
5871 return (EPERM);
5872
5873 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5874 if (!buf) {
5875 device_printf(dev, "Could not allocate sbuf for output.\n");
5876 return (ENOMEM);
5877 }
5878
5879 // TODO: use sbufs to make a string to print out
5880 /* Set multiplier for RETA setup and table size based on MAC */
5881 switch (adapter->hw.mac.type) {
5882 case ixgbe_mac_X550:
5883 case ixgbe_mac_X550EM_x:
5884 case ixgbe_mac_X550EM_a:
5885 reta_size = 128;
5886 break;
5887 default:
5888 reta_size = 32;
5889 break;
5890 }
5891
5892 /* Print out the redirection table */
5893 sbuf_cat(buf, "\n");
5894 for (int i = 0; i < reta_size; i++) {
5895 if (i < 32) {
5896 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5897 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5898 } else {
5899 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5900 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5901 }
5902 }
5903
5904 // TODO: print more config
5905
5906 error = sbuf_finish(buf);
5907 if (error)
5908 device_printf(dev, "Error finishing sbuf: %d\n", error);
5909
5910 sbuf_delete(buf);
5911 #endif
5912 return (0);
5913 } /* ixgbe_sysctl_print_rss_config */
5914 #endif /* IXGBE_DEBUG */
5915
5916 /************************************************************************
5917 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5918 *
5919 * For X552/X557-AT devices using an external PHY
5920 ************************************************************************/
5921 static int
5922 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5923 {
5924 struct sysctlnode node = *rnode;
5925 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5926 struct ixgbe_hw *hw = &adapter->hw;
5927 int val;
5928 u16 reg;
5929 int error;
5930
5931 if (ixgbe_fw_recovery_mode_swflag(adapter))
5932 return (EPERM);
5933
5934 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5935 device_printf(adapter->dev,
5936 "Device has no supported external thermal sensor.\n");
5937 return (ENODEV);
5938 }
5939
5940 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5941 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5942 device_printf(adapter->dev,
5943 "Error reading from PHY's current temperature register\n");
5944 return (EAGAIN);
5945 }
5946
5947 node.sysctl_data = &val;
5948
5949 /* Shift temp for output */
5950 val = reg >> 8;
5951
5952 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5953 if ((error) || (newp == NULL))
5954 return (error);
5955
5956 return (0);
5957 } /* ixgbe_sysctl_phy_temp */
5958
5959 /************************************************************************
5960 * ixgbe_sysctl_phy_overtemp_occurred
5961 *
5962 * Reports (directly from the PHY) whether the current PHY
5963 * temperature is over the overtemp threshold.
5964 ************************************************************************/
5965 static int
5966 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5967 {
5968 struct sysctlnode node = *rnode;
5969 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5970 struct ixgbe_hw *hw = &adapter->hw;
5971 int val, error;
5972 u16 reg;
5973
5974 if (ixgbe_fw_recovery_mode_swflag(adapter))
5975 return (EPERM);
5976
5977 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5978 device_printf(adapter->dev,
5979 "Device has no supported external thermal sensor.\n");
5980 return (ENODEV);
5981 }
5982
5983 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5984 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5985 device_printf(adapter->dev,
5986 "Error reading from PHY's temperature status register\n");
5987 return (EAGAIN);
5988 }
5989
5990 node.sysctl_data = &val;
5991
5992 /* Get occurrence bit */
5993 val = !!(reg & 0x4000);
5994
5995 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5996 if ((error) || (newp == NULL))
5997 return (error);
5998
5999 return (0);
6000 } /* ixgbe_sysctl_phy_overtemp_occurred */
6001
6002 /************************************************************************
6003 * ixgbe_sysctl_eee_state
6004 *
6005 * Sysctl to set EEE power saving feature
6006 * Values:
6007 * 0 - disable EEE
6008 * 1 - enable EEE
6009 * (none) - get current device EEE state
6010 ************************************************************************/
6011 static int
6012 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6013 {
6014 struct sysctlnode node = *rnode;
6015 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6016 struct ifnet *ifp = adapter->ifp;
6017 device_t dev = adapter->dev;
6018 int curr_eee, new_eee, error = 0;
6019 s32 retval;
6020
6021 if (ixgbe_fw_recovery_mode_swflag(adapter))
6022 return (EPERM);
6023
6024 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6025 node.sysctl_data = &new_eee;
6026 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6027 if ((error) || (newp == NULL))
6028 return (error);
6029
6030 /* Nothing to do */
6031 if (new_eee == curr_eee)
6032 return (0);
6033
6034 /* Not supported */
6035 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6036 return (EINVAL);
6037
6038 /* Bounds checking */
6039 if ((new_eee < 0) || (new_eee > 1))
6040 return (EINVAL);
6041
6042 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6043 if (retval) {
6044 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6045 return (EINVAL);
6046 }
6047
6048 /* Restart auto-neg */
6049 ifp->if_init(ifp);
6050
6051 device_printf(dev, "New EEE state: %d\n", new_eee);
6052
6053 /* Cache new value */
6054 if (new_eee)
6055 adapter->feat_en |= IXGBE_FEATURE_EEE;
6056 else
6057 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6058
6059 return (error);
6060 } /* ixgbe_sysctl_eee_state */
6061
6062 #define PRINTQS(adapter, regname) \
6063 do { \
6064 struct ixgbe_hw *_hw = &(adapter)->hw; \
6065 int _i; \
6066 \
6067 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6068 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6069 printf((_i == 0) ? "\t" : " "); \
6070 printf("%08x", IXGBE_READ_REG(_hw, \
6071 IXGBE_##regname(_i))); \
6072 } \
6073 printf("\n"); \
6074 } while (0)
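/*
 * For example, PRINTQS(adapter, RDT) prints one line containing the
 * RDT register value of every queue, aligned with the "queue:" header
 * row emitted by ixgbe_print_debug_info() below.
 */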
6075
6076 /************************************************************************
6077 * ixgbe_print_debug_info
6078 *
6079  * Called from ixgbe_sysctl_debug() when the sysctl is written with 1.
6080 * Provides a way to take a look at important statistics
6081 * maintained by the driver and hardware.
6082 ************************************************************************/
6083 static void
6084 ixgbe_print_debug_info(struct adapter *adapter)
6085 {
6086 device_t dev = adapter->dev;
6087 struct ixgbe_hw *hw = &adapter->hw;
6088 int table_size;
6089 int i;
6090
6091 switch (adapter->hw.mac.type) {
6092 case ixgbe_mac_X550:
6093 case ixgbe_mac_X550EM_x:
6094 case ixgbe_mac_X550EM_a:
6095 table_size = 128;
6096 break;
6097 default:
6098 table_size = 32;
6099 break;
6100 }
6101
6102 device_printf(dev, "[E]RETA:\n");
6103 for (i = 0; i < table_size; i++) {
6104 if (i < 32)
6105 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6106 IXGBE_RETA(i)));
6107 else
6108 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6109 IXGBE_ERETA(i - 32)));
6110 }
6111
6112 device_printf(dev, "queue:");
6113 for (i = 0; i < adapter->num_queues; i++) {
6114 printf((i == 0) ? "\t" : " ");
6115 printf("%8d", i);
6116 }
6117 printf("\n");
6118 PRINTQS(adapter, RDBAL);
6119 PRINTQS(adapter, RDBAH);
6120 PRINTQS(adapter, RDLEN);
6121 PRINTQS(adapter, SRRCTL);
6122 PRINTQS(adapter, RDH);
6123 PRINTQS(adapter, RDT);
6124 PRINTQS(adapter, RXDCTL);
6125
6126 device_printf(dev, "RQSMR:");
6127 for (i = 0; i < adapter->num_queues / 4; i++) {
6128 printf((i == 0) ? "\t" : " ");
6129 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6130 }
6131 printf("\n");
6132
6133 device_printf(dev, "disabled_count:");
6134 for (i = 0; i < adapter->num_queues; i++) {
6135 printf((i == 0) ? "\t" : " ");
6136 printf("%8d", adapter->queues[i].disabled_count);
6137 }
6138 printf("\n");
6139
6140 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6141 if (hw->mac.type != ixgbe_mac_82598EB) {
6142 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6143 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6144 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6145 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6146 }
6147 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6148 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6149 } /* ixgbe_print_debug_info */
6150
6151 /************************************************************************
6152 * ixgbe_sysctl_debug
6153 ************************************************************************/
6154 static int
6155 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6156 {
6157 struct sysctlnode node = *rnode;
6158 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6159 int error, result = 0;
6160
6161 if (ixgbe_fw_recovery_mode_swflag(adapter))
6162 return (EPERM);
6163
6164 node.sysctl_data = &result;
6165 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6166
6167 if (error || newp == NULL)
6168 return error;
6169
6170 if (result == 1)
6171 ixgbe_print_debug_info(adapter);
6172
6173 return 0;
6174 } /* ixgbe_sysctl_debug */
6175
6176 /************************************************************************
6177 * ixgbe_init_device_features
6178 ************************************************************************/
6179 static void
6180 ixgbe_init_device_features(struct adapter *adapter)
6181 {
6182 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6183 | IXGBE_FEATURE_RSS
6184 | IXGBE_FEATURE_MSI
6185 | IXGBE_FEATURE_MSIX
6186 | IXGBE_FEATURE_LEGACY_IRQ
6187 | IXGBE_FEATURE_LEGACY_TX;
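	/*
	 * feat_cap describes what this MAC/driver combination could
	 * support; feat_en, filled in below, is the subset that is
	 * actually enabled.
	 */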
6188
6189 /* Set capabilities first... */
6190 switch (adapter->hw.mac.type) {
6191 case ixgbe_mac_82598EB:
6192 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6193 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6194 break;
6195 case ixgbe_mac_X540:
6196 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6197 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6198 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6199 (adapter->hw.bus.func == 0))
6200 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6201 break;
6202 case ixgbe_mac_X550:
6203 /*
6204 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6205 * NVM Image version.
6206 */
6207 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6208 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6209 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6210 break;
6211 case ixgbe_mac_X550EM_x:
6212 /*
6213 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6214 * NVM Image version.
6215 */
6216 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6217 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6218 break;
6219 case ixgbe_mac_X550EM_a:
6220 /*
6221 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6222 * NVM Image version.
6223 */
6224 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6225 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6226 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6227 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6228 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6229 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6230 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6231 }
6232 break;
6233 case ixgbe_mac_82599EB:
6234 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6235 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6236 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6237 (adapter->hw.bus.func == 0))
6238 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6239 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6240 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6241 break;
6242 default:
6243 break;
6244 }
6245
6246 /* Enabled by default... */
6247 /* Fan failure detection */
6248 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6249 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6250 /* Netmap */
6251 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6252 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6253 /* EEE */
6254 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6255 adapter->feat_en |= IXGBE_FEATURE_EEE;
6256 /* Thermal Sensor */
6257 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6258 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6259 /*
6260 * Recovery mode:
6261 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6262 * NVM Image version.
6263 */
6264
6265 /* Enabled via global sysctl... */
6266 /* Flow Director */
6267 if (ixgbe_enable_fdir) {
6268 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6269 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6270 else
6271 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6272 }
6273 /* Legacy (single queue) transmit */
6274 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6275 ixgbe_enable_legacy_tx)
6276 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6277 /*
6278 * Message Signal Interrupts - Extended (MSI-X)
6279 * Normal MSI is only enabled if MSI-X calls fail.
6280 */
6281 if (!ixgbe_enable_msix)
6282 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6283 /* Receive-Side Scaling (RSS) */
6284 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6285 adapter->feat_en |= IXGBE_FEATURE_RSS;
6286
6287 /* Disable features with unmet dependencies... */
6288 /* No MSI-X */
6289 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6290 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6291 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6292 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6293 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6294 }
6295 } /* ixgbe_init_device_features */
6296
6297 /************************************************************************
6298 * ixgbe_probe - Device identification routine
6299 *
6300  *   Determines if the driver should be loaded on
6301  *   this adapter based on its PCI vendor/device ID.
6302  *
6303  *   return 1 on match, 0 otherwise (NetBSD autoconf semantics)
6304 ************************************************************************/
6305 static int
6306 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6307 {
6308 const struct pci_attach_args *pa = aux;
6309
6310 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6311 }
6312
6313 static const ixgbe_vendor_info_t *
6314 ixgbe_lookup(const struct pci_attach_args *pa)
6315 {
6316 const ixgbe_vendor_info_t *ent;
6317 pcireg_t subid;
6318
6319 INIT_DEBUGOUT("ixgbe_lookup: begin");
6320
6321 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6322 return NULL;
6323
6324 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
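	/*
	 * A subvendor or subdevice ID of 0 in a table entry acts as a
	 * wildcard and matches anything.
	 */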
6325
6326 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6327 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6328 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6329 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6330 (ent->subvendor_id == 0)) &&
6331 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6332 (ent->subdevice_id == 0))) {
6333 return ent;
6334 }
6335 }
6336 return NULL;
6337 }
6338
6339 static int
6340 ixgbe_ifflags_cb(struct ethercom *ec)
6341 {
6342 struct ifnet *ifp = &ec->ec_if;
6343 struct adapter *adapter = ifp->if_softc;
6344 u_short change;
6345 int rv = 0;
6346
6347 IXGBE_CORE_LOCK(adapter);
6348
6349 change = ifp->if_flags ^ adapter->if_flags;
6350 if (change != 0)
6351 adapter->if_flags = ifp->if_flags;
6352
6353 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6354 rv = ENETRESET;
6355 goto out;
6356 } else if ((change & IFF_PROMISC) != 0)
6357 ixgbe_set_rxfilter(adapter);
6358
6359 /* Check for ec_capenable. */
6360 change = ec->ec_capenable ^ adapter->ec_capenable;
6361 adapter->ec_capenable = ec->ec_capenable;
6362 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6363 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6364 rv = ENETRESET;
6365 goto out;
6366 }
6367
6368 /*
6369 * Special handling is not required for ETHERCAP_VLAN_MTU.
6370 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6371 */
6372
6373 /* Set up VLAN support and filter */
6374 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6375 ixgbe_setup_vlan_hw_support(adapter);
6376
6377 out:
6378 IXGBE_CORE_UNLOCK(adapter);
6379
6380 return rv;
6381 }
6382
6383 /************************************************************************
6384 * ixgbe_ioctl - Ioctl entry point
6385 *
6386 * Called when the user wants to configure the interface.
6387 *
6388 * return 0 on success, positive on failure
6389 ************************************************************************/
6390 static int
6391 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6392 {
6393 struct adapter *adapter = ifp->if_softc;
6394 struct ixgbe_hw *hw = &adapter->hw;
6395 struct ifcapreq *ifcr = data;
6396 struct ifreq *ifr = data;
6397 int error = 0;
6398 int l4csum_en;
6399 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6400 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6401
6402 if (ixgbe_fw_recovery_mode_swflag(adapter))
6403 return (EPERM);
6404
6405 switch (command) {
6406 case SIOCSIFFLAGS:
6407 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6408 break;
6409 case SIOCADDMULTI:
6410 case SIOCDELMULTI:
6411 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6412 break;
6413 case SIOCSIFMEDIA:
6414 case SIOCGIFMEDIA:
6415 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6416 break;
6417 case SIOCSIFCAP:
6418 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6419 break;
6420 case SIOCSIFMTU:
6421 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6422 break;
6423 #ifdef __NetBSD__
6424 case SIOCINITIFADDR:
6425 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6426 break;
6427 case SIOCGIFFLAGS:
6428 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6429 break;
6430 case SIOCGIFAFLAG_IN:
6431 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6432 break;
6433 case SIOCGIFADDR:
6434 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6435 break;
6436 case SIOCGIFMTU:
6437 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6438 break;
6439 case SIOCGIFCAP:
6440 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6441 break;
6442 case SIOCGETHERCAP:
6443 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6444 break;
6445 case SIOCGLIFADDR:
6446 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6447 break;
6448 case SIOCZIFDATA:
6449 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6450 hw->mac.ops.clear_hw_cntrs(hw);
6451 ixgbe_clear_evcnt(adapter);
6452 break;
6453 case SIOCAIFADDR:
6454 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6455 break;
6456 #endif
6457 default:
6458 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6459 break;
6460 }
6461
6462 switch (command) {
6463 case SIOCGI2C:
6464 {
6465 struct ixgbe_i2c_req i2c;
6466
6467 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6468 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6469 if (error != 0)
6470 break;
6471 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6472 error = EINVAL;
6473 break;
6474 }
6475 if (i2c.len > sizeof(i2c.data)) {
6476 error = EINVAL;
6477 break;
6478 }
6479
6480 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6481 i2c.dev_addr, i2c.data);
6482 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6483 break;
6484 }
6485 case SIOCSIFCAP:
6486 /* Layer-4 Rx checksum offload has to be turned on and
6487 * off as a unit.
6488 */
6489 l4csum_en = ifcr->ifcr_capenable & l4csum;
6490 if (l4csum_en != l4csum && l4csum_en != 0)
6491 return EINVAL;
6492 /*FALLTHROUGH*/
6493 case SIOCADDMULTI:
6494 case SIOCDELMULTI:
6495 case SIOCSIFFLAGS:
6496 case SIOCSIFMTU:
6497 default:
6498 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6499 return error;
6500 		if ((ifp->if_flags & IFF_RUNNING) == 0)
6501 			;	/* Nothing to do: the interface is not running */
6502 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6503 IXGBE_CORE_LOCK(adapter);
6504 if ((ifp->if_flags & IFF_RUNNING) != 0)
6505 ixgbe_init_locked(adapter);
6506 ixgbe_recalculate_max_frame(adapter);
6507 IXGBE_CORE_UNLOCK(adapter);
6508 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6509 /*
6510 * Multicast list has changed; set the hardware filter
6511 * accordingly.
6512 */
6513 IXGBE_CORE_LOCK(adapter);
6514 ixgbe_disable_intr(adapter);
6515 ixgbe_set_rxfilter(adapter);
6516 ixgbe_enable_intr(adapter);
6517 IXGBE_CORE_UNLOCK(adapter);
6518 }
6519 return 0;
6520 }
6521
6522 return error;
6523 } /* ixgbe_ioctl */
6524
6525 /************************************************************************
6526 * ixgbe_check_fan_failure
6527 ************************************************************************/
6528 static int
6529 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6530 {
6531 u32 mask;
6532
6533 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6534 IXGBE_ESDP_SDP1;
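	/*
	 * In interrupt context the fan failure shows up as a GPI bit in
	 * EICR; otherwise the caller passes the ESDP register value and
	 * the SDP1 pin is sampled directly.
	 */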
6535
6536 if (reg & mask) {
6537 device_printf(adapter->dev,
6538 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6539 return IXGBE_ERR_FAN_FAILURE;
6540 }
6541
6542 return IXGBE_SUCCESS;
6543 } /* ixgbe_check_fan_failure */
6544
6545 /************************************************************************
6546 * ixgbe_handle_que
6547 ************************************************************************/
6548 static void
6549 ixgbe_handle_que(void *context)
6550 {
6551 struct ix_queue *que = context;
6552 struct adapter *adapter = que->adapter;
6553 struct tx_ring *txr = que->txr;
6554 struct ifnet *ifp = adapter->ifp;
6555 bool more = false;
6556
6557 que->handleq.ev_count++;
6558
6559 if (ifp->if_flags & IFF_RUNNING) {
6560 more = ixgbe_rxeof(que);
6561 IXGBE_TX_LOCK(txr);
6562 more |= ixgbe_txeof(txr);
6563 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6564 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6565 ixgbe_mq_start_locked(ifp, txr);
6566 /* Only for queue 0 */
6567 /* NetBSD still needs this for CBQ */
6568 if ((&adapter->queues[0] == que)
6569 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6570 ixgbe_legacy_start_locked(ifp, txr);
6571 IXGBE_TX_UNLOCK(txr);
6572 }
6573
6574 if (more) {
6575 que->req.ev_count++;
6576 ixgbe_sched_handle_que(adapter, que);
6577 } else if (que->res != NULL) {
6578 /* MSIX: Re-enable this interrupt */
6579 ixgbe_enable_queue(adapter, que->msix);
6580 } else {
6581 /* INTx or MSI */
6582 ixgbe_enable_queue(adapter, 0);
6583 }
6584
6585 return;
6586 } /* ixgbe_handle_que */
6587
6588 /************************************************************************
6589 * ixgbe_handle_que_work
6590 ************************************************************************/
6591 static void
6592 ixgbe_handle_que_work(struct work *wk, void *context)
6593 {
6594 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6595
6596 /*
6597 * "enqueued flag" is not required here.
6598 * See ixgbe_msix_que().
6599 */
6600 ixgbe_handle_que(que);
6601 }
6602
6603 /************************************************************************
6604 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6605 ************************************************************************/
6606 static int
6607 ixgbe_allocate_legacy(struct adapter *adapter,
6608 const struct pci_attach_args *pa)
6609 {
6610 device_t dev = adapter->dev;
6611 struct ix_queue *que = adapter->queues;
6612 struct tx_ring *txr = adapter->tx_rings;
6613 int counts[PCI_INTR_TYPE_SIZE];
6614 pci_intr_type_t intr_type, max_type;
6615 char intrbuf[PCI_INTRSTR_LEN];
6616 char wqname[MAXCOMLEN];
6617 const char *intrstr = NULL;
6618 int defertx_error = 0, error;
6619
6620 /* We allocate a single interrupt resource */
6621 max_type = PCI_INTR_TYPE_MSI;
6622 counts[PCI_INTR_TYPE_MSIX] = 0;
6623 counts[PCI_INTR_TYPE_MSI] =
6624 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6625 	/* Check feat_cap, not feat_en, so that we can fall back to INTx */
6626 counts[PCI_INTR_TYPE_INTX] =
6627 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6628
6629 alloc_retry:
6630 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6631 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6632 return ENXIO;
6633 }
6634 adapter->osdep.nintrs = 1;
6635 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6636 intrbuf, sizeof(intrbuf));
6637 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6638 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6639 device_xname(dev));
6640 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6641 if (adapter->osdep.ihs[0] == NULL) {
6642 aprint_error_dev(dev,"unable to establish %s\n",
6643 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6644 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6645 adapter->osdep.intrs = NULL;
6646 switch (intr_type) {
6647 case PCI_INTR_TYPE_MSI:
6648 /* The next try is for INTx: Disable MSI */
6649 max_type = PCI_INTR_TYPE_INTX;
6650 counts[PCI_INTR_TYPE_INTX] = 1;
6651 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6652 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6653 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6654 goto alloc_retry;
6655 } else
6656 break;
6657 case PCI_INTR_TYPE_INTX:
6658 default:
6659 /* See below */
6660 break;
6661 }
6662 }
6663 if (intr_type == PCI_INTR_TYPE_INTX) {
6664 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6665 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6666 }
6667 if (adapter->osdep.ihs[0] == NULL) {
6668 aprint_error_dev(dev,
6669 "couldn't establish interrupt%s%s\n",
6670 intrstr ? " at " : "", intrstr ? intrstr : "");
6671 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6672 adapter->osdep.intrs = NULL;
6673 return ENXIO;
6674 }
6675 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6676 /*
6677 * Try allocating a fast interrupt and the associated deferred
6678 * processing contexts.
6679 */
6680 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6681 txr->txr_si =
6682 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6683 ixgbe_deferred_mq_start, txr);
6684
6685 snprintf(wqname, sizeof(wqname), "%sdeferTx",
6686 device_xname(dev));
6687 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6688 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6689 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6690 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6691 }
6692 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6693 ixgbe_handle_que, que);
6694 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6695 error = workqueue_create(&adapter->que_wq, wqname,
6696 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6697 IXGBE_WORKQUEUE_FLAGS);
6698
6699 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6700 && ((txr->txr_si == NULL) || defertx_error != 0))
6701 || (que->que_si == NULL) || error != 0) {
6702 aprint_error_dev(dev,
6703 "could not establish software interrupts\n");
6704
6705 return ENXIO;
6706 }
6707 /* For simplicity in the handlers */
6708 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6709
6710 return (0);
6711 } /* ixgbe_allocate_legacy */
6712
6713 /************************************************************************
6714 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6715 ************************************************************************/
6716 static int
6717 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6718 {
6719 device_t dev = adapter->dev;
6720 struct ix_queue *que = adapter->queues;
6721 struct tx_ring *txr = adapter->tx_rings;
6722 pci_chipset_tag_t pc;
6723 char intrbuf[PCI_INTRSTR_LEN];
6724 char intr_xname[32];
6725 char wqname[MAXCOMLEN];
6726 const char *intrstr = NULL;
6727 int error, vector = 0;
6728 int cpu_id = 0;
6729 kcpuset_t *affinity;
6730 #ifdef RSS
6731 unsigned int rss_buckets = 0;
6732 kcpuset_t cpu_mask;
6733 #endif
6734
6735 pc = adapter->osdep.pc;
6736 #ifdef RSS
6737 /*
6738 * If we're doing RSS, the number of queues needs to
6739 * match the number of RSS buckets that are configured.
6740 *
6741 	 * + If there are more queues than RSS buckets, we'll end
6742 	 *   up with queues that get no traffic.
6743 	 *
6744 	 * + If there are more RSS buckets than queues, multiple
6745 	 *   RSS buckets will map to the same queue, so there'll
6746 	 *   be some contention.
6747 */
6748 rss_buckets = rss_getnumbuckets();
6749 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6750 (adapter->num_queues != rss_buckets)) {
6751 device_printf(dev,
6752 "%s: number of queues (%d) != number of RSS buckets (%d)"
6753 "; performance will be impacted.\n",
6754 __func__, adapter->num_queues, rss_buckets);
6755 }
6756 #endif
6757
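	/* One vector per Tx/Rx queue pair, plus one for the admin (link) task */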
6758 adapter->osdep.nintrs = adapter->num_queues + 1;
6759 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6760 adapter->osdep.nintrs) != 0) {
6761 aprint_error_dev(dev,
6762 "failed to allocate MSI-X interrupt\n");
6763 return (ENXIO);
6764 }
6765
6766 kcpuset_create(&affinity, false);
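	/*
	 * Establish one handler per queue pair and spread the vectors
	 * round-robin across the available CPUs.
	 */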
6767 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6768 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6769 device_xname(dev), i);
6770 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6771 sizeof(intrbuf));
6772 #ifdef IXGBE_MPSAFE
6773 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6774 true);
6775 #endif
6776 /* Set the handler function */
6777 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6778 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6779 intr_xname);
6780 if (que->res == NULL) {
6781 aprint_error_dev(dev,
6782 "Failed to register QUE handler\n");
6783 error = ENXIO;
6784 goto err_out;
6785 }
6786 que->msix = vector;
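		/* Record this vector in the 64-bit active-queue mask */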
6787 adapter->active_queues |= 1ULL << que->msix;
6788
6789 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6790 #ifdef RSS
6791 /*
6792 * The queue ID is used as the RSS layer bucket ID.
6793 * We look up the queue ID -> RSS CPU ID and select
6794 * that.
6795 */
6796 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6797 CPU_SETOF(cpu_id, &cpu_mask);
6798 #endif
6799 } else {
6800 /*
6801 * Bind the MSI-X vector, and thus the
6802 * rings to the corresponding CPU.
6803 *
6804 * This just happens to match the default RSS
6805 * round-robin bucket -> queue -> CPU allocation.
6806 */
6807 if (adapter->num_queues > 1)
6808 cpu_id = i;
6809 }
6810 /* Round-robin affinity */
6811 kcpuset_zero(affinity);
6812 kcpuset_set(affinity, cpu_id % ncpu);
6813 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6814 NULL);
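		/*
		 * A failure from interrupt_distribute() is non-fatal;
		 * we simply keep the default CPU assignment for this
		 * vector.
		 */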
6815 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6816 intrstr);
6817 if (error == 0) {
6818 #if 1 /* def IXGBE_DEBUG */
6819 #ifdef RSS
6820 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
6821 cpu_id % ncpu);
6822 #else
6823 aprint_normal(", bound queue %d to cpu %d", i,
6824 cpu_id % ncpu);
6825 #endif
6826 #endif /* IXGBE_DEBUG */
6827 }
6828 aprint_normal("\n");
6829
6830 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6831 txr->txr_si = softint_establish(
6832 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6833 ixgbe_deferred_mq_start, txr);
6834 if (txr->txr_si == NULL) {
6835 aprint_error_dev(dev,
6836 "couldn't establish software interrupt\n");
6837 error = ENXIO;
6838 goto err_out;
6839 }
6840 }
6841 que->que_si
6842 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6843 ixgbe_handle_que, que);
6844 if (que->que_si == NULL) {
6845 aprint_error_dev(dev,
6846 "couldn't establish software interrupt\n");
6847 error = ENXIO;
6848 goto err_out;
6849 }
6850 }
6851 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6852 error = workqueue_create(&adapter->txr_wq, wqname,
6853 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6854 IXGBE_WORKQUEUE_FLAGS);
6855 if (error) {
6856 aprint_error_dev(dev,
6857 "couldn't create workqueue for deferred Tx\n");
6858 goto err_out;
6859 }
6860 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6861
6862 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6863 error = workqueue_create(&adapter->que_wq, wqname,
6864 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6865 IXGBE_WORKQUEUE_FLAGS);
6866 if (error) {
6867 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6868 goto err_out;
6869 }
6870
6871 /* and Link */
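	/* Advance cpu_id so the link vector lands on the CPU after the last queue */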
6872 cpu_id++;
6873 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6874 adapter->vector = vector;
6875 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6876 sizeof(intrbuf));
6877 #ifdef IXGBE_MPSAFE
6878 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6879 true);
6880 #endif
6881 /* Set the link handler function */
6882 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6883 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6884 intr_xname);
6885 if (adapter->osdep.ihs[vector] == NULL) {
6886 aprint_error_dev(dev, "Failed to register LINK handler\n");
6887 error = ENXIO;
6888 goto err_out;
6889 }
6890 /* Round-robin affinity */
6891 kcpuset_zero(affinity);
6892 kcpuset_set(affinity, cpu_id % ncpu);
6893 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6894 NULL);
6895
6896 aprint_normal_dev(dev,
6897 "for link, interrupting at %s", intrstr);
6898 if (error == 0)
6899 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6900 else
6901 aprint_normal("\n");
6902
6903 kcpuset_destroy(affinity);
6904 aprint_normal_dev(dev,
6905 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6906
6907 return (0);
6908
6909 err_out:
6910 kcpuset_destroy(affinity);
6911 ixgbe_free_deferred_handlers(adapter);
6912 ixgbe_free_pciintr_resources(adapter);
6913 return (error);
6914 } /* ixgbe_allocate_msix */
6915
6916 /************************************************************************
6917 * ixgbe_configure_interrupts
6918 *
6919  * Set up MSI-X, MSI, or legacy interrupts (in that order).
6920 * This will also depend on user settings.
6921 ************************************************************************/
6922 static int
6923 ixgbe_configure_interrupts(struct adapter *adapter)
6924 {
6925 device_t dev = adapter->dev;
6926 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6927 int want, queues, msgs;
6928
6929 /* Default to 1 queue if MSI-X setup fails */
6930 adapter->num_queues = 1;
6931
6932 	/* Skip MSI-X if the capability is absent or disabled by tunable */
6933 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6934 goto msi;
6935
6936 /*
6937 	 * NetBSD only: Use single-vector MSI when there is only one
6938 	 * CPU, to save an interrupt slot.
6939 */
6940 if (ncpu == 1)
6941 goto msi;
6942
6943 /* First try MSI-X */
6944 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6945 msgs = MIN(msgs, IXG_MAX_NINTR);
6946 if (msgs < 2)
6947 goto msi;
6948
6949 	adapter->msix_mem = (void *)1; /* XXX non-NULL marks MSI-X as usable */
6950
6951 /* Figure out a reasonable auto config value */
6952 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
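	/*
	 * Illustrative values: with 8 CPUs and 10 MSI-X messages this
	 * gives queues = 8; with 16 CPUs and 10 messages it is capped
	 * at 9, keeping one message for the link vector below.
	 */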
6953
6954 #ifdef RSS
6955 /* If we're doing RSS, clamp at the number of RSS buckets */
6956 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6957 queues = uimin(queues, rss_getnumbuckets());
6958 #endif
6959 if (ixgbe_num_queues > queues) {
6960 		aprint_error_dev(adapter->dev,
		    "ixgbe_num_queues (%d) is too large, "
		    "using reduced amount (%d).\n", ixgbe_num_queues, queues);
6961 ixgbe_num_queues = queues;
6962 }
6963
6964 if (ixgbe_num_queues != 0)
6965 queues = ixgbe_num_queues;
6966 else
6967 queues = uimin(queues,
6968 uimin(mac->max_tx_queues, mac->max_rx_queues));
6969
6970 	/* Reflect the adjusted value in the ixgbe_num_queues sysctl */
6971 ixgbe_num_queues = queues;
6972
6973 /*
6974 * Want one vector (RX/TX pair) per queue
6975 * plus an additional for Link.
6976 */
6977 want = queues + 1;
6978 if (msgs >= want)
6979 msgs = want;
6980 else {
6981 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6982 "%d vectors but %d queues wanted!\n",
6983 msgs, want);
6984 goto msi;
6985 }
6986 adapter->num_queues = queues;
6987 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6988 return (0);
6989
6990 /*
6991 * MSI-X allocation failed or provided us with
6992 	 * fewer vectors than needed.  Free the MSI-X
6993 	 * resources and try enabling MSI instead.
6994 */
6995 msi:
6996 /* Without MSI-X, some features are no longer supported */
6997 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6998 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6999 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7000 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
7001
7002 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
7003 adapter->msix_mem = NULL; /* XXX */
7004 	/* We only ever use a single MSI vector */
7005 	if (msgs != 0) {
7008 adapter->feat_en |= IXGBE_FEATURE_MSI;
7009 return (0);
7010 }
7011
7012 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7013 aprint_error_dev(dev,
7014 "Device does not support legacy interrupts.\n");
7015 return 1;
7016 }
7017
7018 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7019
7020 return (0);
7021 } /* ixgbe_configure_interrupts */
7022
7024 /************************************************************************
7025 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7026 *
7027 * Done outside of interrupt context since the driver might sleep
7028 ************************************************************************/
7029 static void
7030 ixgbe_handle_link(void *context)
7031 {
7032 struct adapter *adapter = context;
7033 struct ixgbe_hw *hw = &adapter->hw;
7034
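	/* Link state updates are serialized by the core lock (asserted next) */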
7035 KASSERT(mutex_owned(&adapter->core_mtx));
7036
7037 ++adapter->link_workev.ev_count;
7038 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7039 ixgbe_update_link_status(adapter);
7040
7041 /* Re-enable link interrupts */
7042 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7043 } /* ixgbe_handle_link */
7044
7045 #if 0
7046 /************************************************************************
7047 * ixgbe_rearm_queues
7048 ************************************************************************/
7049 static __inline void
7050 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
7051 {
7052 u32 mask;
7053
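	/*
	 * The 82598 has a single EICS register; later MACs split the
	 * 64-bit queue mask across EICS_EX(0) (low 32 bits) and
	 * EICS_EX(1) (high 32 bits).
	 */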
7054 switch (adapter->hw.mac.type) {
7055 case ixgbe_mac_82598EB:
7056 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7057 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
7058 break;
7059 case ixgbe_mac_82599EB:
7060 case ixgbe_mac_X540:
7061 case ixgbe_mac_X550:
7062 case ixgbe_mac_X550EM_x:
7063 case ixgbe_mac_X550EM_a:
7064 mask = (queues & 0xFFFFFFFF);
7065 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
7066 mask = (queues >> 32);
7067 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
7068 break;
7069 default:
7070 break;
7071 }
7072 } /* ixgbe_rearm_queues */
7073 #endif
7074