ixgbe.c revision 1.284 1 /* $NetBSD: ixgbe.c,v 1.284 2021/06/16 00:21:18 riastradh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.284 2021/06/16 00:21:18 riastradh Exp $");
68
69 #ifdef _KERNEL_OPT
70 #include "opt_inet.h"
71 #include "opt_inet6.h"
72 #include "opt_net_mpsafe.h"
73 #include "opt_ixgbe.h"
74 #endif
75
76 #include "ixgbe.h"
77 #include "ixgbe_phy.h"
78 #include "ixgbe_sriov.h"
79 #include "vlan.h"
80
81 #include <sys/cprng.h>
82 #include <dev/mii/mii.h>
83 #include <dev/mii/miivar.h>
84
85 /************************************************************************
86 * Driver version
87 ************************************************************************/
88 static const char ixgbe_driver_version[] = "4.0.1-k";
89 /* XXX NetBSD: + 3.3.10 */
90
91 /************************************************************************
92 * PCI Device ID Table
93 *
94 * Used by probe to select devices to load on
95 * Last field stores an index into ixgbe_strings
96 * Last entry must be all 0s
97 *
98 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
99 ************************************************************************/
100 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
101 {
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
147 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
148 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
149 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
150 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
151 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
152 /* required last entry */
153 {0, 0, 0, 0, 0}
154 };
155
156 /************************************************************************
157 * Table of branding strings
158 ************************************************************************/
159 static const char *ixgbe_strings[] = {
160 "Intel(R) PRO/10GbE PCI-Express Network Driver"
161 };
162
163 /************************************************************************
164 * Function prototypes
165 ************************************************************************/
166 static int ixgbe_probe(device_t, cfdata_t, void *);
167 static void ixgbe_quirks(struct adapter *);
168 static void ixgbe_attach(device_t, device_t, void *);
169 static int ixgbe_detach(device_t, int);
170 #if 0
171 static int ixgbe_shutdown(device_t);
172 #endif
173 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
174 static bool ixgbe_resume(device_t, const pmf_qual_t *);
175 static int ixgbe_ifflags_cb(struct ethercom *);
176 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
177 static int ixgbe_init(struct ifnet *);
178 static void ixgbe_init_locked(struct adapter *);
179 static void ixgbe_ifstop(struct ifnet *, int);
180 static void ixgbe_stop_locked(void *);
181 static void ixgbe_init_device_features(struct adapter *);
182 static int ixgbe_check_fan_failure(struct adapter *, u32, bool);
183 static void ixgbe_add_media_types(struct adapter *);
184 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
185 static int ixgbe_media_change(struct ifnet *);
186 static int ixgbe_allocate_pci_resources(struct adapter *,
187 const struct pci_attach_args *);
188 static void ixgbe_free_deferred_handlers(struct adapter *);
189 static void ixgbe_get_slot_info(struct adapter *);
190 static int ixgbe_allocate_msix(struct adapter *,
191 const struct pci_attach_args *);
192 static int ixgbe_allocate_legacy(struct adapter *,
193 const struct pci_attach_args *);
194 static int ixgbe_configure_interrupts(struct adapter *);
195 static void ixgbe_free_pciintr_resources(struct adapter *);
196 static void ixgbe_free_pci_resources(struct adapter *);
197 static void ixgbe_local_timer(void *);
198 static void ixgbe_handle_timer(struct work *, void *);
199 static void ixgbe_recovery_mode_timer(void *);
200 static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
201 static int ixgbe_setup_interface(device_t, struct adapter *);
202 static void ixgbe_config_gpie(struct adapter *);
203 static void ixgbe_config_dmac(struct adapter *);
204 static void ixgbe_config_delay_values(struct adapter *);
205 static void ixgbe_schedule_admin_tasklet(struct adapter *);
206 static void ixgbe_config_link(struct adapter *);
207 static void ixgbe_check_wol_support(struct adapter *);
208 static int ixgbe_setup_low_power_mode(struct adapter *);
209 #if 0
210 static void ixgbe_rearm_queues(struct adapter *, u64);
211 #endif
212
213 static void ixgbe_initialize_transmit_units(struct adapter *);
214 static void ixgbe_initialize_receive_units(struct adapter *);
215 static void ixgbe_enable_rx_drop(struct adapter *);
216 static void ixgbe_disable_rx_drop(struct adapter *);
217 static void ixgbe_initialize_rss_mapping(struct adapter *);
218
219 static void ixgbe_enable_intr(struct adapter *);
220 static void ixgbe_disable_intr(struct adapter *);
221 static void ixgbe_update_stats_counters(struct adapter *);
222 static void ixgbe_set_rxfilter(struct adapter *);
223 static void ixgbe_update_link_status(struct adapter *);
224 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
225 static void ixgbe_configure_ivars(struct adapter *);
226 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
227 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
228
229 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
230 static void ixgbe_setup_vlan_hw_support(struct adapter *);
231 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
232 static int ixgbe_register_vlan(struct adapter *, u16);
233 static int ixgbe_unregister_vlan(struct adapter *, u16);
234
235 static void ixgbe_add_device_sysctls(struct adapter *);
236 static void ixgbe_add_hw_stats(struct adapter *);
237 static void ixgbe_clear_evcnt(struct adapter *);
238 static int ixgbe_set_flowcntl(struct adapter *, int);
239 static int ixgbe_set_advertise(struct adapter *, int);
240 static int ixgbe_get_advertise(struct adapter *);
241
242 /* Sysctl handlers */
243 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
244 const char *, int *, int);
245 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
251 #ifdef IXGBE_DEBUG
252 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
254 #endif
255 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
256 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
257 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
258 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
259 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
260 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
261 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
262 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
263 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
264
265 /* Interrupt functions */
266 static int ixgbe_msix_que(void *);
267 static int ixgbe_msix_admin(void *);
268 static void ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
269 static int ixgbe_legacy_irq(void *);
270
271 /* Event handlers running on workqueue */
272 static void ixgbe_handle_que(void *);
273 static void ixgbe_handle_link(void *);
274 static void ixgbe_handle_msf(void *);
275 static void ixgbe_handle_mod(void *, bool);
276 static void ixgbe_handle_phy(void *);
277
278 /* Deferred workqueue handlers */
279 static void ixgbe_handle_admin(struct work *, void *);
280 static void ixgbe_handle_que_work(struct work *, void *);
281
282 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
283
284 /************************************************************************
285 * NetBSD Device Interface Entry Points
286 ************************************************************************/
287 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
288 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
289 DVF_DETACH_SHUTDOWN);
290
291 #if 0
292 devclass_t ix_devclass;
293 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
294
295 MODULE_DEPEND(ix, pci, 1, 1, 1);
296 MODULE_DEPEND(ix, ether, 1, 1, 1);
297 #ifdef DEV_NETMAP
298 MODULE_DEPEND(ix, netmap, 1, 1, 1);
299 #endif
300 #endif
301
302 /*
303 * TUNEABLE PARAMETERS:
304 */
305
306 /*
307 * AIM: Adaptive Interrupt Moderation
308 * which means that the interrupt rate
309 * is varied over time based on the
310 * traffic for that interrupt vector
311 */
312 static bool ixgbe_enable_aim = true;
313 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
314 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
315 "Enable adaptive interrupt moderation");
316
317 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
318 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
319 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
320
321 /* How many packets rxeof tries to clean at a time */
322 static int ixgbe_rx_process_limit = 256;
323 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
324 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
325
326 /* How many packets txeof tries to clean at a time */
327 static int ixgbe_tx_process_limit = 256;
328 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
329 &ixgbe_tx_process_limit, 0,
330 "Maximum number of sent packets to process at a time, -1 means unlimited");
331
332 /* Flow control setting, default to full */
333 static int ixgbe_flow_control = ixgbe_fc_full;
334 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
335 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
336
337 /* Which packet processing uses workqueue or softint */
338 static bool ixgbe_txrx_workqueue = false;
339
340 /*
341 * Smart speed setting, default to on
342 * this only works as a compile option
343 * right now as its during attach, set
344 * this to 'ixgbe_smart_speed_off' to
345 * disable.
346 */
347 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
348
349 /*
350 * MSI-X should be the default for best performance,
351 * but this allows it to be forced off for testing.
352 */
353 static int ixgbe_enable_msix = 1;
354 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
355 "Enable MSI-X interrupts");
356
357 /*
358 * Number of Queues, can be set to 0,
359 * it then autoconfigures based on the
360 * number of cpus with a max of 8. This
361 * can be overridden manually here.
362 */
363 static int ixgbe_num_queues = 0;
364 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
365 "Number of queues to configure, 0 indicates autoconfigure");
366
367 /*
368 * Number of TX descriptors per ring,
369 * setting higher than RX as this seems
370 * the better performing choice.
371 */
372 static int ixgbe_txd = PERFORM_TXD;
373 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
374 "Number of transmit descriptors per queue");
375
376 /* Number of RX descriptors per ring */
377 static int ixgbe_rxd = PERFORM_RXD;
378 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
379 "Number of receive descriptors per queue");
380
381 /*
382 * Defining this on will allow the use
383 * of unsupported SFP+ modules, note that
384 * doing so you are on your own :)
385 */
386 static int allow_unsupported_sfp = false;
387 #define TUNABLE_INT(__x, __y)
388 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
389
390 /*
391 * Not sure if Flow Director is fully baked,
392 * so we'll default to turning it off.
393 */
394 static int ixgbe_enable_fdir = 0;
395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
396 "Enable Flow Director");
397
398 /* Legacy Transmit (single queue) */
399 static int ixgbe_enable_legacy_tx = 0;
400 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
401 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
402
403 /* Receive-Side Scaling */
404 static int ixgbe_enable_rss = 1;
405 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
406 "Enable Receive-Side Scaling (RSS)");
407
408 #if 0
409 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
410 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
411 #endif
412
413 #ifdef NET_MPSAFE
414 #define IXGBE_MPSAFE 1
415 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
416 #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
417 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
418 #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
419 #else
420 #define IXGBE_CALLOUT_FLAGS 0
421 #define IXGBE_SOFTINT_FLAGS 0
422 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
423 #define IXGBE_TASKLET_WQ_FLAGS 0
424 #endif
425 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
426
427 /************************************************************************
428 * ixgbe_initialize_rss_mapping
429 ************************************************************************/
430 static void
431 ixgbe_initialize_rss_mapping(struct adapter *adapter)
432 {
433 struct ixgbe_hw *hw = &adapter->hw;
434 u32 reta = 0, mrqc, rss_key[10];
435 int queue_id, table_size, index_mult;
436 int i, j;
437 u32 rss_hash_config;
438
439 /* force use default RSS key. */
440 #ifdef __NetBSD__
441 rss_getkey((uint8_t *) &rss_key);
442 #else
443 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
444 /* Fetch the configured RSS key */
445 rss_getkey((uint8_t *) &rss_key);
446 } else {
447 /* set up random bits */
448 cprng_fast(&rss_key, sizeof(rss_key));
449 }
450 #endif
451
452 /* Set multiplier for RETA setup and table size based on MAC */
453 index_mult = 0x1;
454 table_size = 128;
455 switch (adapter->hw.mac.type) {
456 case ixgbe_mac_82598EB:
457 index_mult = 0x11;
458 break;
459 case ixgbe_mac_X550:
460 case ixgbe_mac_X550EM_x:
461 case ixgbe_mac_X550EM_a:
462 table_size = 512;
463 break;
464 default:
465 break;
466 }
467
468 /* Set up the redirection table */
469 for (i = 0, j = 0; i < table_size; i++, j++) {
470 if (j == adapter->num_queues)
471 j = 0;
472
473 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
474 /*
475 * Fetch the RSS bucket id for the given indirection
476 * entry. Cap it at the number of configured buckets
477 * (which is num_queues.)
478 */
479 queue_id = rss_get_indirection_to_bucket(i);
480 queue_id = queue_id % adapter->num_queues;
481 } else
482 queue_id = (j * index_mult);
483
484 /*
485 * The low 8 bits are for hash value (n+0);
486 * The next 8 bits are for hash value (n+1), etc.
487 */
488 reta = reta >> 8;
489 reta = reta | (((uint32_t) queue_id) << 24);
490 if ((i & 3) == 3) {
491 if (i < 128)
492 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
493 else
494 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
495 reta);
496 reta = 0;
497 }
498 }
499
500 /* Now fill our hash function seeds */
501 for (i = 0; i < 10; i++)
502 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
503
504 /* Perform hash on these packet types */
505 if (adapter->feat_en & IXGBE_FEATURE_RSS)
506 rss_hash_config = rss_gethashconfig();
507 else {
508 /*
509 * Disable UDP - IP fragments aren't currently being handled
510 * and so we end up with a mix of 2-tuple and 4-tuple
511 * traffic.
512 */
513 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
514 | RSS_HASHTYPE_RSS_TCP_IPV4
515 | RSS_HASHTYPE_RSS_IPV6
516 | RSS_HASHTYPE_RSS_TCP_IPV6
517 | RSS_HASHTYPE_RSS_IPV6_EX
518 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
519 }
520
521 mrqc = IXGBE_MRQC_RSSEN;
522 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
524 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
526 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
528 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
530 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
532 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
534 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
535 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
536 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
537 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
538 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
539 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
540 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
541 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
542 } /* ixgbe_initialize_rss_mapping */
543
544 /************************************************************************
545 * ixgbe_initialize_receive_units - Setup receive registers and features.
546 ************************************************************************/
547 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
548
549 static void
550 ixgbe_initialize_receive_units(struct adapter *adapter)
551 {
552 struct rx_ring *rxr = adapter->rx_rings;
553 struct ixgbe_hw *hw = &adapter->hw;
554 struct ifnet *ifp = adapter->ifp;
555 int i, j;
556 u32 bufsz, fctrl, srrctl, rxcsum;
557 u32 hlreg;
558
559 /*
560 * Make sure receives are disabled while
561 * setting up the descriptor ring
562 */
563 ixgbe_disable_rx(hw);
564
565 /* Enable broadcasts */
566 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
567 fctrl |= IXGBE_FCTRL_BAM;
568 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
569 fctrl |= IXGBE_FCTRL_DPF;
570 fctrl |= IXGBE_FCTRL_PMCF;
571 }
572 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
573
574 /* Set for Jumbo Frames? */
575 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
576 if (ifp->if_mtu > ETHERMTU)
577 hlreg |= IXGBE_HLREG0_JUMBOEN;
578 else
579 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
580
581 #ifdef DEV_NETMAP
582 /* CRC stripping is conditional in Netmap */
583 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
584 (ifp->if_capenable & IFCAP_NETMAP) &&
585 !ix_crcstrip)
586 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
587 else
588 #endif /* DEV_NETMAP */
589 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
590
591 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
592
593 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
594 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
595
596 for (i = 0; i < adapter->num_queues; i++, rxr++) {
597 u64 rdba = rxr->rxdma.dma_paddr;
598 u32 reg;
599 int regnum = i / 4; /* 1 register per 4 queues */
600 int regshift = i % 4; /* 4 bits per 1 queue */
601 j = rxr->me;
602
603 /* Setup the Base and Length of the Rx Descriptor Ring */
604 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
605 (rdba & 0x00000000ffffffffULL));
606 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
607 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
608 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
609
610 /* Set up the SRRCTL register */
611 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
612 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
613 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
614 srrctl |= bufsz;
615 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
616
617 /* Set RQSMR (Receive Queue Statistic Mapping) register */
618 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
619 reg &= ~(0x000000ffUL << (regshift * 8));
620 reg |= i << (regshift * 8);
621 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
622
623 /*
624 * Set DROP_EN iff we have no flow control and >1 queue.
625 * Note that srrctl was cleared shortly before during reset,
626 * so we do not need to clear the bit, but do it just in case
627 * this code is moved elsewhere.
628 */
629 if (adapter->num_queues > 1 &&
630 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
631 srrctl |= IXGBE_SRRCTL_DROP_EN;
632 } else {
633 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
634 }
635
636 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
637
638 /* Setup the HW Rx Head and Tail Descriptor Pointers */
639 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
640 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
641
642 /* Set the driver rx tail address */
643 rxr->tail = IXGBE_RDT(rxr->me);
644 }
645
646 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
647 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
648 | IXGBE_PSRTYPE_UDPHDR
649 | IXGBE_PSRTYPE_IPV4HDR
650 | IXGBE_PSRTYPE_IPV6HDR;
651 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
652 }
653
654 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
655
656 ixgbe_initialize_rss_mapping(adapter);
657
658 if (adapter->num_queues > 1) {
659 /* RSS and RX IPP Checksum are mutually exclusive */
660 rxcsum |= IXGBE_RXCSUM_PCSD;
661 }
662
663 if (ifp->if_capenable & IFCAP_RXCSUM)
664 rxcsum |= IXGBE_RXCSUM_PCSD;
665
666 /* This is useful for calculating UDP/IP fragment checksums */
667 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
668 rxcsum |= IXGBE_RXCSUM_IPPCSE;
669
670 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
671
672 } /* ixgbe_initialize_receive_units */
673
674 /************************************************************************
675 * ixgbe_initialize_transmit_units - Enable transmit units.
676 ************************************************************************/
677 static void
678 ixgbe_initialize_transmit_units(struct adapter *adapter)
679 {
680 struct tx_ring *txr = adapter->tx_rings;
681 struct ixgbe_hw *hw = &adapter->hw;
682 int i;
683
684 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
685
686 /* Setup the Base and Length of the Tx Descriptor Ring */
687 for (i = 0; i < adapter->num_queues; i++, txr++) {
688 u64 tdba = txr->txdma.dma_paddr;
689 u32 txctrl = 0;
690 u32 tqsmreg, reg;
691 int regnum = i / 4; /* 1 register per 4 queues */
692 int regshift = i % 4; /* 4 bits per 1 queue */
693 int j = txr->me;
694
695 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
696 (tdba & 0x00000000ffffffffULL));
697 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
698 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
699 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
700
701 /*
702 * Set TQSMR (Transmit Queue Statistic Mapping) register.
703 * Register location is different between 82598 and others.
704 */
705 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
706 tqsmreg = IXGBE_TQSMR(regnum);
707 else
708 tqsmreg = IXGBE_TQSM(regnum);
709 reg = IXGBE_READ_REG(hw, tqsmreg);
710 reg &= ~(0x000000ffUL << (regshift * 8));
711 reg |= i << (regshift * 8);
712 IXGBE_WRITE_REG(hw, tqsmreg, reg);
713
714 /* Setup the HW Tx Head and Tail descriptor pointers */
715 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
716 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
717
718 /* Cache the tail address */
719 txr->tail = IXGBE_TDT(j);
720
721 txr->txr_no_space = false;
722
723 /* Disable Head Writeback */
724 /*
725 * Note: for X550 series devices, these registers are actually
726 * prefixed with TPH_ isntead of DCA_, but the addresses and
727 * fields remain the same.
728 */
729 switch (hw->mac.type) {
730 case ixgbe_mac_82598EB:
731 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
732 break;
733 default:
734 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
735 break;
736 }
737 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
738 switch (hw->mac.type) {
739 case ixgbe_mac_82598EB:
740 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
741 break;
742 default:
743 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
744 break;
745 }
746
747 }
748
749 if (hw->mac.type != ixgbe_mac_82598EB) {
750 u32 dmatxctl, rttdcs;
751
752 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
753 dmatxctl |= IXGBE_DMATXCTL_TE;
754 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
755 /* Disable arbiter to set MTQC */
756 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
757 rttdcs |= IXGBE_RTTDCS_ARBDIS;
758 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
759 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
760 ixgbe_get_mtqc(adapter->iov_mode));
761 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
762 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
763 }
764
765 return;
766 } /* ixgbe_initialize_transmit_units */
767
768 static void
769 ixgbe_quirks(struct adapter *adapter)
770 {
771 device_t dev = adapter->dev;
772 struct ixgbe_hw *hw = &adapter->hw;
773 const char *vendor, *product;
774
775 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
776 /*
777 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
778 * MA10-ST0.
779 */
780 vendor = pmf_get_platform("system-vendor");
781 product = pmf_get_platform("system-product");
782
783 if ((vendor == NULL) || (product == NULL))
784 return;
785
786 if ((strcmp(vendor, "GIGABYTE") == 0) &&
787 (strcmp(product, "MA10-ST0") == 0)) {
788 aprint_verbose_dev(dev,
789 "Enable SFP+ MOD_ABS inverse quirk\n");
790 adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
791 }
792 }
793 }
794
795 /************************************************************************
796 * ixgbe_attach - Device initialization routine
797 *
798 * Called when the driver is being loaded.
799 * Identifies the type of hardware, allocates all resources
800 * and initializes the hardware.
801 *
802 * return 0 on success, positive on failure
803 ************************************************************************/
804 static void
805 ixgbe_attach(device_t parent, device_t dev, void *aux)
806 {
807 struct adapter *adapter;
808 struct ixgbe_hw *hw;
809 int error = -1;
810 u32 ctrl_ext;
811 u16 high, low, nvmreg;
812 pcireg_t id, subid;
813 const ixgbe_vendor_info_t *ent;
814 struct pci_attach_args *pa = aux;
815 bool unsupported_sfp = false;
816 const char *str;
817 char wqname[MAXCOMLEN];
818 char buf[256];
819
820 INIT_DEBUGOUT("ixgbe_attach: begin");
821
822 /* Allocate, clear, and link in our adapter structure */
823 adapter = device_private(dev);
824 adapter->hw.back = adapter;
825 adapter->dev = dev;
826 hw = &adapter->hw;
827 adapter->osdep.pc = pa->pa_pc;
828 adapter->osdep.tag = pa->pa_tag;
829 if (pci_dma64_available(pa))
830 adapter->osdep.dmat = pa->pa_dmat64;
831 else
832 adapter->osdep.dmat = pa->pa_dmat;
833 adapter->osdep.attached = false;
834 adapter->osdep.detaching = false;
835
836 ent = ixgbe_lookup(pa);
837
838 KASSERT(ent != NULL);
839
840 aprint_normal(": %s, Version - %s\n",
841 ixgbe_strings[ent->index], ixgbe_driver_version);
842
843 /* Core Lock Init */
844 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
845
846 /* Set up the timer callout and workqueue */
847 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
848 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
849 error = workqueue_create(&adapter->timer_wq, wqname,
850 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
851 IXGBE_TASKLET_WQ_FLAGS);
852 if (error) {
853 aprint_error_dev(dev,
854 "could not create timer workqueue (%d)\n", error);
855 goto err_out;
856 }
857
858 /* Determine hardware revision */
859 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
860 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
861
862 hw->vendor_id = PCI_VENDOR(id);
863 hw->device_id = PCI_PRODUCT(id);
864 hw->revision_id =
865 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
866 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
867 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
868
869 /* Set quirk flags */
870 ixgbe_quirks(adapter);
871
872 /*
873 * Make sure BUSMASTER is set
874 */
875 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
876
877 /* Do base PCI setup - map BAR0 */
878 if (ixgbe_allocate_pci_resources(adapter, pa)) {
879 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
880 error = ENXIO;
881 goto err_out;
882 }
883
884 /* let hardware know driver is loaded */
885 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
886 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
887 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
888
889 /*
890 * Initialize the shared code
891 */
892 if (ixgbe_init_shared_code(hw) != 0) {
893 aprint_error_dev(dev, "Unable to initialize the shared code\n");
894 error = ENXIO;
895 goto err_out;
896 }
897
898 switch (hw->mac.type) {
899 case ixgbe_mac_82598EB:
900 str = "82598EB";
901 break;
902 case ixgbe_mac_82599EB:
903 str = "82599EB";
904 break;
905 case ixgbe_mac_X540:
906 str = "X540";
907 break;
908 case ixgbe_mac_X550:
909 str = "X550";
910 break;
911 case ixgbe_mac_X550EM_x:
912 str = "X550EM X";
913 break;
914 case ixgbe_mac_X550EM_a:
915 str = "X550EM A";
916 break;
917 default:
918 str = "Unknown";
919 break;
920 }
921 aprint_normal_dev(dev, "device %s\n", str);
922
923 if (hw->mbx.ops.init_params)
924 hw->mbx.ops.init_params(hw);
925
926 hw->allow_unsupported_sfp = allow_unsupported_sfp;
927
928 /* Pick up the 82599 settings */
929 if (hw->mac.type != ixgbe_mac_82598EB) {
930 hw->phy.smart_speed = ixgbe_smart_speed;
931 adapter->num_segs = IXGBE_82599_SCATTER;
932 } else
933 adapter->num_segs = IXGBE_82598_SCATTER;
934
935 /* Ensure SW/FW semaphore is free */
936 ixgbe_init_swfw_semaphore(hw);
937
938 hw->mac.ops.set_lan_id(hw);
939 ixgbe_init_device_features(adapter);
940
941 if (ixgbe_configure_interrupts(adapter)) {
942 error = ENXIO;
943 goto err_out;
944 }
945
946 /* Allocate multicast array memory. */
947 adapter->mta = malloc(sizeof(*adapter->mta) *
948 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949
950 /* Enable WoL (if supported) */
951 ixgbe_check_wol_support(adapter);
952
953 /* Register for VLAN events */
954 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
955
956 /* Verify adapter fan is still functional (if applicable) */
957 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 ixgbe_check_fan_failure(adapter, esdp, FALSE);
960 }
961
962 /* Set an initial default flow control value */
963 hw->fc.requested_mode = ixgbe_flow_control;
964
965 /* Sysctls for limiting the amount of work done in the taskqueues */
966 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
967 "max number of rx packets to process",
968 &adapter->rx_process_limit, ixgbe_rx_process_limit);
969
970 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
971 "max number of tx packets to process",
972 &adapter->tx_process_limit, ixgbe_tx_process_limit);
973
974 /* Do descriptor calc and sanity checks */
975 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
976 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
977 aprint_error_dev(dev, "TXD config issue, using default!\n");
978 adapter->num_tx_desc = DEFAULT_TXD;
979 } else
980 adapter->num_tx_desc = ixgbe_txd;
981
982 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
983 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
984 aprint_error_dev(dev, "RXD config issue, using default!\n");
985 adapter->num_rx_desc = DEFAULT_RXD;
986 } else
987 adapter->num_rx_desc = ixgbe_rxd;
988
989 adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;
990
991 /* Allocate our TX/RX Queues */
992 if (ixgbe_allocate_queues(adapter)) {
993 error = ENOMEM;
994 goto err_out;
995 }
996
997 hw->phy.reset_if_overtemp = TRUE;
998 error = ixgbe_reset_hw(hw);
999 hw->phy.reset_if_overtemp = FALSE;
1000 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1001 error = IXGBE_SUCCESS;
1002 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1003 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
1004 unsupported_sfp = true;
1005 error = IXGBE_SUCCESS;
1006 } else if (error) {
1007 aprint_error_dev(dev,
1008 "Hardware initialization failed(error = %d)\n", error);
1009 error = EIO;
1010 goto err_late;
1011 }
1012
1013 /* Make sure we have a good EEPROM before we read from it */
1014 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
1015 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1016 error = EIO;
1017 goto err_late;
1018 }
1019
1020 aprint_normal("%s:", device_xname(dev));
1021 /* NVM Image Version */
1022 high = low = 0;
1023 switch (hw->mac.type) {
1024 case ixgbe_mac_X540:
1025 case ixgbe_mac_X550EM_a:
1026 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1027 if (nvmreg == 0xffff)
1028 break;
1029 high = (nvmreg >> 12) & 0x0f;
1030 low = (nvmreg >> 4) & 0xff;
1031 id = nvmreg & 0x0f;
1032 aprint_normal(" NVM Image Version %u.", high);
1033 if (hw->mac.type == ixgbe_mac_X540)
1034 str = "%x";
1035 else
1036 str = "%02x";
1037 aprint_normal(str, low);
1038 aprint_normal(" ID 0x%x,", id);
1039 break;
1040 case ixgbe_mac_X550EM_x:
1041 case ixgbe_mac_X550:
1042 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1043 if (nvmreg == 0xffff)
1044 break;
1045 high = (nvmreg >> 12) & 0x0f;
1046 low = nvmreg & 0xff;
1047 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1048 break;
1049 default:
1050 break;
1051 }
1052 hw->eeprom.nvm_image_ver_high = high;
1053 hw->eeprom.nvm_image_ver_low = low;
1054
1055 /* PHY firmware revision */
1056 switch (hw->mac.type) {
1057 case ixgbe_mac_X540:
1058 case ixgbe_mac_X550:
1059 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1060 if (nvmreg == 0xffff)
1061 break;
1062 high = (nvmreg >> 12) & 0x0f;
1063 low = (nvmreg >> 4) & 0xff;
1064 id = nvmreg & 0x000f;
1065 aprint_normal(" PHY FW Revision %u.", high);
1066 if (hw->mac.type == ixgbe_mac_X540)
1067 str = "%x";
1068 else
1069 str = "%02x";
1070 aprint_normal(str, low);
1071 aprint_normal(" ID 0x%x,", id);
1072 break;
1073 default:
1074 break;
1075 }
1076
1077 /* NVM Map version & OEM NVM Image version */
1078 switch (hw->mac.type) {
1079 case ixgbe_mac_X550:
1080 case ixgbe_mac_X550EM_x:
1081 case ixgbe_mac_X550EM_a:
1082 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1083 if (nvmreg != 0xffff) {
1084 high = (nvmreg >> 12) & 0x0f;
1085 low = nvmreg & 0x00ff;
1086 aprint_normal(" NVM Map version %u.%02x,", high, low);
1087 }
1088 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1089 if (nvmreg != 0xffff) {
1090 high = (nvmreg >> 12) & 0x0f;
1091 low = nvmreg & 0x00ff;
1092 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1093 low);
1094 }
1095 break;
1096 default:
1097 break;
1098 }
1099
1100 /* Print the ETrackID */
1101 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1102 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1103 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1104
1105 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1106 error = ixgbe_allocate_msix(adapter, pa);
1107 if (error) {
1108 /* Free allocated queue structures first */
1109 ixgbe_free_queues(adapter);
1110
1111 /* Fallback to legacy interrupt */
1112 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1113 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1114 adapter->feat_en |= IXGBE_FEATURE_MSI;
1115 adapter->num_queues = 1;
1116
1117 /* Allocate our TX/RX Queues again */
1118 if (ixgbe_allocate_queues(adapter)) {
1119 error = ENOMEM;
1120 goto err_out;
1121 }
1122 }
1123 }
1124 /* Recovery mode */
1125 switch (adapter->hw.mac.type) {
1126 case ixgbe_mac_X550:
1127 case ixgbe_mac_X550EM_x:
1128 case ixgbe_mac_X550EM_a:
1129 /* >= 2.00 */
1130 if (hw->eeprom.nvm_image_ver_high >= 2) {
1131 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1132 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1133 }
1134 break;
1135 default:
1136 break;
1137 }
1138
1139 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1140 error = ixgbe_allocate_legacy(adapter, pa);
1141 if (error)
1142 goto err_late;
1143
1144 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1145 mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1146 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1147 error = workqueue_create(&adapter->admin_wq, wqname,
1148 ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
1149 IXGBE_TASKLET_WQ_FLAGS);
1150 if (error) {
1151 aprint_error_dev(dev,
1152 "could not create admin workqueue (%d)\n", error);
1153 goto err_out;
1154 }
1155
1156 error = ixgbe_start_hw(hw);
1157 switch (error) {
1158 case IXGBE_ERR_EEPROM_VERSION:
1159 aprint_error_dev(dev, "This device is a pre-production adapter/"
1160 "LOM. Please be aware there may be issues associated "
1161 "with your hardware.\nIf you are experiencing problems "
1162 "please contact your Intel or hardware representative "
1163 "who provided you with this hardware.\n");
1164 break;
1165 default:
1166 break;
1167 }
1168
1169 /* Setup OS specific network interface */
1170 if (ixgbe_setup_interface(dev, adapter) != 0)
1171 goto err_late;
1172
1173 /*
1174 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1175 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1176 */
1177 if (hw->phy.media_type == ixgbe_media_type_copper) {
1178 uint16_t id1, id2;
1179 int oui, model, rev;
1180 const char *descr;
1181
1182 id1 = hw->phy.id >> 16;
1183 id2 = hw->phy.id & 0xffff;
1184 oui = MII_OUI(id1, id2);
1185 model = MII_MODEL(id2);
1186 rev = MII_REV(id2);
1187 if ((descr = mii_get_descr(oui, model)) != NULL)
1188 aprint_normal_dev(dev,
1189 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1190 descr, oui, model, rev);
1191 else
1192 aprint_normal_dev(dev,
1193 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1194 oui, model, rev);
1195 }
1196
1197 /* Enable EEE power saving */
1198 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1199 hw->mac.ops.setup_eee(hw,
1200 adapter->feat_en & IXGBE_FEATURE_EEE);
1201
1202 /* Enable power to the phy. */
1203 if (!unsupported_sfp) {
1204 /* Enable the optics for 82599 SFP+ fiber */
1205 ixgbe_enable_tx_laser(hw);
1206
1207 /*
1208 * XXX Currently, ixgbe_set_phy_power() supports only copper
1209 * PHY, so it's not required to test with !unsupported_sfp.
1210 */
1211 ixgbe_set_phy_power(hw, TRUE);
1212 }
1213
1214 /* Initialize statistics */
1215 ixgbe_update_stats_counters(adapter);
1216
1217 /* Check PCIE slot type/speed/width */
1218 ixgbe_get_slot_info(adapter);
1219
1220 /*
1221 * Do time init and sysctl init here, but
1222 * only on the first port of a bypass adapter.
1223 */
1224 ixgbe_bypass_init(adapter);
1225
1226 /* Set an initial dmac value */
1227 adapter->dmac = 0;
1228 /* Set initial advertised speeds (if applicable) */
1229 adapter->advertise = ixgbe_get_advertise(adapter);
1230
1231 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1232 ixgbe_define_iov_schemas(dev, &error);
1233
1234 /* Add sysctls */
1235 ixgbe_add_device_sysctls(adapter);
1236 ixgbe_add_hw_stats(adapter);
1237
1238 /* For Netmap */
1239 adapter->init_locked = ixgbe_init_locked;
1240 adapter->stop_locked = ixgbe_stop_locked;
1241
1242 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1243 ixgbe_netmap_attach(adapter);
1244
1245 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1246 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1247 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1248 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1249
1250 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1251 pmf_class_network_register(dev, adapter->ifp);
1252 else
1253 aprint_error_dev(dev, "couldn't establish power handler\n");
1254
1255 /* Init recovery mode timer and state variable */
1256 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1257 adapter->recovery_mode = 0;
1258
1259 /* Set up the timer callout */
1260 callout_init(&adapter->recovery_mode_timer,
1261 IXGBE_CALLOUT_FLAGS);
1262 snprintf(wqname, sizeof(wqname), "%s-recovery",
1263 device_xname(dev));
1264 error = workqueue_create(&adapter->recovery_mode_timer_wq,
1265 wqname, ixgbe_handle_recovery_mode_timer, adapter,
1266 IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1267 if (error) {
1268 aprint_error_dev(dev, "could not create "
1269 "recovery_mode_timer workqueue (%d)\n", error);
1270 goto err_out;
1271 }
1272
1273 /* Start the task */
1274 callout_reset(&adapter->recovery_mode_timer, hz,
1275 ixgbe_recovery_mode_timer, adapter);
1276 }
1277
1278 INIT_DEBUGOUT("ixgbe_attach: end");
1279 adapter->osdep.attached = true;
1280
1281 return;
1282
1283 err_late:
1284 ixgbe_free_queues(adapter);
1285 err_out:
1286 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1287 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1288 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1289 ixgbe_free_deferred_handlers(adapter);
1290 ixgbe_free_pci_resources(adapter);
1291 if (adapter->mta != NULL)
1292 free(adapter->mta, M_DEVBUF);
1293 mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
1294 IXGBE_CORE_LOCK_DESTROY(adapter);
1295
1296 return;
1297 } /* ixgbe_attach */
1298
1299 /************************************************************************
1300 * ixgbe_check_wol_support
1301 *
1302 * Checks whether the adapter's ports are capable of
1303 * Wake On LAN by reading the adapter's NVM.
1304 *
1305 * Sets each port's hw->wol_enabled value depending
1306 * on the value read here.
1307 ************************************************************************/
1308 static void
1309 ixgbe_check_wol_support(struct adapter *adapter)
1310 {
1311 struct ixgbe_hw *hw = &adapter->hw;
1312 u16 dev_caps = 0;
1313
1314 /* Find out WoL support for port */
1315 adapter->wol_support = hw->wol_enabled = 0;
1316 ixgbe_get_device_caps(hw, &dev_caps);
1317 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1318 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1319 hw->bus.func == 0))
1320 adapter->wol_support = hw->wol_enabled = 1;
1321
1322 /* Save initial wake up filter configuration */
1323 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1324
1325 return;
1326 } /* ixgbe_check_wol_support */
1327
1328 /************************************************************************
1329 * ixgbe_setup_interface
1330 *
1331 * Setup networking device structure and register an interface.
1332 ************************************************************************/
1333 static int
1334 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1335 {
1336 struct ethercom *ec = &adapter->osdep.ec;
1337 struct ifnet *ifp;
1338
1339 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1340
1341 ifp = adapter->ifp = &ec->ec_if;
1342 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1343 ifp->if_baudrate = IF_Gbps(10);
1344 ifp->if_init = ixgbe_init;
1345 ifp->if_stop = ixgbe_ifstop;
1346 ifp->if_softc = adapter;
1347 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1348 #ifdef IXGBE_MPSAFE
1349 ifp->if_extflags = IFEF_MPSAFE;
1350 #endif
1351 ifp->if_ioctl = ixgbe_ioctl;
1352 #if __FreeBSD_version >= 1100045
1353 /* TSO parameters */
1354 ifp->if_hw_tsomax = 65518;
1355 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1356 ifp->if_hw_tsomaxsegsize = 2048;
1357 #endif
1358 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1359 #if 0
1360 ixgbe_start_locked = ixgbe_legacy_start_locked;
1361 #endif
1362 } else {
1363 ifp->if_transmit = ixgbe_mq_start;
1364 #if 0
1365 ixgbe_start_locked = ixgbe_mq_start_locked;
1366 #endif
1367 }
1368 ifp->if_start = ixgbe_legacy_start;
1369 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1370 IFQ_SET_READY(&ifp->if_snd);
1371
1372 if_initialize(ifp);
1373 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1374 ether_ifattach(ifp, adapter->hw.mac.addr);
1375 aprint_normal_dev(dev, "Ethernet address %s\n",
1376 ether_sprintf(adapter->hw.mac.addr));
1377 /*
1378 * We use per TX queue softint, so if_deferred_start_init() isn't
1379 * used.
1380 */
1381 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1382
1383 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1384
1385 /*
1386 * Tell the upper layer(s) we support long frames.
1387 */
1388 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1389
1390 /* Set capability flags */
1391 ifp->if_capabilities |= IFCAP_RXCSUM
1392 | IFCAP_TXCSUM
1393 | IFCAP_TSOv4
1394 | IFCAP_TSOv6;
1395 ifp->if_capenable = 0;
1396
1397 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1398 | ETHERCAP_VLAN_HWCSUM
1399 | ETHERCAP_JUMBO_MTU
1400 | ETHERCAP_VLAN_MTU;
1401
1402 /* Enable the above capabilities by default */
1403 ec->ec_capenable = ec->ec_capabilities;
1404
1405 /*
1406 * Don't turn this on by default, if vlans are
1407 * created on another pseudo device (eg. lagg)
1408 * then vlan events are not passed thru, breaking
1409 * operation, but with HW FILTER off it works. If
1410 * using vlans directly on the ixgbe driver you can
1411 * enable this and get full hardware tag filtering.
1412 */
1413 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1414
1415 /*
1416 * Specify the media types supported by this adapter and register
1417 * callbacks to update media and link information
1418 */
1419 ec->ec_ifmedia = &adapter->media;
1420 ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
1421 ixgbe_media_status, &adapter->core_mtx);
1422
1423 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1424 ixgbe_add_media_types(adapter);
1425
1426 /* Set autoselect media by default */
1427 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1428
1429 if_register(ifp);
1430
1431 return (0);
1432 } /* ixgbe_setup_interface */
1433
1434 /************************************************************************
1435 * ixgbe_add_media_types
1436 ************************************************************************/
1437 static void
1438 ixgbe_add_media_types(struct adapter *adapter)
1439 {
1440 struct ixgbe_hw *hw = &adapter->hw;
1441 u64 layer;
1442
1443 layer = adapter->phy_layer;
1444
1445 #define ADD(mm, dd) \
1446 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1447
1448 ADD(IFM_NONE, 0);
1449
1450 /* Media types with matching NetBSD media defines */
1451 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1452 ADD(IFM_10G_T | IFM_FDX, 0);
1453 }
1454 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1455 ADD(IFM_1000_T | IFM_FDX, 0);
1456 }
1457 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1458 ADD(IFM_100_TX | IFM_FDX, 0);
1459 }
1460 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1461 ADD(IFM_10_T | IFM_FDX, 0);
1462 }
1463
1464 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1465 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1466 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1467 }
1468
1469 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1470 ADD(IFM_10G_LR | IFM_FDX, 0);
1471 if (hw->phy.multispeed_fiber) {
1472 ADD(IFM_1000_LX | IFM_FDX, 0);
1473 }
1474 }
1475 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1476 ADD(IFM_10G_SR | IFM_FDX, 0);
1477 if (hw->phy.multispeed_fiber) {
1478 ADD(IFM_1000_SX | IFM_FDX, 0);
1479 }
1480 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1481 ADD(IFM_1000_SX | IFM_FDX, 0);
1482 }
1483 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1484 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1485 }
1486
1487 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1488 ADD(IFM_10G_KR | IFM_FDX, 0);
1489 }
1490 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1491 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1492 }
1493 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1494 ADD(IFM_1000_KX | IFM_FDX, 0);
1495 }
1496 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1497 ADD(IFM_2500_KX | IFM_FDX, 0);
1498 }
1499 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1500 ADD(IFM_2500_T | IFM_FDX, 0);
1501 }
1502 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1503 ADD(IFM_5000_T | IFM_FDX, 0);
1504 }
1505 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1506 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1507 /* XXX no ifmedia_set? */
1508
1509 ADD(IFM_AUTO, 0);
1510
1511 #undef ADD
1512 } /* ixgbe_add_media_types */
1513
1514 /************************************************************************
1515 * ixgbe_is_sfp
1516 ************************************************************************/
1517 static inline bool
1518 ixgbe_is_sfp(struct ixgbe_hw *hw)
1519 {
1520 switch (hw->mac.type) {
1521 case ixgbe_mac_82598EB:
1522 if (hw->phy.type == ixgbe_phy_nl)
1523 return (TRUE);
1524 return (FALSE);
1525 case ixgbe_mac_82599EB:
1526 case ixgbe_mac_X550EM_x:
1527 case ixgbe_mac_X550EM_a:
1528 switch (hw->mac.ops.get_media_type(hw)) {
1529 case ixgbe_media_type_fiber:
1530 case ixgbe_media_type_fiber_qsfp:
1531 return (TRUE);
1532 default:
1533 return (FALSE);
1534 }
1535 default:
1536 return (FALSE);
1537 }
1538 } /* ixgbe_is_sfp */
1539
1540 static void
1541 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1542 {
1543
1544 KASSERT(mutex_owned(&adapter->admin_mtx));
1545
1546 if (__predict_true(adapter->osdep.detaching == false)) {
1547 if (adapter->admin_pending == 0)
1548 workqueue_enqueue(adapter->admin_wq,
1549 &adapter->admin_wc, NULL);
1550 adapter->admin_pending = 1;
1551 }
1552 }
1553
1554 /************************************************************************
1555 * ixgbe_config_link
1556 ************************************************************************/
1557 static void
1558 ixgbe_config_link(struct adapter *adapter)
1559 {
1560 struct ixgbe_hw *hw = &adapter->hw;
1561 u32 autoneg, err = 0;
1562 u32 task_requests = 0;
1563 bool sfp, negotiate = false;
1564
1565 sfp = ixgbe_is_sfp(hw);
1566
1567 if (sfp) {
1568 if (hw->phy.multispeed_fiber) {
1569 ixgbe_enable_tx_laser(hw);
1570 task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1571 }
1572 task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1573
1574 mutex_enter(&adapter->admin_mtx);
1575 adapter->task_requests |= task_requests;
1576 ixgbe_schedule_admin_tasklet(adapter);
1577 mutex_exit(&adapter->admin_mtx);
1578 } else {
1579 struct ifmedia *ifm = &adapter->media;
1580
1581 if (hw->mac.ops.check_link)
1582 err = ixgbe_check_link(hw, &adapter->link_speed,
1583 &adapter->link_up, FALSE);
1584 if (err)
1585 return;
1586
1587 /*
1588 * Check if it's the first call. If it's the first call,
1589 * get value for auto negotiation.
1590 */
1591 autoneg = hw->phy.autoneg_advertised;
1592 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1593 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1594 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1595 &negotiate);
1596 if (err)
1597 return;
1598 if (hw->mac.ops.setup_link)
1599 err = hw->mac.ops.setup_link(hw, autoneg,
1600 adapter->link_up);
1601 }
1602
1603 } /* ixgbe_config_link */
1604
1605 /************************************************************************
1606 * ixgbe_update_stats_counters - Update board statistics counters.
1607 ************************************************************************/
1608 static void
1609 ixgbe_update_stats_counters(struct adapter *adapter)
1610 {
1611 struct ifnet *ifp = adapter->ifp;
1612 struct ixgbe_hw *hw = &adapter->hw;
1613 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1614 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1615 u64 total_missed_rx = 0;
1616 uint64_t crcerrs, rlec;
1617 unsigned int queue_counters;
1618 int i;
1619
1620 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1621 stats->crcerrs.ev_count += crcerrs;
1622 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1623 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1624 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1625 if (hw->mac.type >= ixgbe_mac_X550)
1626 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1627
1628 /* 16 registers exist */
1629 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1630 for (i = 0; i < queue_counters; i++) {
1631 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1632 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1633 if (hw->mac.type >= ixgbe_mac_82599EB) {
1634 stats->qprdc[i].ev_count
1635 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1636 }
1637 }
1638
1639 /* 8 registers exist */
1640 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1641 uint32_t mp;
1642
1643 /* MPC */
1644 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1645 /* global total per queue */
1646 stats->mpc[i].ev_count += mp;
1647 /* running comprehensive total for stats display */
1648 total_missed_rx += mp;
1649
1650 if (hw->mac.type == ixgbe_mac_82598EB)
1651 stats->rnbc[i].ev_count
1652 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1653
1654 stats->pxontxc[i].ev_count
1655 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1656 stats->pxofftxc[i].ev_count
1657 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1658 if (hw->mac.type >= ixgbe_mac_82599EB) {
1659 stats->pxonrxc[i].ev_count
1660 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1661 stats->pxoffrxc[i].ev_count
1662 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1663 stats->pxon2offc[i].ev_count
1664 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1665 } else {
1666 stats->pxonrxc[i].ev_count
1667 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1668 stats->pxoffrxc[i].ev_count
1669 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1670 }
1671 }
1672 stats->mpctotal.ev_count += total_missed_rx;
1673
1674 /* Document says M[LR]FC are valid when link is up and 10Gbps */
1675 if ((adapter->link_active == LINK_STATE_UP)
1676 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1677 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1678 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1679 }
1680 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1681 stats->rlec.ev_count += rlec;
1682
1683 /* Hardware workaround, gprc counts missed packets */
1684 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1685
1686 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1687 stats->lxontxc.ev_count += lxon;
1688 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1689 stats->lxofftxc.ev_count += lxoff;
1690 total = lxon + lxoff;
1691
1692 if (hw->mac.type != ixgbe_mac_82598EB) {
1693 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1694 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1695 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1696 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1697 - total * ETHER_MIN_LEN;
1698 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1699 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1700 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1701 stats->lxoffrxc.ev_count
1702 += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1703 } else {
1704 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1705 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1706 /* 82598 only has a counter in the high register */
1707 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1708 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH)
1709 - total * ETHER_MIN_LEN;
1710 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1711 }
1712
1713 /*
1714 * Workaround: mprc hardware is incorrectly counting
1715 * broadcasts, so for now we subtract those.
1716 */
1717 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1718 stats->bprc.ev_count += bprc;
1719 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1720 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1721
1722 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1723 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1724 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1725 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1726 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1727 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1728
1729 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1730 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1731 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1732
1733 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1734 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1735 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1736 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1737 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1738 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1739 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1740 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1741 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1742 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1743 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1744 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1745 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1746 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1747 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1748 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1749 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1750 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1751 /* Only read FCOE on 82599 */
1752 if (hw->mac.type != ixgbe_mac_82598EB) {
1753 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1754 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1755 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1756 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1757 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1758 }
1759
1760 /*
1761 * Fill out the OS statistics structure. Only RX errors are required
1762 * here because all TX counters are incremented in the TX path and
1763 * normal RX counters are prepared in ether_input().
1764 */
1765 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1766 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1767 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1768 IF_STAT_PUTREF(ifp);
1769 } /* ixgbe_update_stats_counters */
1770
1771 /************************************************************************
1772 * ixgbe_add_hw_stats
1773 *
1774 * Add sysctl variables, one per statistic, to the system.
1775 ************************************************************************/
1776 static void
1777 ixgbe_add_hw_stats(struct adapter *adapter)
1778 {
1779 device_t dev = adapter->dev;
1780 const struct sysctlnode *rnode, *cnode;
1781 struct sysctllog **log = &adapter->sysctllog;
1782 struct tx_ring *txr = adapter->tx_rings;
1783 struct rx_ring *rxr = adapter->rx_rings;
1784 struct ixgbe_hw *hw = &adapter->hw;
1785 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1786 const char *xname = device_xname(dev);
1787 int i;
1788
1789 /* Driver Statistics */
1790 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1791 NULL, xname, "Driver tx dma soft fail EFBIG");
1792 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1793 NULL, xname, "m_defrag() failed");
1794 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1795 NULL, xname, "Driver tx dma hard fail EFBIG");
1796 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1797 NULL, xname, "Driver tx dma hard fail EINVAL");
1798 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1799 NULL, xname, "Driver tx dma hard fail other");
1800 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1801 NULL, xname, "Driver tx dma soft fail EAGAIN");
1802 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1803 NULL, xname, "Driver tx dma soft fail ENOMEM");
1804 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1805 NULL, xname, "Watchdog timeouts");
1806 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1807 NULL, xname, "TSO errors");
1808 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1809 NULL, xname, "Admin MSI-X IRQ Handled");
1810 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1811 NULL, xname, "Link event");
1812 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1813 NULL, xname, "SFP+ module event");
1814 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1815 NULL, xname, "Multispeed event");
1816 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1817 NULL, xname, "External PHY event");
1818
1819 /* Max number of traffic class is 8 */
1820 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1821 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1822 snprintf(adapter->tcs[i].evnamebuf,
1823 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1824 xname, i);
1825 if (i < __arraycount(stats->mpc)) {
1826 evcnt_attach_dynamic(&stats->mpc[i],
1827 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1828 "RX Missed Packet Count");
1829 if (hw->mac.type == ixgbe_mac_82598EB)
1830 evcnt_attach_dynamic(&stats->rnbc[i],
1831 EVCNT_TYPE_MISC, NULL,
1832 adapter->tcs[i].evnamebuf,
1833 "Receive No Buffers");
1834 }
1835 if (i < __arraycount(stats->pxontxc)) {
1836 evcnt_attach_dynamic(&stats->pxontxc[i],
1837 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1838 "pxontxc");
1839 evcnt_attach_dynamic(&stats->pxonrxc[i],
1840 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1841 "pxonrxc");
1842 evcnt_attach_dynamic(&stats->pxofftxc[i],
1843 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1844 "pxofftxc");
1845 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1846 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1847 "pxoffrxc");
1848 if (hw->mac.type >= ixgbe_mac_82599EB)
1849 evcnt_attach_dynamic(&stats->pxon2offc[i],
1850 EVCNT_TYPE_MISC, NULL,
1851 adapter->tcs[i].evnamebuf,
1852 "pxon2offc");
1853 }
1854 }
1855
1856 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1857 #ifdef LRO
1858 struct lro_ctrl *lro = &rxr->lro;
1859 #endif /* LRO */
1860
1861 snprintf(adapter->queues[i].evnamebuf,
1862 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1863 xname, i);
1864 snprintf(adapter->queues[i].namebuf,
1865 sizeof(adapter->queues[i].namebuf), "q%d", i);
1866
1867 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1868 aprint_error_dev(dev, "could not create sysctl root\n");
1869 break;
1870 }
1871
1872 if (sysctl_createv(log, 0, &rnode, &rnode,
1873 0, CTLTYPE_NODE,
1874 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1875 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1876 break;
1877
1878 if (sysctl_createv(log, 0, &rnode, &cnode,
1879 CTLFLAG_READWRITE, CTLTYPE_INT,
1880 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1881 ixgbe_sysctl_interrupt_rate_handler, 0,
1882 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1883 break;
1884
1885 if (sysctl_createv(log, 0, &rnode, &cnode,
1886 CTLFLAG_READONLY, CTLTYPE_INT,
1887 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1888 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1889 0, CTL_CREATE, CTL_EOL) != 0)
1890 break;
1891
1892 if (sysctl_createv(log, 0, &rnode, &cnode,
1893 CTLFLAG_READONLY, CTLTYPE_INT,
1894 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1895 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1896 0, CTL_CREATE, CTL_EOL) != 0)
1897 break;
1898
1899 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1900 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1901 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1902 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1903 "Handled queue in softint");
1904 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1905 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1906 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1907 NULL, adapter->queues[i].evnamebuf, "TSO");
1908 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1909 NULL, adapter->queues[i].evnamebuf,
1910 "TX Queue No Descriptor Available");
1911 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1912 NULL, adapter->queues[i].evnamebuf,
1913 "Queue Packets Transmitted");
1914 #ifndef IXGBE_LEGACY_TX
1915 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1916 NULL, adapter->queues[i].evnamebuf,
1917 "Packets dropped in pcq");
1918 #endif
1919
1920 if (sysctl_createv(log, 0, &rnode, &cnode,
1921 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1922 SYSCTL_DESCR("Receive Descriptor next to check"),
1923 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1924 CTL_CREATE, CTL_EOL) != 0)
1925 break;
1926
1927 if (sysctl_createv(log, 0, &rnode, &cnode,
1928 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1929 SYSCTL_DESCR("Receive Descriptor Head"),
1930 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1931 CTL_CREATE, CTL_EOL) != 0)
1932 break;
1933
1934 if (sysctl_createv(log, 0, &rnode, &cnode,
1935 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1936 SYSCTL_DESCR("Receive Descriptor Tail"),
1937 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1938 CTL_CREATE, CTL_EOL) != 0)
1939 break;
1940
1941 if (i < __arraycount(stats->qprc)) {
1942 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1943 NULL, adapter->queues[i].evnamebuf, "qprc");
1944 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1945 NULL, adapter->queues[i].evnamebuf, "qptc");
1946 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1947 NULL, adapter->queues[i].evnamebuf, "qbrc");
1948 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1949 NULL, adapter->queues[i].evnamebuf, "qbtc");
1950 if (hw->mac.type >= ixgbe_mac_82599EB)
1951 evcnt_attach_dynamic(&stats->qprdc[i],
1952 EVCNT_TYPE_MISC, NULL,
1953 adapter->queues[i].evnamebuf, "qprdc");
1954 }
1955
1956 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1957 NULL, adapter->queues[i].evnamebuf,
1958 "Queue Packets Received");
1959 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1960 NULL, adapter->queues[i].evnamebuf,
1961 "Queue Bytes Received");
1962 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1963 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1964 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1965 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1966 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1967 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1968 #ifdef LRO
1969 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1970 CTLFLAG_RD, &lro->lro_queued, 0,
1971 "LRO Queued");
1972 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1973 CTLFLAG_RD, &lro->lro_flushed, 0,
1974 "LRO Flushed");
1975 #endif /* LRO */
1976 }
1977
1978 /* MAC stats get their own sub node */
1979
1980 snprintf(stats->namebuf,
1981 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1982
1983 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "rx csum offload - IP");
1985 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "rx csum offload - L4");
1987 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "rx csum offload - IP bad");
1989 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "rx csum offload - L4 bad");
1991 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Interrupt conditions zero");
1993 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Legacy interrupts");
1995
1996 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "CRC Errors");
1998 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Illegal Byte Errors");
2000 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Byte Errors");
2002 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "MAC Short Packets Discarded");
2004 if (hw->mac.type >= ixgbe_mac_X550)
2005 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "Bad SFD");
2007 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "Total Packets Missed");
2009 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "MAC Local Faults");
2011 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "MAC Remote Faults");
2013 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Receive Length Errors");
2015 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Link XON Transmitted");
2017 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Link XON Received");
2019 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Link XOFF Transmitted");
2021 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Link XOFF Received");
2023
2024 /* Packet Reception Stats */
2025 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Total Octets Received");
2027 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Good Octets Received");
2029 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Total Packets Received");
2031 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Good Packets Received");
2033 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "Multicast Packets Received");
2035 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "Broadcast Packets Received");
2037 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "64 byte frames received ");
2039 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "65-127 byte frames received");
2041 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "128-255 byte frames received");
2043 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "256-511 byte frames received");
2045 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "512-1023 byte frames received");
2047 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2048 stats->namebuf, "1023-1522 byte frames received");
2049 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2050 stats->namebuf, "Receive Undersized");
2051 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2052 stats->namebuf, "Fragmented Packets Received ");
2053 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2054 stats->namebuf, "Oversized Packets Received");
2055 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2056 stats->namebuf, "Received Jabber");
2057 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2058 stats->namebuf, "Management Packets Received");
2059 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2060 stats->namebuf, "Management Packets Dropped");
2061 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2062 stats->namebuf, "Checksum Errors");
2063
2064 /* Packet Transmission Stats */
2065 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2066 stats->namebuf, "Good Octets Transmitted");
2067 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2068 stats->namebuf, "Total Packets Transmitted");
2069 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2070 stats->namebuf, "Good Packets Transmitted");
2071 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2072 stats->namebuf, "Broadcast Packets Transmitted");
2073 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2074 stats->namebuf, "Multicast Packets Transmitted");
2075 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2076 stats->namebuf, "Management Packets Transmitted");
2077 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2078 stats->namebuf, "64 byte frames transmitted ");
2079 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2080 stats->namebuf, "65-127 byte frames transmitted");
2081 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2082 stats->namebuf, "128-255 byte frames transmitted");
2083 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2084 stats->namebuf, "256-511 byte frames transmitted");
2085 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2086 stats->namebuf, "512-1023 byte frames transmitted");
2087 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2088 stats->namebuf, "1024-1522 byte frames transmitted");
2089 } /* ixgbe_add_hw_stats */
2090
2091 static void
2092 ixgbe_clear_evcnt(struct adapter *adapter)
2093 {
2094 struct tx_ring *txr = adapter->tx_rings;
2095 struct rx_ring *rxr = adapter->rx_rings;
2096 struct ixgbe_hw *hw = &adapter->hw;
2097 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2098 int i;
2099
2100 adapter->efbig_tx_dma_setup.ev_count = 0;
2101 adapter->mbuf_defrag_failed.ev_count = 0;
2102 adapter->efbig2_tx_dma_setup.ev_count = 0;
2103 adapter->einval_tx_dma_setup.ev_count = 0;
2104 adapter->other_tx_dma_setup.ev_count = 0;
2105 adapter->eagain_tx_dma_setup.ev_count = 0;
2106 adapter->enomem_tx_dma_setup.ev_count = 0;
2107 adapter->tso_err.ev_count = 0;
2108 adapter->watchdog_events.ev_count = 0;
2109 adapter->admin_irqev.ev_count = 0;
2110 adapter->link_workev.ev_count = 0;
2111 adapter->mod_workev.ev_count = 0;
2112 adapter->msf_workev.ev_count = 0;
2113 adapter->phy_workev.ev_count = 0;
2114
2115 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2116 if (i < __arraycount(stats->mpc)) {
2117 stats->mpc[i].ev_count = 0;
2118 if (hw->mac.type == ixgbe_mac_82598EB)
2119 stats->rnbc[i].ev_count = 0;
2120 }
2121 if (i < __arraycount(stats->pxontxc)) {
2122 stats->pxontxc[i].ev_count = 0;
2123 stats->pxonrxc[i].ev_count = 0;
2124 stats->pxofftxc[i].ev_count = 0;
2125 stats->pxoffrxc[i].ev_count = 0;
2126 if (hw->mac.type >= ixgbe_mac_82599EB)
2127 stats->pxon2offc[i].ev_count = 0;
2128 }
2129 }
2130
2131 txr = adapter->tx_rings;
2132 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2133 adapter->queues[i].irqs.ev_count = 0;
2134 adapter->queues[i].handleq.ev_count = 0;
2135 adapter->queues[i].req.ev_count = 0;
2136 txr->no_desc_avail.ev_count = 0;
2137 txr->total_packets.ev_count = 0;
2138 txr->tso_tx.ev_count = 0;
2139 #ifndef IXGBE_LEGACY_TX
2140 txr->pcq_drops.ev_count = 0;
2141 #endif
2142 txr->q_efbig_tx_dma_setup = 0;
2143 txr->q_mbuf_defrag_failed = 0;
2144 txr->q_efbig2_tx_dma_setup = 0;
2145 txr->q_einval_tx_dma_setup = 0;
2146 txr->q_other_tx_dma_setup = 0;
2147 txr->q_eagain_tx_dma_setup = 0;
2148 txr->q_enomem_tx_dma_setup = 0;
2149 txr->q_tso_err = 0;
2150
2151 if (i < __arraycount(stats->qprc)) {
2152 stats->qprc[i].ev_count = 0;
2153 stats->qptc[i].ev_count = 0;
2154 stats->qbrc[i].ev_count = 0;
2155 stats->qbtc[i].ev_count = 0;
2156 if (hw->mac.type >= ixgbe_mac_82599EB)
2157 stats->qprdc[i].ev_count = 0;
2158 }
2159
2160 rxr->rx_packets.ev_count = 0;
2161 rxr->rx_bytes.ev_count = 0;
2162 rxr->rx_copies.ev_count = 0;
2163 rxr->no_jmbuf.ev_count = 0;
2164 rxr->rx_discarded.ev_count = 0;
2165 }
2166 stats->ipcs.ev_count = 0;
2167 stats->l4cs.ev_count = 0;
2168 stats->ipcs_bad.ev_count = 0;
2169 stats->l4cs_bad.ev_count = 0;
2170 stats->intzero.ev_count = 0;
2171 stats->legint.ev_count = 0;
2172 stats->crcerrs.ev_count = 0;
2173 stats->illerrc.ev_count = 0;
2174 stats->errbc.ev_count = 0;
2175 stats->mspdc.ev_count = 0;
2176 if (hw->mac.type >= ixgbe_mac_X550)
2177 stats->mbsdc.ev_count = 0;
2178 stats->mpctotal.ev_count = 0;
2179 stats->mlfc.ev_count = 0;
2180 stats->mrfc.ev_count = 0;
2181 stats->rlec.ev_count = 0;
2182 stats->lxontxc.ev_count = 0;
2183 stats->lxonrxc.ev_count = 0;
2184 stats->lxofftxc.ev_count = 0;
2185 stats->lxoffrxc.ev_count = 0;
2186
2187 /* Packet Reception Stats */
2188 stats->tor.ev_count = 0;
2189 stats->gorc.ev_count = 0;
2190 stats->tpr.ev_count = 0;
2191 stats->gprc.ev_count = 0;
2192 stats->mprc.ev_count = 0;
2193 stats->bprc.ev_count = 0;
2194 stats->prc64.ev_count = 0;
2195 stats->prc127.ev_count = 0;
2196 stats->prc255.ev_count = 0;
2197 stats->prc511.ev_count = 0;
2198 stats->prc1023.ev_count = 0;
2199 stats->prc1522.ev_count = 0;
2200 stats->ruc.ev_count = 0;
2201 stats->rfc.ev_count = 0;
2202 stats->roc.ev_count = 0;
2203 stats->rjc.ev_count = 0;
2204 stats->mngprc.ev_count = 0;
2205 stats->mngpdc.ev_count = 0;
2206 stats->xec.ev_count = 0;
2207
2208 /* Packet Transmission Stats */
2209 stats->gotc.ev_count = 0;
2210 stats->tpt.ev_count = 0;
2211 stats->gptc.ev_count = 0;
2212 stats->bptc.ev_count = 0;
2213 stats->mptc.ev_count = 0;
2214 stats->mngptc.ev_count = 0;
2215 stats->ptc64.ev_count = 0;
2216 stats->ptc127.ev_count = 0;
2217 stats->ptc255.ev_count = 0;
2218 stats->ptc511.ev_count = 0;
2219 stats->ptc1023.ev_count = 0;
2220 stats->ptc1522.ev_count = 0;
2221 }
2222
2223 /************************************************************************
2224 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2225 *
2226 * Retrieves the TDH value from the hardware
2227 ************************************************************************/
2228 static int
2229 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2230 {
2231 struct sysctlnode node = *rnode;
2232 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2233 struct adapter *adapter;
2234 uint32_t val;
2235
2236 if (!txr)
2237 return (0);
2238
2239 adapter = txr->adapter;
2240 if (ixgbe_fw_recovery_mode_swflag(adapter))
2241 return (EPERM);
2242
2243 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2244 node.sysctl_data = &val;
2245 return sysctl_lookup(SYSCTLFN_CALL(&node));
2246 } /* ixgbe_sysctl_tdh_handler */
2247
2248 /************************************************************************
2249 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2250 *
2251 * Retrieves the TDT value from the hardware
2252 ************************************************************************/
2253 static int
2254 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2255 {
2256 struct sysctlnode node = *rnode;
2257 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2258 struct adapter *adapter;
2259 uint32_t val;
2260
2261 if (!txr)
2262 return (0);
2263
2264 adapter = txr->adapter;
2265 if (ixgbe_fw_recovery_mode_swflag(adapter))
2266 return (EPERM);
2267
2268 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2269 node.sysctl_data = &val;
2270 return sysctl_lookup(SYSCTLFN_CALL(&node));
2271 } /* ixgbe_sysctl_tdt_handler */
2272
2273 /************************************************************************
2274 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2275 * handler function
2276 *
2277 * Retrieves the next_to_check value
2278 ************************************************************************/
2279 static int
2280 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2281 {
2282 struct sysctlnode node = *rnode;
2283 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2284 struct adapter *adapter;
2285 uint32_t val;
2286
2287 if (!rxr)
2288 return (0);
2289
2290 adapter = rxr->adapter;
2291 if (ixgbe_fw_recovery_mode_swflag(adapter))
2292 return (EPERM);
2293
2294 val = rxr->next_to_check;
2295 node.sysctl_data = &val;
2296 return sysctl_lookup(SYSCTLFN_CALL(&node));
2297 } /* ixgbe_sysctl_next_to_check_handler */
2298
2299 /************************************************************************
2300 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2301 *
2302 * Retrieves the RDH value from the hardware
2303 ************************************************************************/
2304 static int
2305 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2306 {
2307 struct sysctlnode node = *rnode;
2308 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2309 struct adapter *adapter;
2310 uint32_t val;
2311
2312 if (!rxr)
2313 return (0);
2314
2315 adapter = rxr->adapter;
2316 if (ixgbe_fw_recovery_mode_swflag(adapter))
2317 return (EPERM);
2318
2319 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2320 node.sysctl_data = &val;
2321 return sysctl_lookup(SYSCTLFN_CALL(&node));
2322 } /* ixgbe_sysctl_rdh_handler */
2323
2324 /************************************************************************
2325 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2326 *
2327 * Retrieves the RDT value from the hardware
2328 ************************************************************************/
2329 static int
2330 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2331 {
2332 struct sysctlnode node = *rnode;
2333 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2334 struct adapter *adapter;
2335 uint32_t val;
2336
2337 if (!rxr)
2338 return (0);
2339
2340 adapter = rxr->adapter;
2341 if (ixgbe_fw_recovery_mode_swflag(adapter))
2342 return (EPERM);
2343
2344 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2345 node.sysctl_data = &val;
2346 return sysctl_lookup(SYSCTLFN_CALL(&node));
2347 } /* ixgbe_sysctl_rdt_handler */
2348
2349 static int
2350 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2351 {
2352 struct ifnet *ifp = &ec->ec_if;
2353 struct adapter *adapter = ifp->if_softc;
2354 int rv;
2355
2356 if (set)
2357 rv = ixgbe_register_vlan(adapter, vid);
2358 else
2359 rv = ixgbe_unregister_vlan(adapter, vid);
2360
2361 if (rv != 0)
2362 return rv;
2363
2364 /*
2365 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2366 * or 0 to 1.
2367 */
2368 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2369 ixgbe_setup_vlan_hw_tagging(adapter);
2370
2371 return rv;
2372 }
2373
2374 /************************************************************************
2375 * ixgbe_register_vlan
2376 *
2377 * Run via vlan config EVENT, it enables us to use the
2378 * HW Filter table since we can get the vlan id. This
2379 * just creates the entry in the soft version of the
2380 * VFTA, init will repopulate the real table.
2381 ************************************************************************/
2382 static int
2383 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2384 {
2385 u16 index, bit;
2386 int error;
2387
2388 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2389 return EINVAL;
2390
2391 IXGBE_CORE_LOCK(adapter);
2392 index = (vtag >> 5) & 0x7F;
2393 bit = vtag & 0x1F;
2394 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2395 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2396 true);
2397 IXGBE_CORE_UNLOCK(adapter);
2398 if (error != 0)
2399 error = EACCES;
2400
2401 return error;
2402 } /* ixgbe_register_vlan */
2403
2404 /************************************************************************
2405 * ixgbe_unregister_vlan
2406 *
2407 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2408 ************************************************************************/
2409 static int
2410 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2411 {
2412 u16 index, bit;
2413 int error;
2414
2415 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2416 return EINVAL;
2417
2418 IXGBE_CORE_LOCK(adapter);
2419 index = (vtag >> 5) & 0x7F;
2420 bit = vtag & 0x1F;
2421 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2422 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2423 true);
2424 IXGBE_CORE_UNLOCK(adapter);
2425 if (error != 0)
2426 error = EACCES;
2427
2428 return error;
2429 } /* ixgbe_unregister_vlan */
2430
2431 static void
2432 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2433 {
2434 struct ethercom *ec = &adapter->osdep.ec;
2435 struct ixgbe_hw *hw = &adapter->hw;
2436 struct rx_ring *rxr;
2437 u32 ctrl;
2438 int i;
2439 bool hwtagging;
2440
2441 /* Enable HW tagging only if any vlan is attached */
2442 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2443 && VLAN_ATTACHED(ec);
2444
2445 /* Setup the queues for vlans */
2446 for (i = 0; i < adapter->num_queues; i++) {
2447 rxr = &adapter->rx_rings[i];
2448 /*
2449 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2450 */
2451 if (hw->mac.type != ixgbe_mac_82598EB) {
2452 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2453 if (hwtagging)
2454 ctrl |= IXGBE_RXDCTL_VME;
2455 else
2456 ctrl &= ~IXGBE_RXDCTL_VME;
2457 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2458 }
2459 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2460 }
2461
2462 /* VLAN hw tagging for 82598 */
2463 if (hw->mac.type == ixgbe_mac_82598EB) {
2464 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2465 if (hwtagging)
2466 ctrl |= IXGBE_VLNCTRL_VME;
2467 else
2468 ctrl &= ~IXGBE_VLNCTRL_VME;
2469 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2470 }
2471 } /* ixgbe_setup_vlan_hw_tagging */
2472
2473 static void
2474 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2475 {
2476 struct ethercom *ec = &adapter->osdep.ec;
2477 struct ixgbe_hw *hw = &adapter->hw;
2478 int i;
2479 u32 ctrl;
2480 struct vlanid_list *vlanidp;
2481
2482 /*
2483 * This function is called from both if_init and ifflags_cb()
2484 * on NetBSD.
2485 */
2486
2487 /*
2488 * Part 1:
2489 * Setup VLAN HW tagging
2490 */
2491 ixgbe_setup_vlan_hw_tagging(adapter);
2492
2493 /*
2494 * Part 2:
2495 * Setup VLAN HW filter
2496 */
2497 /* Cleanup shadow_vfta */
2498 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2499 adapter->shadow_vfta[i] = 0;
2500 /* Generate shadow_vfta from ec_vids */
2501 ETHER_LOCK(ec);
2502 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2503 uint32_t idx;
2504
2505 idx = vlanidp->vid / 32;
2506 KASSERT(idx < IXGBE_VFTA_SIZE);
2507 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2508 }
2509 ETHER_UNLOCK(ec);
2510 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2511 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2512
2513 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2514 /* Enable the Filter Table if enabled */
2515 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2516 ctrl |= IXGBE_VLNCTRL_VFE;
2517 else
2518 ctrl &= ~IXGBE_VLNCTRL_VFE;
2519 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2520 } /* ixgbe_setup_vlan_hw_support */
2521
2522 /************************************************************************
2523 * ixgbe_get_slot_info
2524 *
2525 * Get the width and transaction speed of
2526 * the slot this adapter is plugged into.
2527 ************************************************************************/
2528 static void
2529 ixgbe_get_slot_info(struct adapter *adapter)
2530 {
2531 device_t dev = adapter->dev;
2532 struct ixgbe_hw *hw = &adapter->hw;
2533 u32 offset;
2534 u16 link;
2535 int bus_info_valid = TRUE;
2536
2537 /* Some devices are behind an internal bridge */
2538 switch (hw->device_id) {
2539 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2540 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2541 goto get_parent_info;
2542 default:
2543 break;
2544 }
2545
2546 ixgbe_get_bus_info(hw);
2547
2548 /*
2549 * Some devices don't use PCI-E, but there is no need
2550 * to display "Unknown" for bus speed and width.
2551 */
2552 switch (hw->mac.type) {
2553 case ixgbe_mac_X550EM_x:
2554 case ixgbe_mac_X550EM_a:
2555 return;
2556 default:
2557 goto display;
2558 }
2559
2560 get_parent_info:
2561 /*
2562 * For the Quad port adapter we need to parse back
2563 * up the PCI tree to find the speed of the expansion
2564 * slot into which this adapter is plugged. A bit more work.
2565 */
2566 dev = device_parent(device_parent(dev));
2567 #if 0
2568 #ifdef IXGBE_DEBUG
2569 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2570 pci_get_slot(dev), pci_get_function(dev));
2571 #endif
2572 dev = device_parent(device_parent(dev));
2573 #ifdef IXGBE_DEBUG
2574 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2575 pci_get_slot(dev), pci_get_function(dev));
2576 #endif
2577 #endif
2578 /* Now get the PCI Express Capabilities offset */
2579 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2580 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2581 /*
2582 * Hmm...can't get PCI-Express capabilities.
2583 * Falling back to default method.
2584 */
2585 bus_info_valid = FALSE;
2586 ixgbe_get_bus_info(hw);
2587 goto display;
2588 }
2589 /* ...and read the Link Status Register */
2590 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2591 offset + PCIE_LCSR) >> 16;
2592 ixgbe_set_pci_config_data_generic(hw, link);
2593
2594 display:
2595 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2596 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2597 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2598 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2599 "Unknown"),
2600 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2601 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2602 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2603 "Unknown"));
2604
2605 if (bus_info_valid) {
2606 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2607 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2608 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2609 device_printf(dev, "PCI-Express bandwidth available"
2610 " for this card\n is not sufficient for"
2611 " optimal performance.\n");
2612 device_printf(dev, "For optimal performance a x8 "
2613 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2614 }
2615 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2616 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2617 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2618 device_printf(dev, "PCI-Express bandwidth available"
2619 " for this card\n is not sufficient for"
2620 " optimal performance.\n");
2621 device_printf(dev, "For optimal performance a x8 "
2622 "PCIE Gen3 slot is required.\n");
2623 }
2624 } else
2625 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2626
2627 return;
2628 } /* ixgbe_get_slot_info */
2629
2630 /************************************************************************
2631 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2632 ************************************************************************/
2633 static inline void
2634 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2635 {
2636 struct ixgbe_hw *hw = &adapter->hw;
2637 struct ix_queue *que = &adapter->queues[vector];
2638 u64 queue = 1ULL << vector;
2639 u32 mask;
2640
2641 mutex_enter(&que->dc_mtx);
2642 if (que->disabled_count > 0 && --que->disabled_count > 0)
2643 goto out;
2644
2645 if (hw->mac.type == ixgbe_mac_82598EB) {
2646 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2647 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2648 } else {
2649 mask = (queue & 0xFFFFFFFF);
2650 if (mask)
2651 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2652 mask = (queue >> 32);
2653 if (mask)
2654 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2655 }
2656 out:
2657 mutex_exit(&que->dc_mtx);
2658 } /* ixgbe_enable_queue */
2659
2660 /************************************************************************
2661 * ixgbe_disable_queue_internal
2662 ************************************************************************/
2663 static inline void
2664 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2665 {
2666 struct ixgbe_hw *hw = &adapter->hw;
2667 struct ix_queue *que = &adapter->queues[vector];
2668 u64 queue = 1ULL << vector;
2669 u32 mask;
2670
2671 mutex_enter(&que->dc_mtx);
2672
2673 if (que->disabled_count > 0) {
2674 if (nestok)
2675 que->disabled_count++;
2676 goto out;
2677 }
2678 que->disabled_count++;
2679
2680 if (hw->mac.type == ixgbe_mac_82598EB) {
2681 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2682 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2683 } else {
2684 mask = (queue & 0xFFFFFFFF);
2685 if (mask)
2686 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2687 mask = (queue >> 32);
2688 if (mask)
2689 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2690 }
2691 out:
2692 mutex_exit(&que->dc_mtx);
2693 } /* ixgbe_disable_queue_internal */
2694
2695 /************************************************************************
2696 * ixgbe_disable_queue
2697 ************************************************************************/
2698 static inline void
2699 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2700 {
2701
2702 ixgbe_disable_queue_internal(adapter, vector, true);
2703 } /* ixgbe_disable_queue */
2704
2705 /************************************************************************
2706 * ixgbe_sched_handle_que - schedule deferred packet processing
2707 ************************************************************************/
2708 static inline void
2709 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2710 {
2711
2712 if (que->txrx_use_workqueue) {
2713 /*
2714 * adapter->que_wq is bound to each CPU instead of
2715 * each NIC queue to reduce workqueue kthread. As we
2716 * should consider about interrupt affinity in this
2717 * function, the workqueue kthread must be WQ_PERCPU.
2718 * If create WQ_PERCPU workqueue kthread for each NIC
2719 * queue, that number of created workqueue kthread is
2720 * (number of used NIC queue) * (number of CPUs) =
2721 * (number of CPUs) ^ 2 most often.
2722 *
2723 * The same NIC queue's interrupts are avoided by
2724 * masking the queue's interrupt. And different
2725 * NIC queue's interrupts use different struct work
2726 * (que->wq_cookie). So, "enqueued flag" to avoid
2727 * twice workqueue_enqueue() is not required .
2728 */
2729 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2730 } else {
2731 softint_schedule(que->que_si);
2732 }
2733 }
2734
2735 /************************************************************************
2736 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2737 ************************************************************************/
2738 static int
2739 ixgbe_msix_que(void *arg)
2740 {
2741 struct ix_queue *que = arg;
2742 struct adapter *adapter = que->adapter;
2743 struct ifnet *ifp = adapter->ifp;
2744 struct tx_ring *txr = que->txr;
2745 struct rx_ring *rxr = que->rxr;
2746 bool more;
2747 u32 newitr = 0;
2748
2749 /* Protect against spurious interrupts */
2750 if ((ifp->if_flags & IFF_RUNNING) == 0)
2751 return 0;
2752
2753 ixgbe_disable_queue(adapter, que->msix);
2754 ++que->irqs.ev_count;
2755
2756 /*
2757 * Don't change "que->txrx_use_workqueue" from this point to avoid
2758 * flip-flopping softint/workqueue mode in one deferred processing.
2759 */
2760 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2761
2762 #ifdef __NetBSD__
2763 /* Don't run ixgbe_rxeof in interrupt context */
2764 more = true;
2765 #else
2766 more = ixgbe_rxeof(que);
2767 #endif
2768
2769 IXGBE_TX_LOCK(txr);
2770 ixgbe_txeof(txr);
2771 IXGBE_TX_UNLOCK(txr);
2772
2773 /* Do AIM now? */
2774
2775 if (adapter->enable_aim == false)
2776 goto no_calc;
2777 /*
2778 * Do Adaptive Interrupt Moderation:
2779 * - Write out last calculated setting
2780 * - Calculate based on average size over
2781 * the last interval.
2782 */
2783 if (que->eitr_setting)
2784 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2785
2786 que->eitr_setting = 0;
2787
2788 /* Idle, do nothing */
2789 if ((txr->bytes == 0) && (rxr->bytes == 0))
2790 goto no_calc;
2791
2792 if ((txr->bytes) && (txr->packets))
2793 newitr = txr->bytes/txr->packets;
2794 if ((rxr->bytes) && (rxr->packets))
2795 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2796 newitr += 24; /* account for hardware frame, crc */
2797
2798 /* set an upper boundary */
2799 newitr = uimin(newitr, 3000);
2800
2801 /* Be nice to the mid range */
2802 if ((newitr > 300) && (newitr < 1200))
2803 newitr = (newitr / 3);
2804 else
2805 newitr = (newitr / 2);
2806
2807 /*
2808 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2809 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2810 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2811 * on 1G and higher.
2812 */
2813 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2814 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2815 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2816 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2817 }
2818
2819 /* save for next interrupt */
2820 que->eitr_setting = newitr;
2821
2822 /* Reset state */
2823 txr->bytes = 0;
2824 txr->packets = 0;
2825 rxr->bytes = 0;
2826 rxr->packets = 0;
2827
2828 no_calc:
2829 if (more)
2830 ixgbe_sched_handle_que(adapter, que);
2831 else
2832 ixgbe_enable_queue(adapter, que->msix);
2833
2834 return 1;
2835 } /* ixgbe_msix_que */
2836
2837 /************************************************************************
2838 * ixgbe_media_status - Media Ioctl callback
2839 *
2840 * Called whenever the user queries the status of
2841 * the interface using ifconfig.
2842 ************************************************************************/
2843 static void
2844 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2845 {
2846 struct adapter *adapter = ifp->if_softc;
2847 struct ixgbe_hw *hw = &adapter->hw;
2848 int layer;
2849
2850 INIT_DEBUGOUT("ixgbe_media_status: begin");
2851 ixgbe_update_link_status(adapter);
2852
2853 ifmr->ifm_status = IFM_AVALID;
2854 ifmr->ifm_active = IFM_ETHER;
2855
2856 if (adapter->link_active != LINK_STATE_UP) {
2857 ifmr->ifm_active |= IFM_NONE;
2858 return;
2859 }
2860
2861 ifmr->ifm_status |= IFM_ACTIVE;
2862 layer = adapter->phy_layer;
2863
2864 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2865 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2866 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2867 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2868 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2869 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2870 switch (adapter->link_speed) {
2871 case IXGBE_LINK_SPEED_10GB_FULL:
2872 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2873 break;
2874 case IXGBE_LINK_SPEED_5GB_FULL:
2875 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2876 break;
2877 case IXGBE_LINK_SPEED_2_5GB_FULL:
2878 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2879 break;
2880 case IXGBE_LINK_SPEED_1GB_FULL:
2881 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2882 break;
2883 case IXGBE_LINK_SPEED_100_FULL:
2884 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2885 break;
2886 case IXGBE_LINK_SPEED_10_FULL:
2887 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2888 break;
2889 }
2890 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2891 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2892 switch (adapter->link_speed) {
2893 case IXGBE_LINK_SPEED_10GB_FULL:
2894 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2895 break;
2896 }
2897 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2898 switch (adapter->link_speed) {
2899 case IXGBE_LINK_SPEED_10GB_FULL:
2900 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2901 break;
2902 case IXGBE_LINK_SPEED_1GB_FULL:
2903 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2904 break;
2905 }
2906 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2907 switch (adapter->link_speed) {
2908 case IXGBE_LINK_SPEED_10GB_FULL:
2909 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2910 break;
2911 case IXGBE_LINK_SPEED_1GB_FULL:
2912 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2913 break;
2914 }
2915 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2916 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2917 switch (adapter->link_speed) {
2918 case IXGBE_LINK_SPEED_10GB_FULL:
2919 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2920 break;
2921 case IXGBE_LINK_SPEED_1GB_FULL:
2922 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2923 break;
2924 }
2925 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2926 switch (adapter->link_speed) {
2927 case IXGBE_LINK_SPEED_10GB_FULL:
2928 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2929 break;
2930 }
2931 /*
2932 * XXX: These need to use the proper media types once
2933 * they're added.
2934 */
2935 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2936 switch (adapter->link_speed) {
2937 case IXGBE_LINK_SPEED_10GB_FULL:
2938 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2939 break;
2940 case IXGBE_LINK_SPEED_2_5GB_FULL:
2941 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2942 break;
2943 case IXGBE_LINK_SPEED_1GB_FULL:
2944 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2945 break;
2946 }
2947 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2948 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2949 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2950 switch (adapter->link_speed) {
2951 case IXGBE_LINK_SPEED_10GB_FULL:
2952 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2953 break;
2954 case IXGBE_LINK_SPEED_2_5GB_FULL:
2955 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2956 break;
2957 case IXGBE_LINK_SPEED_1GB_FULL:
2958 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2959 break;
2960 }
2961
2962 /* If nothing is recognized... */
2963 #if 0
2964 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2965 ifmr->ifm_active |= IFM_UNKNOWN;
2966 #endif
2967
2968 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2969
2970 /* Display current flow control setting used on link */
2971 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2972 hw->fc.current_mode == ixgbe_fc_full)
2973 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2974 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2975 hw->fc.current_mode == ixgbe_fc_full)
2976 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2977
2978 return;
2979 } /* ixgbe_media_status */
2980
2981 /************************************************************************
2982 * ixgbe_media_change - Media Ioctl callback
2983 *
2984 * Called when the user changes speed/duplex using
2985 * media/mediopt option with ifconfig.
2986 ************************************************************************/
2987 static int
2988 ixgbe_media_change(struct ifnet *ifp)
2989 {
2990 struct adapter *adapter = ifp->if_softc;
2991 struct ifmedia *ifm = &adapter->media;
2992 struct ixgbe_hw *hw = &adapter->hw;
2993 ixgbe_link_speed speed = 0;
2994 ixgbe_link_speed link_caps = 0;
2995 bool negotiate = false;
2996 s32 err = IXGBE_NOT_IMPLEMENTED;
2997
2998 INIT_DEBUGOUT("ixgbe_media_change: begin");
2999
3000 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3001 return (EINVAL);
3002
3003 if (hw->phy.media_type == ixgbe_media_type_backplane)
3004 return (EPERM);
3005
3006 /*
3007 * We don't actually need to check against the supported
3008 * media types of the adapter; ifmedia will take care of
3009 * that for us.
3010 */
3011 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3012 case IFM_AUTO:
3013 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3014 &negotiate);
3015 if (err != IXGBE_SUCCESS) {
3016 device_printf(adapter->dev, "Unable to determine "
3017 "supported advertise speeds\n");
3018 return (ENODEV);
3019 }
3020 speed |= link_caps;
3021 break;
3022 case IFM_10G_T:
3023 case IFM_10G_LRM:
3024 case IFM_10G_LR:
3025 case IFM_10G_TWINAX:
3026 case IFM_10G_SR:
3027 case IFM_10G_CX4:
3028 case IFM_10G_KR:
3029 case IFM_10G_KX4:
3030 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3031 break;
3032 case IFM_5000_T:
3033 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3034 break;
3035 case IFM_2500_T:
3036 case IFM_2500_KX:
3037 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3038 break;
3039 case IFM_1000_T:
3040 case IFM_1000_LX:
3041 case IFM_1000_SX:
3042 case IFM_1000_KX:
3043 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3044 break;
3045 case IFM_100_TX:
3046 speed |= IXGBE_LINK_SPEED_100_FULL;
3047 break;
3048 case IFM_10_T:
3049 speed |= IXGBE_LINK_SPEED_10_FULL;
3050 break;
3051 case IFM_NONE:
3052 break;
3053 default:
3054 goto invalid;
3055 }
3056
3057 hw->mac.autotry_restart = TRUE;
3058 hw->mac.ops.setup_link(hw, speed, TRUE);
3059 adapter->advertise = 0;
3060 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3061 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3062 adapter->advertise |= 1 << 2;
3063 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3064 adapter->advertise |= 1 << 1;
3065 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3066 adapter->advertise |= 1 << 0;
3067 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3068 adapter->advertise |= 1 << 3;
3069 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3070 adapter->advertise |= 1 << 4;
3071 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3072 adapter->advertise |= 1 << 5;
3073 }
3074
3075 return (0);
3076
3077 invalid:
3078 device_printf(adapter->dev, "Invalid media type!\n");
3079
3080 return (EINVAL);
3081 } /* ixgbe_media_change */
3082
3083 /************************************************************************
3084 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3085 ************************************************************************/
3086 static int
3087 ixgbe_msix_admin(void *arg)
3088 {
3089 struct adapter *adapter = arg;
3090 struct ixgbe_hw *hw = &adapter->hw;
3091 u32 eicr;
3092 u32 eims_orig;
3093 u32 eims_disable = 0;
3094
3095 ++adapter->admin_irqev.ev_count;
3096
3097 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3098 /* Pause other interrupts */
3099 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3100
3101 /*
3102 * First get the cause.
3103 *
3104 * The specifications of 82598, 82599, X540 and X550 say EICS register
3105 * is write only. However, Linux says it is a workaround for silicon
3106 * errata to read EICS instead of EICR to get interrupt cause.
3107 * At least, reading EICR clears lower 16bits of EIMS on 82598.
3108 */
3109 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3110 /* Be sure the queue bits are not cleared */
3111 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3112 /* Clear all OTHER interrupts with write */
3113 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3114
3115 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
3116
3117 /* Re-enable some OTHER interrupts */
3118 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3119
3120 return 1;
3121 } /* ixgbe_msix_admin */
3122
3123 static void
3124 ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
3125 {
3126 struct ixgbe_hw *hw = &adapter->hw;
3127 u32 eicr_mask;
3128 u32 task_requests = 0;
3129 s32 retval;
3130
3131 /* Link status change */
3132 if (eicr & IXGBE_EICR_LSC) {
3133 task_requests |= IXGBE_REQUEST_TASK_LSC;
3134 *eims_disable |= IXGBE_EIMS_LSC;
3135 }
3136
3137 if (ixgbe_is_sfp(hw)) {
3138 /* Pluggable optics-related interrupt */
3139 if (hw->mac.type >= ixgbe_mac_X540)
3140 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3141 else
3142 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3143
3144 /*
3145 * An interrupt might not arrive when a module is inserted.
3146 * When an link status change interrupt occurred and the driver
3147 * still regard SFP as unplugged, issue the module softint
3148 * and then issue LSC interrupt.
3149 */
3150 if ((eicr & eicr_mask)
3151 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3152 && (eicr & IXGBE_EICR_LSC))) {
3153 task_requests |= IXGBE_REQUEST_TASK_MOD;
3154 *eims_disable |= IXGBE_EIMS_LSC;
3155 }
3156
3157 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3158 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3159 task_requests |= IXGBE_REQUEST_TASK_MSF;
3160 *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3161 }
3162 }
3163
3164 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3165 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3166 (eicr & IXGBE_EICR_FLOW_DIR)) {
3167 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
3168 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3169 /* Disable the interrupt */
3170 *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3171 }
3172 }
3173
3174 if (eicr & IXGBE_EICR_ECC) {
3175 device_printf(adapter->dev,
3176 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3177 /* Disable interrupt to prevent log spam */
3178 *eims_disable |= IXGBE_EICR_ECC;
3179 }
3180
3181 /* Check for over temp condition */
3182 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3183 switch (adapter->hw.mac.type) {
3184 case ixgbe_mac_X550EM_a:
3185 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3186 break;
3187 /* Disable interrupt to prevent log spam */
3188 *eims_disable |= IXGBE_EICR_GPI_SDP0_X550EM_a;
3189
3190 retval = hw->phy.ops.check_overtemp(hw);
3191 if (retval != IXGBE_ERR_OVERTEMP)
3192 break;
3193 device_printf(adapter->dev,
3194 "CRITICAL: OVER TEMP!! "
3195 "PHY IS SHUT DOWN!!\n");
3196 device_printf(adapter->dev,
3197 "System shutdown required!\n");
3198 break;
3199 default:
3200 if (!(eicr & IXGBE_EICR_TS))
3201 break;
3202 /* Disable interrupt to prevent log spam */
3203 *eims_disable |= IXGBE_EIMS_TS;
3204
3205 retval = hw->phy.ops.check_overtemp(hw);
3206 if (retval != IXGBE_ERR_OVERTEMP)
3207 break;
3208 device_printf(adapter->dev,
3209 "CRITICAL: OVER TEMP!! "
3210 "PHY IS SHUT DOWN!!\n");
3211 device_printf(adapter->dev,
3212 "System shutdown required!\n");
3213 break;
3214 }
3215 }
3216
3217 /* Check for VF message */
3218 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3219 (eicr & IXGBE_EICR_MAILBOX)) {
3220 task_requests |= IXGBE_REQUEST_TASK_MBX;
3221 *eims_disable |= IXGBE_EIMS_MAILBOX;
3222 }
3223 }
3224
3225 /* Check for fan failure */
3226 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3227 retval = ixgbe_check_fan_failure(adapter, eicr, true);
3228 if (retval == IXGBE_ERR_FAN_FAILURE) {
3229 /* Disable interrupt to prevent log spam */
3230 *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3231 }
3232 }
3233
3234 /* External PHY interrupt */
3235 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3236 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3237 task_requests |= IXGBE_REQUEST_TASK_PHY;
3238 *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3239 }
3240
3241 if (task_requests != 0) {
3242 mutex_enter(&adapter->admin_mtx);
3243 adapter->task_requests |= task_requests;
3244 ixgbe_schedule_admin_tasklet(adapter);
3245 mutex_exit(&adapter->admin_mtx);
3246 }
3247
3248 }
3249
3250 static void
3251 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3252 {
3253
3254 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3255 itr |= itr << 16;
3256 else
3257 itr |= IXGBE_EITR_CNT_WDIS;
3258
3259 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3260 }
3261
3262
3263 /************************************************************************
3264 * ixgbe_sysctl_interrupt_rate_handler
3265 ************************************************************************/
3266 static int
3267 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3268 {
3269 struct sysctlnode node = *rnode;
3270 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3271 struct adapter *adapter;
3272 uint32_t reg, usec, rate;
3273 int error;
3274
3275 if (que == NULL)
3276 return 0;
3277
3278 adapter = que->adapter;
3279 if (ixgbe_fw_recovery_mode_swflag(adapter))
3280 return (EPERM);
3281
3282 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3283 usec = ((reg & 0x0FF8) >> 3);
3284 if (usec > 0)
3285 rate = 500000 / usec;
3286 else
3287 rate = 0;
3288 node.sysctl_data = &rate;
3289 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3290 if (error || newp == NULL)
3291 return error;
3292 reg &= ~0xfff; /* default, no limitation */
3293 if (rate > 0 && rate < 500000) {
3294 if (rate < 1000)
3295 rate = 1000;
3296 reg |= ((4000000 / rate) & 0xff8);
3297 /*
3298 * When RSC is used, ITR interval must be larger than
3299 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3300 * The minimum value is always greater than 2us on 100M
3301 * (and 10M?(not documented)), but it's not on 1G and higher.
3302 */
3303 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3304 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3305 if ((adapter->num_queues > 1)
3306 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3307 return EINVAL;
3308 }
3309 ixgbe_max_interrupt_rate = rate;
3310 } else
3311 ixgbe_max_interrupt_rate = 0;
3312 ixgbe_eitr_write(adapter, que->msix, reg);
3313
3314 return (0);
3315 } /* ixgbe_sysctl_interrupt_rate_handler */
3316
3317 const struct sysctlnode *
3318 ixgbe_sysctl_instance(struct adapter *adapter)
3319 {
3320 const char *dvname;
3321 struct sysctllog **log;
3322 int rc;
3323 const struct sysctlnode *rnode;
3324
3325 if (adapter->sysctltop != NULL)
3326 return adapter->sysctltop;
3327
3328 log = &adapter->sysctllog;
3329 dvname = device_xname(adapter->dev);
3330
3331 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3332 0, CTLTYPE_NODE, dvname,
3333 SYSCTL_DESCR("ixgbe information and settings"),
3334 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3335 goto err;
3336
3337 return rnode;
3338 err:
3339 device_printf(adapter->dev,
3340 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3341 return NULL;
3342 }
3343
3344 /************************************************************************
3345 * ixgbe_add_device_sysctls
3346 ************************************************************************/
3347 static void
3348 ixgbe_add_device_sysctls(struct adapter *adapter)
3349 {
3350 device_t dev = adapter->dev;
3351 struct ixgbe_hw *hw = &adapter->hw;
3352 struct sysctllog **log;
3353 const struct sysctlnode *rnode, *cnode;
3354
3355 log = &adapter->sysctllog;
3356
3357 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3358 aprint_error_dev(dev, "could not create sysctl root\n");
3359 return;
3360 }
3361
3362 if (sysctl_createv(log, 0, &rnode, &cnode,
3363 CTLFLAG_READWRITE, CTLTYPE_INT,
3364 "debug", SYSCTL_DESCR("Debug Info"),
3365 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL)
3366 != 0)
3367 aprint_error_dev(dev, "could not create sysctl\n");
3368
3369 if (sysctl_createv(log, 0, &rnode, &cnode,
3370 CTLFLAG_READONLY, CTLTYPE_INT,
3371 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3372 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3373 aprint_error_dev(dev, "could not create sysctl\n");
3374
3375 if (sysctl_createv(log, 0, &rnode, &cnode,
3376 CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
3377 SYSCTL_DESCR("Number of jumbo buffers per queue"),
3378 NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
3379 CTL_EOL) != 0)
3380 aprint_error_dev(dev, "could not create sysctl\n");
3381
3382 if (sysctl_createv(log, 0, &rnode, &cnode,
3383 CTLFLAG_READONLY, CTLTYPE_INT,
3384 "num_queues", SYSCTL_DESCR("Number of queues"),
3385 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3386 aprint_error_dev(dev, "could not create sysctl\n");
3387
3388 /* Sysctls for all devices */
3389 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3390 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3391 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3392 CTL_EOL) != 0)
3393 aprint_error_dev(dev, "could not create sysctl\n");
3394
3395 adapter->enable_aim = ixgbe_enable_aim;
3396 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3397 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3398 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3399 aprint_error_dev(dev, "could not create sysctl\n");
3400
3401 if (sysctl_createv(log, 0, &rnode, &cnode,
3402 CTLFLAG_READWRITE, CTLTYPE_INT,
3403 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3404 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3405 CTL_EOL) != 0)
3406 aprint_error_dev(dev, "could not create sysctl\n");
3407
3408 /*
3409 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3410 * it causesflip-flopping softint/workqueue mode in one deferred
3411 * processing. Therefore, preempt_disable()/preempt_enable() are
3412 * required in ixgbe_sched_handle_que() to avoid
3413 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3414 * I think changing "que->txrx_use_workqueue" in interrupt handler
3415 * is lighter than doing preempt_disable()/preempt_enable() in every
3416 * ixgbe_sched_handle_que().
3417 */
3418 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3419 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3420 CTLTYPE_BOOL, "txrx_workqueue",
3421 SYSCTL_DESCR("Use workqueue for packet processing"),
3422 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE,
3423 CTL_EOL) != 0)
3424 aprint_error_dev(dev, "could not create sysctl\n");
3425
3426 #ifdef IXGBE_DEBUG
3427 /* testing sysctls (for all devices) */
3428 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3429 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3430 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3431 CTL_EOL) != 0)
3432 aprint_error_dev(dev, "could not create sysctl\n");
3433
3434 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3435 CTLTYPE_STRING, "print_rss_config",
3436 SYSCTL_DESCR("Prints RSS Configuration"),
3437 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3438 CTL_EOL) != 0)
3439 aprint_error_dev(dev, "could not create sysctl\n");
3440 #endif
3441 /* for X550 series devices */
3442 if (hw->mac.type >= ixgbe_mac_X550)
3443 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3444 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3445 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3446 CTL_EOL) != 0)
3447 aprint_error_dev(dev, "could not create sysctl\n");
3448
3449 /* for WoL-capable devices */
3450 if (adapter->wol_support) {
3451 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3452 CTLTYPE_BOOL, "wol_enable",
3453 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3454 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3455 CTL_EOL) != 0)
3456 aprint_error_dev(dev, "could not create sysctl\n");
3457
3458 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3459 CTLTYPE_INT, "wufc",
3460 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3461 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3462 CTL_EOL) != 0)
3463 aprint_error_dev(dev, "could not create sysctl\n");
3464 }
3465
3466 /* for X552/X557-AT devices */
3467 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3468 const struct sysctlnode *phy_node;
3469
3470 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3471 "phy", SYSCTL_DESCR("External PHY sysctls"),
3472 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3473 aprint_error_dev(dev, "could not create sysctl\n");
3474 return;
3475 }
3476
3477 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3478 CTLTYPE_INT, "temp",
3479 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3480 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3481 CTL_EOL) != 0)
3482 aprint_error_dev(dev, "could not create sysctl\n");
3483
3484 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3485 CTLTYPE_INT, "overtemp_occurred",
3486 SYSCTL_DESCR(
3487 "External PHY High Temperature Event Occurred"),
3488 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3489 CTL_CREATE, CTL_EOL) != 0)
3490 aprint_error_dev(dev, "could not create sysctl\n");
3491 }
3492
3493 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3494 && (hw->phy.type == ixgbe_phy_fw))
3495 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3496 CTLTYPE_BOOL, "force_10_100_autonego",
3497 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3498 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3499 CTL_CREATE, CTL_EOL) != 0)
3500 aprint_error_dev(dev, "could not create sysctl\n");
3501
3502 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3503 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3504 CTLTYPE_INT, "eee_state",
3505 SYSCTL_DESCR("EEE Power Save State"),
3506 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3507 CTL_EOL) != 0)
3508 aprint_error_dev(dev, "could not create sysctl\n");
3509 }
3510 } /* ixgbe_add_device_sysctls */
3511
3512 /************************************************************************
3513 * ixgbe_allocate_pci_resources
3514 ************************************************************************/
3515 static int
3516 ixgbe_allocate_pci_resources(struct adapter *adapter,
3517 const struct pci_attach_args *pa)
3518 {
3519 pcireg_t memtype, csr;
3520 device_t dev = adapter->dev;
3521 bus_addr_t addr;
3522 int flags;
3523
3524 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3525 switch (memtype) {
3526 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3527 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3528 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3529 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3530 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3531 goto map_err;
3532 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3533 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3534 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3535 }
3536 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3537 adapter->osdep.mem_size, flags,
3538 &adapter->osdep.mem_bus_space_handle) != 0) {
3539 map_err:
3540 adapter->osdep.mem_size = 0;
3541 aprint_error_dev(dev, "unable to map BAR0\n");
3542 return ENXIO;
3543 }
3544 /*
3545 * Enable address decoding for memory range in case BIOS or
3546 * UEFI don't set it.
3547 */
3548 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3549 PCI_COMMAND_STATUS_REG);
3550 csr |= PCI_COMMAND_MEM_ENABLE;
3551 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3552 csr);
3553 break;
3554 default:
3555 aprint_error_dev(dev, "unexpected type on BAR0\n");
3556 return ENXIO;
3557 }
3558
3559 return (0);
3560 } /* ixgbe_allocate_pci_resources */
3561
3562 static void
3563 ixgbe_free_deferred_handlers(struct adapter *adapter)
3564 {
3565 struct ix_queue *que = adapter->queues;
3566 struct tx_ring *txr = adapter->tx_rings;
3567 int i;
3568
3569 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3570 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3571 if (txr->txr_si != NULL)
3572 softint_disestablish(txr->txr_si);
3573 }
3574 if (que->que_si != NULL)
3575 softint_disestablish(que->que_si);
3576 }
3577 if (adapter->txr_wq != NULL)
3578 workqueue_destroy(adapter->txr_wq);
3579 if (adapter->txr_wq_enqueued != NULL)
3580 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3581 if (adapter->que_wq != NULL)
3582 workqueue_destroy(adapter->que_wq);
3583
3584 if (adapter->admin_wq != NULL) {
3585 workqueue_destroy(adapter->admin_wq);
3586 adapter->admin_wq = NULL;
3587 }
3588 if (adapter->timer_wq != NULL) {
3589 workqueue_destroy(adapter->timer_wq);
3590 adapter->timer_wq = NULL;
3591 }
3592 if (adapter->recovery_mode_timer_wq != NULL) {
3593 /*
3594 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3595 * the recovery_mode_timer workqueue, so call it here.
3596 */
3597 workqueue_wait(adapter->recovery_mode_timer_wq,
3598 &adapter->recovery_mode_timer_wc);
3599 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3600 workqueue_destroy(adapter->recovery_mode_timer_wq);
3601 adapter->recovery_mode_timer_wq = NULL;
3602 }
3603 } /* ixgbe_free_deferred_handlers */
3604
3605 /************************************************************************
3606 * ixgbe_detach - Device removal routine
3607 *
3608 * Called when the driver is being removed.
3609 * Stops the adapter and deallocates all the resources
3610 * that were allocated for driver operation.
3611 *
3612 * return 0 on success, positive on failure
3613 ************************************************************************/
3614 static int
3615 ixgbe_detach(device_t dev, int flags)
3616 {
3617 struct adapter *adapter = device_private(dev);
3618 struct rx_ring *rxr = adapter->rx_rings;
3619 struct tx_ring *txr = adapter->tx_rings;
3620 struct ixgbe_hw *hw = &adapter->hw;
3621 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3622 u32 ctrl_ext;
3623 int i;
3624
3625 INIT_DEBUGOUT("ixgbe_detach: begin");
3626 if (adapter->osdep.attached == false)
3627 return 0;
3628
3629 if (ixgbe_pci_iov_detach(dev) != 0) {
3630 device_printf(dev, "SR-IOV in use; detach first.\n");
3631 return (EBUSY);
3632 }
3633
3634 #if NVLAN > 0
3635 /* Make sure VLANs are not using driver */
3636 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3637 ; /* nothing to do: no VLANs */
3638 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3639 vlan_ifdetach(adapter->ifp);
3640 else {
3641 aprint_error_dev(dev, "VLANs in use, detach first\n");
3642 return (EBUSY);
3643 }
3644 #endif
3645
3646 adapter->osdep.detaching = true;
3647 /*
3648 * Stop the interface. ixgbe_setup_low_power_mode() calls
3649 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3650 * directly.
3651 */
3652 ixgbe_setup_low_power_mode(adapter);
3653
3654 callout_halt(&adapter->timer, NULL);
3655 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3656 callout_halt(&adapter->recovery_mode_timer, NULL);
3657
3658 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3659 atomic_store_relaxed(&adapter->admin_pending, 0);
3660 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3661 atomic_store_relaxed(&adapter->timer_pending, 0);
3662
3663 pmf_device_deregister(dev);
3664
3665 ether_ifdetach(adapter->ifp);
3666
3667 ixgbe_free_deferred_handlers(adapter);
3668
3669 /* let hardware know driver is unloading */
3670 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3671 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3673
3674 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3675 netmap_detach(adapter->ifp);
3676
3677 ixgbe_free_pci_resources(adapter);
3678 #if 0 /* XXX the NetBSD port is probably missing something here */
3679 bus_generic_detach(dev);
3680 #endif
3681 if_detach(adapter->ifp);
3682 ifmedia_fini(&adapter->media);
3683 if_percpuq_destroy(adapter->ipq);
3684
3685 sysctl_teardown(&adapter->sysctllog);
3686 evcnt_detach(&adapter->efbig_tx_dma_setup);
3687 evcnt_detach(&adapter->mbuf_defrag_failed);
3688 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3689 evcnt_detach(&adapter->einval_tx_dma_setup);
3690 evcnt_detach(&adapter->other_tx_dma_setup);
3691 evcnt_detach(&adapter->eagain_tx_dma_setup);
3692 evcnt_detach(&adapter->enomem_tx_dma_setup);
3693 evcnt_detach(&adapter->watchdog_events);
3694 evcnt_detach(&adapter->tso_err);
3695 evcnt_detach(&adapter->admin_irqev);
3696 evcnt_detach(&adapter->link_workev);
3697 evcnt_detach(&adapter->mod_workev);
3698 evcnt_detach(&adapter->msf_workev);
3699 evcnt_detach(&adapter->phy_workev);
3700
3701 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3702 if (i < __arraycount(stats->mpc)) {
3703 evcnt_detach(&stats->mpc[i]);
3704 if (hw->mac.type == ixgbe_mac_82598EB)
3705 evcnt_detach(&stats->rnbc[i]);
3706 }
3707 if (i < __arraycount(stats->pxontxc)) {
3708 evcnt_detach(&stats->pxontxc[i]);
3709 evcnt_detach(&stats->pxonrxc[i]);
3710 evcnt_detach(&stats->pxofftxc[i]);
3711 evcnt_detach(&stats->pxoffrxc[i]);
3712 if (hw->mac.type >= ixgbe_mac_82599EB)
3713 evcnt_detach(&stats->pxon2offc[i]);
3714 }
3715 }
3716
3717 txr = adapter->tx_rings;
3718 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3719 evcnt_detach(&adapter->queues[i].irqs);
3720 evcnt_detach(&adapter->queues[i].handleq);
3721 evcnt_detach(&adapter->queues[i].req);
3722 evcnt_detach(&txr->no_desc_avail);
3723 evcnt_detach(&txr->total_packets);
3724 evcnt_detach(&txr->tso_tx);
3725 #ifndef IXGBE_LEGACY_TX
3726 evcnt_detach(&txr->pcq_drops);
3727 #endif
3728
3729 if (i < __arraycount(stats->qprc)) {
3730 evcnt_detach(&stats->qprc[i]);
3731 evcnt_detach(&stats->qptc[i]);
3732 evcnt_detach(&stats->qbrc[i]);
3733 evcnt_detach(&stats->qbtc[i]);
3734 if (hw->mac.type >= ixgbe_mac_82599EB)
3735 evcnt_detach(&stats->qprdc[i]);
3736 }
3737
3738 evcnt_detach(&rxr->rx_packets);
3739 evcnt_detach(&rxr->rx_bytes);
3740 evcnt_detach(&rxr->rx_copies);
3741 evcnt_detach(&rxr->no_jmbuf);
3742 evcnt_detach(&rxr->rx_discarded);
3743 }
3744 evcnt_detach(&stats->ipcs);
3745 evcnt_detach(&stats->l4cs);
3746 evcnt_detach(&stats->ipcs_bad);
3747 evcnt_detach(&stats->l4cs_bad);
3748 evcnt_detach(&stats->intzero);
3749 evcnt_detach(&stats->legint);
3750 evcnt_detach(&stats->crcerrs);
3751 evcnt_detach(&stats->illerrc);
3752 evcnt_detach(&stats->errbc);
3753 evcnt_detach(&stats->mspdc);
3754 if (hw->mac.type >= ixgbe_mac_X550)
3755 evcnt_detach(&stats->mbsdc);
3756 evcnt_detach(&stats->mpctotal);
3757 evcnt_detach(&stats->mlfc);
3758 evcnt_detach(&stats->mrfc);
3759 evcnt_detach(&stats->rlec);
3760 evcnt_detach(&stats->lxontxc);
3761 evcnt_detach(&stats->lxonrxc);
3762 evcnt_detach(&stats->lxofftxc);
3763 evcnt_detach(&stats->lxoffrxc);
3764
3765 /* Packet Reception Stats */
3766 evcnt_detach(&stats->tor);
3767 evcnt_detach(&stats->gorc);
3768 evcnt_detach(&stats->tpr);
3769 evcnt_detach(&stats->gprc);
3770 evcnt_detach(&stats->mprc);
3771 evcnt_detach(&stats->bprc);
3772 evcnt_detach(&stats->prc64);
3773 evcnt_detach(&stats->prc127);
3774 evcnt_detach(&stats->prc255);
3775 evcnt_detach(&stats->prc511);
3776 evcnt_detach(&stats->prc1023);
3777 evcnt_detach(&stats->prc1522);
3778 evcnt_detach(&stats->ruc);
3779 evcnt_detach(&stats->rfc);
3780 evcnt_detach(&stats->roc);
3781 evcnt_detach(&stats->rjc);
3782 evcnt_detach(&stats->mngprc);
3783 evcnt_detach(&stats->mngpdc);
3784 evcnt_detach(&stats->xec);
3785
3786 /* Packet Transmission Stats */
3787 evcnt_detach(&stats->gotc);
3788 evcnt_detach(&stats->tpt);
3789 evcnt_detach(&stats->gptc);
3790 evcnt_detach(&stats->bptc);
3791 evcnt_detach(&stats->mptc);
3792 evcnt_detach(&stats->mngptc);
3793 evcnt_detach(&stats->ptc64);
3794 evcnt_detach(&stats->ptc127);
3795 evcnt_detach(&stats->ptc255);
3796 evcnt_detach(&stats->ptc511);
3797 evcnt_detach(&stats->ptc1023);
3798 evcnt_detach(&stats->ptc1522);
3799
3800 ixgbe_free_queues(adapter);
3801 free(adapter->mta, M_DEVBUF);
3802
3803 mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
3804 IXGBE_CORE_LOCK_DESTROY(adapter);
3805
3806 return (0);
3807 } /* ixgbe_detach */
3808
3809 /************************************************************************
3810 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3811 *
3812 * Prepare the adapter/port for LPLU and/or WoL
3813 ************************************************************************/
3814 static int
3815 ixgbe_setup_low_power_mode(struct adapter *adapter)
3816 {
3817 struct ixgbe_hw *hw = &adapter->hw;
3818 device_t dev = adapter->dev;
3819 struct ifnet *ifp = adapter->ifp;
3820 s32 error = 0;
3821
3822 /* Limit power management flow to X550EM baseT */
3823 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3824 hw->phy.ops.enter_lplu) {
3825 /* X550EM baseT adapters need a special LPLU flow */
3826 hw->phy.reset_disable = true;
3827 ixgbe_ifstop(ifp, 1);
3828 error = hw->phy.ops.enter_lplu(hw);
3829 if (error)
3830 device_printf(dev,
3831 "Error entering LPLU: %d\n", error);
3832 hw->phy.reset_disable = false;
3833 } else {
3834 /* Just stop for other adapters */
3835 ixgbe_ifstop(ifp, 1);
3836 }
3837
3838 IXGBE_CORE_LOCK(adapter);
3839
3840 if (!hw->wol_enabled) {
3841 ixgbe_set_phy_power(hw, FALSE);
3842 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3843 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3844 } else {
3845 /* Turn off support for APM wakeup. (Using ACPI instead) */
3846 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3847 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3848
3849 /*
3850 * Clear Wake Up Status register to prevent any previous wakeup
3851 * events from waking us up immediately after we suspend.
3852 */
3853 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3854
3855 /*
3856 * Program the Wakeup Filter Control register with user filter
3857 * settings
3858 */
3859 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3860
3861 /* Enable wakeups and power management in Wakeup Control */
3862 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3863 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3864
3865 }
3866
3867 IXGBE_CORE_UNLOCK(adapter);
3868
3869 return error;
3870 } /* ixgbe_setup_low_power_mode */
3871
3872 /************************************************************************
3873 * ixgbe_shutdown - Shutdown entry point
3874 ************************************************************************/
3875 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3876 static int
3877 ixgbe_shutdown(device_t dev)
3878 {
3879 struct adapter *adapter = device_private(dev);
3880 int error = 0;
3881
3882 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3883
3884 error = ixgbe_setup_low_power_mode(adapter);
3885
3886 return (error);
3887 } /* ixgbe_shutdown */
3888 #endif
3889
3890 /************************************************************************
3891 * ixgbe_suspend
3892 *
3893 * From D0 to D3
3894 ************************************************************************/
3895 static bool
3896 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3897 {
3898 struct adapter *adapter = device_private(dev);
3899 int error = 0;
3900
3901 INIT_DEBUGOUT("ixgbe_suspend: begin");
3902
3903 error = ixgbe_setup_low_power_mode(adapter);
3904
3905 return (error);
3906 } /* ixgbe_suspend */
3907
3908 /************************************************************************
3909 * ixgbe_resume
3910 *
3911 * From D3 to D0
3912 ************************************************************************/
3913 static bool
3914 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3915 {
3916 struct adapter *adapter = device_private(dev);
3917 struct ifnet *ifp = adapter->ifp;
3918 struct ixgbe_hw *hw = &adapter->hw;
3919 u32 wus;
3920
3921 INIT_DEBUGOUT("ixgbe_resume: begin");
3922
3923 IXGBE_CORE_LOCK(adapter);
3924
3925 /* Read & clear WUS register */
3926 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3927 if (wus)
3928 device_printf(dev, "Woken up by (WUS): %#010x\n",
3929 IXGBE_READ_REG(hw, IXGBE_WUS));
3930 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3931 /* And clear WUFC until next low-power transition */
3932 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3933
3934 /*
3935 * Required after D3->D0 transition;
3936 * will re-advertise all previous advertised speeds
3937 */
3938 if (ifp->if_flags & IFF_UP)
3939 ixgbe_init_locked(adapter);
3940
3941 IXGBE_CORE_UNLOCK(adapter);
3942
3943 return true;
3944 } /* ixgbe_resume */
3945
3946 /*
3947 * Set the various hardware offload abilities.
3948 *
3949 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3950 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3951 * mbuf offload flags the driver will understand.
3952 */
3953 static void
3954 ixgbe_set_if_hwassist(struct adapter *adapter)
3955 {
3956 /* XXX */
3957 }
3958
3959 /************************************************************************
3960 * ixgbe_init_locked - Init entry point
3961 *
3962 * Used in two ways: It is used by the stack as an init
3963 * entry point in network interface structure. It is also
3964 * used by the driver as a hw/sw initialization routine to
3965 * get to a consistent state.
3966 *
3967 * return 0 on success, positive on failure
3968 ************************************************************************/
3969 static void
3970 ixgbe_init_locked(struct adapter *adapter)
3971 {
3972 struct ifnet *ifp = adapter->ifp;
3973 device_t dev = adapter->dev;
3974 struct ixgbe_hw *hw = &adapter->hw;
3975 struct ix_queue *que;
3976 struct tx_ring *txr;
3977 struct rx_ring *rxr;
3978 u32 txdctl, mhadd;
3979 u32 rxdctl, rxctrl;
3980 u32 ctrl_ext;
3981 bool unsupported_sfp = false;
3982 int i, j, error;
3983
3984 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3985
3986 KASSERT(mutex_owned(&adapter->core_mtx));
3987 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3988
3989 hw->need_unsupported_sfp_recovery = false;
3990 hw->adapter_stopped = FALSE;
3991 ixgbe_stop_adapter(hw);
3992 callout_stop(&adapter->timer);
3993 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3994 callout_stop(&adapter->recovery_mode_timer);
3995 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3996 que->disabled_count = 0;
3997
3998 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3999 adapter->max_frame_size =
4000 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4001
4002 /* Queue indices may change with IOV mode */
4003 ixgbe_align_all_queue_indices(adapter);
4004
4005 /* reprogram the RAR[0] in case user changed it. */
4006 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
4007
4008 /* Get the latest mac address, User can use a LAA */
4009 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
4010 IXGBE_ETH_LENGTH_OF_ADDRESS);
4011 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
4012 hw->addr_ctrl.rar_used_count = 1;
4013
4014 /* Set hardware offload abilities from ifnet flags */
4015 ixgbe_set_if_hwassist(adapter);
4016
4017 /* Prepare transmit descriptors and buffers */
4018 if (ixgbe_setup_transmit_structures(adapter)) {
4019 device_printf(dev, "Could not setup transmit structures\n");
4020 ixgbe_stop_locked(adapter);
4021 return;
4022 }
4023
4024 ixgbe_init_hw(hw);
4025
4026 ixgbe_initialize_iov(adapter);
4027
4028 ixgbe_initialize_transmit_units(adapter);
4029
4030 /* Setup Multicast table */
4031 ixgbe_set_rxfilter(adapter);
4032
4033 /* Determine the correct mbuf pool, based on frame size */
4034 if (adapter->max_frame_size <= MCLBYTES)
4035 adapter->rx_mbuf_sz = MCLBYTES;
4036 else
4037 adapter->rx_mbuf_sz = MJUMPAGESIZE;
4038
4039 /* Prepare receive descriptors and buffers */
4040 error = ixgbe_setup_receive_structures(adapter);
4041 if (error) {
4042 device_printf(dev,
4043 "Could not setup receive structures (err = %d)\n", error);
4044 ixgbe_stop_locked(adapter);
4045 return;
4046 }
4047
4048 /* Configure RX settings */
4049 ixgbe_initialize_receive_units(adapter);
4050
4051 /* Initialize variable holding task enqueue requests interrupts */
4052 adapter->task_requests = 0;
4053
4054 /* Enable SDP & MSI-X interrupts based on adapter */
4055 ixgbe_config_gpie(adapter);
4056
4057 /* Set MTU size */
4058 if (ifp->if_mtu > ETHERMTU) {
4059 /* aka IXGBE_MAXFRS on 82599 and newer */
4060 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4061 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4062 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4063 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4064 }
4065
4066 /* Now enable all the queues */
4067 for (i = 0; i < adapter->num_queues; i++) {
4068 txr = &adapter->tx_rings[i];
4069 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4070 txdctl |= IXGBE_TXDCTL_ENABLE;
4071 /* Set WTHRESH to 8, burst writeback */
4072 txdctl |= (8 << 16);
4073 /*
4074 * When the internal queue falls below PTHRESH (32),
4075 * start prefetching as long as there are at least
4076 * HTHRESH (1) buffers ready. The values are taken
4077 * from the Intel linux driver 3.8.21.
4078 * Prefetching enables tx line rate even with 1 queue.
4079 */
4080 txdctl |= (32 << 0) | (1 << 8);
4081 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4082 }
4083
4084 for (i = 0; i < adapter->num_queues; i++) {
4085 rxr = &adapter->rx_rings[i];
4086 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4087 if (hw->mac.type == ixgbe_mac_82598EB) {
4088 /*
4089 * PTHRESH = 21
4090 * HTHRESH = 4
4091 * WTHRESH = 8
4092 */
4093 rxdctl &= ~0x3FFFFF;
4094 rxdctl |= 0x080420;
4095 }
4096 rxdctl |= IXGBE_RXDCTL_ENABLE;
4097 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4098 for (j = 0; j < 10; j++) {
4099 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4100 IXGBE_RXDCTL_ENABLE)
4101 break;
4102 else
4103 msec_delay(1);
4104 }
4105 IXGBE_WRITE_BARRIER(hw);
4106
4107 /*
4108 * In netmap mode, we must preserve the buffers made
4109 * available to userspace before the if_init()
4110 * (this is true by default on the TX side, because
4111 * init makes all buffers available to userspace).
4112 *
4113 * netmap_reset() and the device specific routines
4114 * (e.g. ixgbe_setup_receive_rings()) map these
4115 * buffers at the end of the NIC ring, so here we
4116 * must set the RDT (tail) register to make sure
4117 * they are not overwritten.
4118 *
4119 * In this driver the NIC ring starts at RDH = 0,
4120 * RDT points to the last slot available for reception (?),
4121 * so RDT = num_rx_desc - 1 means the whole ring is available.
4122 */
4123 #ifdef DEV_NETMAP
4124 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4125 (ifp->if_capenable & IFCAP_NETMAP)) {
4126 struct netmap_adapter *na = NA(adapter->ifp);
4127 struct netmap_kring *kring = na->rx_rings[i];
4128 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4129
4130 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4131 } else
4132 #endif /* DEV_NETMAP */
4133 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4134 adapter->num_rx_desc - 1);
4135 }
4136
4137 /* Enable Receive engine */
4138 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4139 if (hw->mac.type == ixgbe_mac_82598EB)
4140 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4141 rxctrl |= IXGBE_RXCTRL_RXEN;
4142 ixgbe_enable_rx_dma(hw, rxctrl);
4143
4144 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4145 atomic_store_relaxed(&adapter->timer_pending, 0);
4146 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4147 callout_reset(&adapter->recovery_mode_timer, hz,
4148 ixgbe_recovery_mode_timer, adapter);
4149
4150 /* Set up MSI/MSI-X routing */
4151 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4152 ixgbe_configure_ivars(adapter);
4153 /* Set up auto-mask */
4154 if (hw->mac.type == ixgbe_mac_82598EB)
4155 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4156 else {
4157 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4158 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4159 }
4160 } else { /* Simple settings for Legacy/MSI */
4161 ixgbe_set_ivar(adapter, 0, 0, 0);
4162 ixgbe_set_ivar(adapter, 0, 0, 1);
4163 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4164 }
4165
4166 ixgbe_init_fdir(adapter);
4167
4168 /*
4169 * Check on any SFP devices that
4170 * need to be kick-started
4171 */
4172 if (hw->phy.type == ixgbe_phy_none) {
4173 error = hw->phy.ops.identify(hw);
4174 if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
4175 unsupported_sfp = true;
4176 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4177 unsupported_sfp = true;
4178
4179 if (unsupported_sfp)
4180 device_printf(dev,
4181 "Unsupported SFP+ module type was detected.\n");
4182
4183 /* Set moderation on the Link interrupt */
4184 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4185
4186 /* Enable EEE power saving */
4187 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4188 hw->mac.ops.setup_eee(hw,
4189 adapter->feat_en & IXGBE_FEATURE_EEE);
4190
4191 /* Enable power to the phy. */
4192 if (!unsupported_sfp) {
4193 ixgbe_set_phy_power(hw, TRUE);
4194
4195 /* Config/Enable Link */
4196 ixgbe_config_link(adapter);
4197 }
4198
4199 /* Hardware Packet Buffer & Flow Control setup */
4200 ixgbe_config_delay_values(adapter);
4201
4202 /* Initialize the FC settings */
4203 ixgbe_start_hw(hw);
4204
4205 /* Set up VLAN support and filter */
4206 ixgbe_setup_vlan_hw_support(adapter);
4207
4208 /* Setup DMA Coalescing */
4209 ixgbe_config_dmac(adapter);
4210
4211 /* OK to schedule workqueues. */
4212 adapter->schedule_wqs_ok = true;
4213
4214 /* And now turn on interrupts */
4215 ixgbe_enable_intr(adapter);
4216
4217 /* Enable the use of the MBX by the VF's */
4218 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4219 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4220 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4221 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4222 }
4223
4224 /* Update saved flags. See ixgbe_ifflags_cb() */
4225 adapter->if_flags = ifp->if_flags;
4226 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4227
4228 /* Now inform the stack we're ready */
4229 ifp->if_flags |= IFF_RUNNING;
4230
4231 return;
4232 } /* ixgbe_init_locked */
4233
4234 /************************************************************************
4235 * ixgbe_init
4236 ************************************************************************/
4237 static int
4238 ixgbe_init(struct ifnet *ifp)
4239 {
4240 struct adapter *adapter = ifp->if_softc;
4241
4242 IXGBE_CORE_LOCK(adapter);
4243 ixgbe_init_locked(adapter);
4244 IXGBE_CORE_UNLOCK(adapter);
4245
4246 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4247 } /* ixgbe_init */
4248
4249 /************************************************************************
4250 * ixgbe_set_ivar
4251 *
4252 * Setup the correct IVAR register for a particular MSI-X interrupt
4253 * (yes this is all very magic and confusing :)
4254 * - entry is the register array entry
4255 * - vector is the MSI-X vector for this queue
4256 * - type is RX/TX/MISC
4257 ************************************************************************/
4258 static void
4259 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4260 {
4261 struct ixgbe_hw *hw = &adapter->hw;
4262 u32 ivar, index;
4263
4264 vector |= IXGBE_IVAR_ALLOC_VAL;
4265
4266 switch (hw->mac.type) {
4267 case ixgbe_mac_82598EB:
4268 if (type == -1)
4269 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4270 else
4271 entry += (type * 64);
4272 index = (entry >> 2) & 0x1F;
4273 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4274 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4275 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4276 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4277 break;
4278 case ixgbe_mac_82599EB:
4279 case ixgbe_mac_X540:
4280 case ixgbe_mac_X550:
4281 case ixgbe_mac_X550EM_x:
4282 case ixgbe_mac_X550EM_a:
4283 if (type == -1) { /* MISC IVAR */
4284 index = (entry & 1) * 8;
4285 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4286 ivar &= ~(0xffUL << index);
4287 ivar |= ((u32)vector << index);
4288 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4289 } else { /* RX/TX IVARS */
4290 index = (16 * (entry & 1)) + (8 * type);
4291 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4292 ivar &= ~(0xffUL << index);
4293 ivar |= ((u32)vector << index);
4294 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4295 }
4296 break;
4297 default:
4298 break;
4299 }
4300 } /* ixgbe_set_ivar */
4301
4302 /************************************************************************
4303 * ixgbe_configure_ivars
4304 ************************************************************************/
4305 static void
4306 ixgbe_configure_ivars(struct adapter *adapter)
4307 {
4308 struct ix_queue *que = adapter->queues;
4309 u32 newitr;
4310
4311 if (ixgbe_max_interrupt_rate > 0)
4312 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4313 else {
4314 /*
4315 * Disable DMA coalescing if interrupt moderation is
4316 * disabled.
4317 */
4318 adapter->dmac = 0;
4319 newitr = 0;
4320 }
4321
4322 for (int i = 0; i < adapter->num_queues; i++, que++) {
4323 struct rx_ring *rxr = &adapter->rx_rings[i];
4324 struct tx_ring *txr = &adapter->tx_rings[i];
4325 /* First the RX queue entry */
4326 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4327 /* ... and the TX */
4328 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4329 /* Set an Initial EITR value */
4330 ixgbe_eitr_write(adapter, que->msix, newitr);
4331 /*
4332 * To eliminate influence of the previous state.
4333 * At this point, Tx/Rx interrupt handler
4334 * (ixgbe_msix_que()) cannot be called, so both
4335 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4336 */
4337 que->eitr_setting = 0;
4338 }
4339
4340 /* For the Link interrupt */
4341 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4342 } /* ixgbe_configure_ivars */
4343
4344 /************************************************************************
4345 * ixgbe_config_gpie
4346 ************************************************************************/
4347 static void
4348 ixgbe_config_gpie(struct adapter *adapter)
4349 {
4350 struct ixgbe_hw *hw = &adapter->hw;
4351 u32 gpie;
4352
4353 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4354
4355 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4356 /* Enable Enhanced MSI-X mode */
4357 gpie |= IXGBE_GPIE_MSIX_MODE
4358 | IXGBE_GPIE_EIAME
4359 | IXGBE_GPIE_PBA_SUPPORT
4360 | IXGBE_GPIE_OCD;
4361 }
4362
4363 /* Fan Failure Interrupt */
4364 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4365 gpie |= IXGBE_SDP1_GPIEN;
4366
4367 /* Thermal Sensor Interrupt */
4368 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4369 gpie |= IXGBE_SDP0_GPIEN_X540;
4370
4371 /* Link detection */
4372 switch (hw->mac.type) {
4373 case ixgbe_mac_82599EB:
4374 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4375 break;
4376 case ixgbe_mac_X550EM_x:
4377 case ixgbe_mac_X550EM_a:
4378 gpie |= IXGBE_SDP0_GPIEN_X540;
4379 break;
4380 default:
4381 break;
4382 }
4383
4384 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4385
4386 } /* ixgbe_config_gpie */
4387
4388 /************************************************************************
4389 * ixgbe_config_delay_values
4390 *
4391 * Requires adapter->max_frame_size to be set.
4392 ************************************************************************/
4393 static void
4394 ixgbe_config_delay_values(struct adapter *adapter)
4395 {
4396 struct ixgbe_hw *hw = &adapter->hw;
4397 u32 rxpb, frame, size, tmp;
4398
4399 frame = adapter->max_frame_size;
4400
4401 /* Calculate High Water */
4402 switch (hw->mac.type) {
4403 case ixgbe_mac_X540:
4404 case ixgbe_mac_X550:
4405 case ixgbe_mac_X550EM_x:
4406 case ixgbe_mac_X550EM_a:
4407 tmp = IXGBE_DV_X540(frame, frame);
4408 break;
4409 default:
4410 tmp = IXGBE_DV(frame, frame);
4411 break;
4412 }
4413 size = IXGBE_BT2KB(tmp);
4414 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4415 hw->fc.high_water[0] = rxpb - size;
4416
4417 /* Now calculate Low Water */
4418 switch (hw->mac.type) {
4419 case ixgbe_mac_X540:
4420 case ixgbe_mac_X550:
4421 case ixgbe_mac_X550EM_x:
4422 case ixgbe_mac_X550EM_a:
4423 tmp = IXGBE_LOW_DV_X540(frame);
4424 break;
4425 default:
4426 tmp = IXGBE_LOW_DV(frame);
4427 break;
4428 }
4429 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4430
4431 hw->fc.pause_time = IXGBE_FC_PAUSE;
4432 hw->fc.send_xon = TRUE;
4433 } /* ixgbe_config_delay_values */
4434
4435 /************************************************************************
4436 * ixgbe_set_rxfilter - Multicast Update
4437 *
4438 * Called whenever multicast address list is updated.
4439 ************************************************************************/
4440 static void
4441 ixgbe_set_rxfilter(struct adapter *adapter)
4442 {
4443 struct ixgbe_mc_addr *mta;
4444 struct ifnet *ifp = adapter->ifp;
4445 u8 *update_ptr;
4446 int mcnt = 0;
4447 u32 fctrl;
4448 struct ethercom *ec = &adapter->osdep.ec;
4449 struct ether_multi *enm;
4450 struct ether_multistep step;
4451
4452 KASSERT(mutex_owned(&adapter->core_mtx));
4453 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4454
4455 mta = adapter->mta;
4456 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4457
4458 ETHER_LOCK(ec);
4459 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4460 ETHER_FIRST_MULTI(step, ec, enm);
4461 while (enm != NULL) {
4462 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4463 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4464 ETHER_ADDR_LEN) != 0)) {
4465 ec->ec_flags |= ETHER_F_ALLMULTI;
4466 break;
4467 }
4468 bcopy(enm->enm_addrlo,
4469 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4470 mta[mcnt].vmdq = adapter->pool;
4471 mcnt++;
4472 ETHER_NEXT_MULTI(step, enm);
4473 }
4474
4475 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4476 if (ifp->if_flags & IFF_PROMISC)
4477 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4478 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4479 fctrl |= IXGBE_FCTRL_MPE;
4480 fctrl &= ~IXGBE_FCTRL_UPE;
4481 } else
4482 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4483
4484 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4485
4486 /* Update multicast filter entries only when it's not ALLMULTI */
4487 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4488 ETHER_UNLOCK(ec);
4489 update_ptr = (u8 *)mta;
4490 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4491 ixgbe_mc_array_itr, TRUE);
4492 } else
4493 ETHER_UNLOCK(ec);
4494 } /* ixgbe_set_rxfilter */
4495
4496 /************************************************************************
4497 * ixgbe_mc_array_itr
4498 *
4499 * An iterator function needed by the multicast shared code.
4500 * It feeds the shared code routine the addresses in the
4501 * array of ixgbe_set_rxfilter() one by one.
4502 ************************************************************************/
4503 static u8 *
4504 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4505 {
4506 struct ixgbe_mc_addr *mta;
4507
4508 mta = (struct ixgbe_mc_addr *)*update_ptr;
4509 *vmdq = mta->vmdq;
4510
4511 *update_ptr = (u8*)(mta + 1);
4512
4513 return (mta->addr);
4514 } /* ixgbe_mc_array_itr */
4515
4516 /************************************************************************
4517 * ixgbe_local_timer - Timer routine
4518 *
4519 * Checks for link status, updates statistics,
4520 * and runs the watchdog check.
4521 ************************************************************************/
4522 static void
4523 ixgbe_local_timer(void *arg)
4524 {
4525 struct adapter *adapter = arg;
4526
4527 if (adapter->schedule_wqs_ok) {
4528 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4529 workqueue_enqueue(adapter->timer_wq,
4530 &adapter->timer_wc, NULL);
4531 }
4532 }
4533
4534 static void
4535 ixgbe_handle_timer(struct work *wk, void *context)
4536 {
4537 struct adapter *adapter = context;
4538 struct ixgbe_hw *hw = &adapter->hw;
4539 device_t dev = adapter->dev;
4540 struct ix_queue *que = adapter->queues;
4541 u64 queues = 0;
4542 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4543 int hung = 0;
4544 int i;
4545
4546 IXGBE_CORE_LOCK(adapter);
4547
4548 /* Check for pluggable optics */
4549 if (ixgbe_is_sfp(hw)) {
4550 bool sched_mod_task = false;
4551
4552 if (hw->mac.type == ixgbe_mac_82598EB) {
4553 /*
4554 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4555 * any GPIO(SDP). So just schedule TASK_MOD.
4556 */
4557 sched_mod_task = true;
4558 } else {
4559 bool was_full, is_full;
4560
4561 was_full =
4562 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4563 is_full = ixgbe_sfp_cage_full(hw);
4564
4565 /* Do probe if cage state changed */
4566 if (was_full ^ is_full)
4567 sched_mod_task = true;
4568 }
4569 if (sched_mod_task) {
4570 mutex_enter(&adapter->admin_mtx);
4571 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4572 ixgbe_schedule_admin_tasklet(adapter);
4573 mutex_exit(&adapter->admin_mtx);
4574 }
4575 }
4576
4577 ixgbe_update_link_status(adapter);
4578 ixgbe_update_stats_counters(adapter);
4579
4580 /* Update some event counters */
4581 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4582 que = adapter->queues;
4583 for (i = 0; i < adapter->num_queues; i++, que++) {
4584 struct tx_ring *txr = que->txr;
4585
4586 v0 += txr->q_efbig_tx_dma_setup;
4587 v1 += txr->q_mbuf_defrag_failed;
4588 v2 += txr->q_efbig2_tx_dma_setup;
4589 v3 += txr->q_einval_tx_dma_setup;
4590 v4 += txr->q_other_tx_dma_setup;
4591 v5 += txr->q_eagain_tx_dma_setup;
4592 v6 += txr->q_enomem_tx_dma_setup;
4593 v7 += txr->q_tso_err;
4594 }
4595 adapter->efbig_tx_dma_setup.ev_count = v0;
4596 adapter->mbuf_defrag_failed.ev_count = v1;
4597 adapter->efbig2_tx_dma_setup.ev_count = v2;
4598 adapter->einval_tx_dma_setup.ev_count = v3;
4599 adapter->other_tx_dma_setup.ev_count = v4;
4600 adapter->eagain_tx_dma_setup.ev_count = v5;
4601 adapter->enomem_tx_dma_setup.ev_count = v6;
4602 adapter->tso_err.ev_count = v7;
4603
4604 /*
4605 * Check the TX queues status
4606 * - mark hung queues so we don't schedule on them
4607 * - watchdog only if all queues show hung
4608 */
4609 que = adapter->queues;
4610 for (i = 0; i < adapter->num_queues; i++, que++) {
4611 /* Keep track of queues with work for soft irq */
4612 if (que->txr->busy)
4613 queues |= 1ULL << que->me;
4614 /*
4615 * Each time txeof runs without cleaning, but there
4616 * are uncleaned descriptors it increments busy. If
4617 * we get to the MAX we declare it hung.
4618 */
4619 if (que->busy == IXGBE_QUEUE_HUNG) {
4620 ++hung;
4621 /* Mark the queue as inactive */
4622 adapter->active_queues &= ~(1ULL << que->me);
4623 continue;
4624 } else {
4625 /* Check if we've come back from hung */
4626 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4627 adapter->active_queues |= 1ULL << que->me;
4628 }
4629 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4630 device_printf(dev,
4631 "Warning queue %d appears to be hung!\n", i);
4632 que->txr->busy = IXGBE_QUEUE_HUNG;
4633 ++hung;
4634 }
4635 }
4636
4637 /* Only truly watchdog if all queues show hung */
4638 if (hung == adapter->num_queues)
4639 goto watchdog;
4640 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4641 else if (queues != 0) { /* Force an IRQ on queues with work */
4642 que = adapter->queues;
4643 for (i = 0; i < adapter->num_queues; i++, que++) {
4644 mutex_enter(&que->dc_mtx);
4645 if (que->disabled_count == 0)
4646 ixgbe_rearm_queues(adapter,
4647 queues & ((u64)1 << i));
4648 mutex_exit(&que->dc_mtx);
4649 }
4650 }
4651 #endif
4652
4653 atomic_store_relaxed(&adapter->timer_pending, 0);
4654 IXGBE_CORE_UNLOCK(adapter);
4655 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4656 return;
4657
4658 watchdog:
4659 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4660 adapter->ifp->if_flags &= ~IFF_RUNNING;
4661 adapter->watchdog_events.ev_count++;
4662 ixgbe_init_locked(adapter);
4663 IXGBE_CORE_UNLOCK(adapter);
4664 } /* ixgbe_handle_timer */
4665
4666 /************************************************************************
4667 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4668 ************************************************************************/
4669 static void
4670 ixgbe_recovery_mode_timer(void *arg)
4671 {
4672 struct adapter *adapter = arg;
4673
4674 if (__predict_true(adapter->osdep.detaching == false)) {
4675 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4676 0, 1) == 0) {
4677 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4678 &adapter->recovery_mode_timer_wc, NULL);
4679 }
4680 }
4681 }
4682
4683 static void
4684 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4685 {
4686 struct adapter *adapter = context;
4687 struct ixgbe_hw *hw = &adapter->hw;
4688
4689 IXGBE_CORE_LOCK(adapter);
4690 if (ixgbe_fw_recovery_mode(hw)) {
4691 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4692 /* Firmware error detected, entering recovery mode */
4693 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4694
4695 if (hw->adapter_stopped == FALSE)
4696 ixgbe_stop_locked(adapter);
4697 }
4698 } else
4699 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4700
4701 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4702 callout_reset(&adapter->recovery_mode_timer, hz,
4703 ixgbe_recovery_mode_timer, adapter);
4704 IXGBE_CORE_UNLOCK(adapter);
4705 } /* ixgbe_handle_recovery_mode_timer */
4706
4707 /************************************************************************
4708 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4709 * bool int_en: true if it's called when the interrupt is enabled.
4710 ************************************************************************/
4711 static void
4712 ixgbe_handle_mod(void *context, bool int_en)
4713 {
4714 struct adapter *adapter = context;
4715 struct ixgbe_hw *hw = &adapter->hw;
4716 device_t dev = adapter->dev;
4717 enum ixgbe_sfp_type last_sfp_type;
4718 u32 err;
4719 bool last_unsupported_sfp_recovery;
4720
4721 KASSERT(mutex_owned(&adapter->core_mtx));
4722
4723 last_sfp_type = hw->phy.sfp_type;
4724 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4725 ++adapter->mod_workev.ev_count;
4726 if (adapter->hw.need_crosstalk_fix) {
4727 if ((hw->mac.type != ixgbe_mac_82598EB) &&
4728 !ixgbe_sfp_cage_full(hw))
4729 goto out;
4730 }
4731
4732 err = hw->phy.ops.identify_sfp(hw);
4733 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4734 if (last_unsupported_sfp_recovery == false)
4735 device_printf(dev,
4736 "Unsupported SFP+ module type was detected.\n");
4737 goto out;
4738 }
4739
4740 if (hw->need_unsupported_sfp_recovery) {
4741 device_printf(dev, "Recovering from unsupported SFP\n");
4742 /*
4743 * We could recover the status by calling setup_sfp(),
4744 * setup_link() and some others. It's complex and might not
4745 * work correctly on some unknown cases. To avoid such type of
4746 * problem, call ixgbe_init_locked(). It's simple and safe
4747 * approach.
4748 */
4749 ixgbe_init_locked(adapter);
4750 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4751 (hw->phy.sfp_type != last_sfp_type)) {
4752 /* A module is inserted and changed. */
4753
4754 if (hw->mac.type == ixgbe_mac_82598EB)
4755 err = hw->phy.ops.reset(hw);
4756 else {
4757 err = hw->mac.ops.setup_sfp(hw);
4758 hw->phy.sfp_setup_needed = FALSE;
4759 }
4760 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4761 device_printf(dev,
4762 "Setup failure - unsupported SFP+ module type.\n");
4763 goto out;
4764 }
4765 }
4766
4767 out:
4768 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4769 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4770
4771 /* Adjust media types shown in ifconfig */
4772 IXGBE_CORE_UNLOCK(adapter);
4773 ifmedia_removeall(&adapter->media);
4774 ixgbe_add_media_types(adapter);
4775 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4776 IXGBE_CORE_LOCK(adapter);
4777
4778 /*
4779 * Don't shedule MSF event if the chip is 82598. 82598 doesn't support
4780 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
4781 * flap because the function calls setup_link().
4782 */
4783 if (hw->mac.type != ixgbe_mac_82598EB) {
4784 mutex_enter(&adapter->admin_mtx);
4785 if (int_en)
4786 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
4787 else
4788 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4789 mutex_exit(&adapter->admin_mtx);
4790 }
4791
4792 /*
4793 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4794 * the workqueue now.
4795 */
4796 } /* ixgbe_handle_mod */
4797
4798
4799 /************************************************************************
4800 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4801 ************************************************************************/
4802 static void
4803 ixgbe_handle_msf(void *context)
4804 {
4805 struct adapter *adapter = context;
4806 struct ixgbe_hw *hw = &adapter->hw;
4807 u32 autoneg;
4808 bool negotiate;
4809
4810 KASSERT(mutex_owned(&adapter->core_mtx));
4811
4812 ++adapter->msf_workev.ev_count;
4813
4814 autoneg = hw->phy.autoneg_advertised;
4815 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4816 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4817 if (hw->mac.ops.setup_link)
4818 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4819 } /* ixgbe_handle_msf */
4820
4821 /************************************************************************
4822 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4823 ************************************************************************/
4824 static void
4825 ixgbe_handle_phy(void *context)
4826 {
4827 struct adapter *adapter = context;
4828 struct ixgbe_hw *hw = &adapter->hw;
4829 int error;
4830
4831 KASSERT(mutex_owned(&adapter->core_mtx));
4832
4833 ++adapter->phy_workev.ev_count;
4834 error = hw->phy.ops.handle_lasi(hw);
4835 if (error == IXGBE_ERR_OVERTEMP)
4836 device_printf(adapter->dev,
4837 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4838 " PHY will downshift to lower power state!\n");
4839 else if (error)
4840 device_printf(adapter->dev,
4841 "Error handling LASI interrupt: %d\n", error);
4842 } /* ixgbe_handle_phy */
4843
4844 static void
4845 ixgbe_handle_admin(struct work *wk, void *context)
4846 {
4847 struct adapter *adapter = context;
4848 struct ifnet *ifp = adapter->ifp;
4849 struct ixgbe_hw *hw = &adapter->hw;
4850 u32 task_requests;
4851 u32 eims_enable = 0;
4852
4853 mutex_enter(&adapter->admin_mtx);
4854 adapter->admin_pending = 0;
4855 task_requests = adapter->task_requests;
4856 adapter->task_requests = 0;
4857 mutex_exit(&adapter->admin_mtx);
4858
4859 /*
4860 * Hold the IFNET_LOCK across this entire call. This will
4861 * prevent additional changes to adapter->phy_layer
4862 * and serialize calls to this tasklet. We cannot hold the
4863 * CORE_LOCK while calling into the ifmedia functions as
4864 * they call ifmedia_lock() and the lock is CORE_LOCK.
4865 */
4866 IFNET_LOCK(ifp);
4867 IXGBE_CORE_LOCK(adapter);
4868 if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4869 ixgbe_handle_link(adapter);
4870 eims_enable |= IXGBE_EIMS_LSC;
4871 }
4872 if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) {
4873 ixgbe_handle_mod(adapter, false);
4874 }
4875 if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4876 ixgbe_handle_mod(adapter, true);
4877 if (hw->mac.type >= ixgbe_mac_X540)
4878 eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4879 else
4880 eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4881 }
4882 if ((task_requests
4883 & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4884 ixgbe_handle_msf(adapter);
4885 if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4886 (hw->mac.type == ixgbe_mac_82599EB))
4887 eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4888 }
4889 if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4890 ixgbe_handle_phy(adapter);
4891 eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4892 }
4893 if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4894 ixgbe_reinit_fdir(adapter);
4895 eims_enable |= IXGBE_EIMS_FLOW_DIR;
4896 }
4897 #if 0 /* notyet */
4898 if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4899 ixgbe_handle_mbx(adapter);
4900 eims_enable |= IXGBE_EIMS_MAILBOX;
4901 }
4902 #endif
4903 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
4904
4905 IXGBE_CORE_UNLOCK(adapter);
4906 IFNET_UNLOCK(ifp);
4907 } /* ixgbe_handle_admin */
4908
4909 static void
4910 ixgbe_ifstop(struct ifnet *ifp, int disable)
4911 {
4912 struct adapter *adapter = ifp->if_softc;
4913
4914 IXGBE_CORE_LOCK(adapter);
4915 ixgbe_stop_locked(adapter);
4916 IXGBE_CORE_UNLOCK(adapter);
4917
4918 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4919 atomic_store_relaxed(&adapter->timer_pending, 0);
4920 }
4921
4922 /************************************************************************
4923 * ixgbe_stop_locked - Stop the hardware
4924 *
4925 * Disables all traffic on the adapter by issuing a
4926 * global reset on the MAC and deallocates TX/RX buffers.
4927 ************************************************************************/
4928 static void
4929 ixgbe_stop_locked(void *arg)
4930 {
4931 struct ifnet *ifp;
4932 struct adapter *adapter = arg;
4933 struct ixgbe_hw *hw = &adapter->hw;
4934
4935 ifp = adapter->ifp;
4936
4937 KASSERT(mutex_owned(&adapter->core_mtx));
4938
4939 INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
4940 ixgbe_disable_intr(adapter);
4941 callout_stop(&adapter->timer);
4942
4943 /* Don't schedule workqueues. */
4944 adapter->schedule_wqs_ok = false;
4945
4946 /* Let the stack know...*/
4947 ifp->if_flags &= ~IFF_RUNNING;
4948
4949 ixgbe_reset_hw(hw);
4950 hw->adapter_stopped = FALSE;
4951 ixgbe_stop_adapter(hw);
4952 if (hw->mac.type == ixgbe_mac_82599EB)
4953 ixgbe_stop_mac_link_on_d3_82599(hw);
4954 /* Turn off the laser - noop with no optics */
4955 ixgbe_disable_tx_laser(hw);
4956
4957 /* Update the stack */
4958 adapter->link_up = FALSE;
4959 ixgbe_update_link_status(adapter);
4960
4961 /* reprogram the RAR[0] in case user changed it. */
4962 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4963
4964 return;
4965 } /* ixgbe_stop_locked */
4966
4967 /************************************************************************
4968 * ixgbe_update_link_status - Update OS on link state
4969 *
4970 * Note: Only updates the OS on the cached link state.
4971 * The real check of the hardware only happens with
4972 * a link interrupt.
4973 ************************************************************************/
4974 static void
4975 ixgbe_update_link_status(struct adapter *adapter)
4976 {
4977 struct ifnet *ifp = adapter->ifp;
4978 device_t dev = adapter->dev;
4979 struct ixgbe_hw *hw = &adapter->hw;
4980
4981 KASSERT(mutex_owned(&adapter->core_mtx));
4982
4983 if (adapter->link_up) {
4984 if (adapter->link_active != LINK_STATE_UP) {
4985 /*
4986 * To eliminate influence of the previous state
4987 * in the same way as ixgbe_init_locked().
4988 */
4989 struct ix_queue *que = adapter->queues;
4990 for (int i = 0; i < adapter->num_queues; i++, que++)
4991 que->eitr_setting = 0;
4992
4993 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4994 /*
4995 * Discard count for both MAC Local Fault and
4996 * Remote Fault because those registers are
4997 * valid only when the link speed is up and
4998 * 10Gbps.
4999 */
5000 IXGBE_READ_REG(hw, IXGBE_MLFC);
5001 IXGBE_READ_REG(hw, IXGBE_MRFC);
5002 }
5003
5004 if (bootverbose) {
5005 const char *bpsmsg;
5006
5007 switch (adapter->link_speed) {
5008 case IXGBE_LINK_SPEED_10GB_FULL:
5009 bpsmsg = "10 Gbps";
5010 break;
5011 case IXGBE_LINK_SPEED_5GB_FULL:
5012 bpsmsg = "5 Gbps";
5013 break;
5014 case IXGBE_LINK_SPEED_2_5GB_FULL:
5015 bpsmsg = "2.5 Gbps";
5016 break;
5017 case IXGBE_LINK_SPEED_1GB_FULL:
5018 bpsmsg = "1 Gbps";
5019 break;
5020 case IXGBE_LINK_SPEED_100_FULL:
5021 bpsmsg = "100 Mbps";
5022 break;
5023 case IXGBE_LINK_SPEED_10_FULL:
5024 bpsmsg = "10 Mbps";
5025 break;
5026 default:
5027 bpsmsg = "unknown speed";
5028 break;
5029 }
5030 device_printf(dev, "Link is up %s %s \n",
5031 bpsmsg, "Full Duplex");
5032 }
5033 adapter->link_active = LINK_STATE_UP;
5034 /* Update any Flow Control changes */
5035 ixgbe_fc_enable(&adapter->hw);
5036 /* Update DMA coalescing config */
5037 ixgbe_config_dmac(adapter);
5038 if_link_state_change(ifp, LINK_STATE_UP);
5039
5040 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5041 ixgbe_ping_all_vfs(adapter);
5042 }
5043 } else {
5044 /*
5045 * Do it when link active changes to DOWN. i.e.
5046 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5047 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5048 */
5049 if (adapter->link_active != LINK_STATE_DOWN) {
5050 if (bootverbose)
5051 device_printf(dev, "Link is Down\n");
5052 if_link_state_change(ifp, LINK_STATE_DOWN);
5053 adapter->link_active = LINK_STATE_DOWN;
5054 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5055 ixgbe_ping_all_vfs(adapter);
5056 ixgbe_drain_all(adapter);
5057 }
5058 }
5059 } /* ixgbe_update_link_status */
5060
5061 /************************************************************************
5062 * ixgbe_config_dmac - Configure DMA Coalescing
5063 ************************************************************************/
5064 static void
5065 ixgbe_config_dmac(struct adapter *adapter)
5066 {
5067 struct ixgbe_hw *hw = &adapter->hw;
5068 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5069
5070 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5071 return;
5072
5073 if (dcfg->watchdog_timer ^ adapter->dmac ||
5074 dcfg->link_speed ^ adapter->link_speed) {
5075 dcfg->watchdog_timer = adapter->dmac;
5076 dcfg->fcoe_en = false;
5077 dcfg->link_speed = adapter->link_speed;
5078 dcfg->num_tcs = 1;
5079
5080 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5081 dcfg->watchdog_timer, dcfg->link_speed);
5082
5083 hw->mac.ops.dmac_config(hw);
5084 }
5085 } /* ixgbe_config_dmac */
5086
5087 /************************************************************************
5088 * ixgbe_enable_intr
5089 ************************************************************************/
5090 static void
5091 ixgbe_enable_intr(struct adapter *adapter)
5092 {
5093 struct ixgbe_hw *hw = &adapter->hw;
5094 struct ix_queue *que = adapter->queues;
5095 u32 mask, fwsm;
5096
5097 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5098
5099 switch (adapter->hw.mac.type) {
5100 case ixgbe_mac_82599EB:
5101 mask |= IXGBE_EIMS_ECC;
5102 /* Temperature sensor on some adapters */
5103 mask |= IXGBE_EIMS_GPI_SDP0;
5104 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5105 mask |= IXGBE_EIMS_GPI_SDP1;
5106 mask |= IXGBE_EIMS_GPI_SDP2;
5107 break;
5108 case ixgbe_mac_X540:
5109 /* Detect if Thermal Sensor is enabled */
5110 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5111 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5112 mask |= IXGBE_EIMS_TS;
5113 mask |= IXGBE_EIMS_ECC;
5114 break;
5115 case ixgbe_mac_X550:
5116 /* MAC thermal sensor is automatically enabled */
5117 mask |= IXGBE_EIMS_TS;
5118 mask |= IXGBE_EIMS_ECC;
5119 break;
5120 case ixgbe_mac_X550EM_x:
5121 case ixgbe_mac_X550EM_a:
5122 /* Some devices use SDP0 for important information */
5123 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5124 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5125 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5126 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5127 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5128 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5129 mask |= IXGBE_EICR_GPI_SDP0_X540;
5130 mask |= IXGBE_EIMS_ECC;
5131 break;
5132 default:
5133 break;
5134 }
5135
5136 /* Enable Fan Failure detection */
5137 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5138 mask |= IXGBE_EIMS_GPI_SDP1;
5139 /* Enable SR-IOV */
5140 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5141 mask |= IXGBE_EIMS_MAILBOX;
5142 /* Enable Flow Director */
5143 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5144 mask |= IXGBE_EIMS_FLOW_DIR;
5145
5146 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5147
5148 /* With MSI-X we use auto clear */
5149 if (adapter->msix_mem) {
5150 /*
5151 * It's not required to set TCP_TIMER because we don't use
5152 * it.
5153 */
5154 IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5155 }
5156
5157 /*
5158 * Now enable all queues, this is done separately to
5159 * allow for handling the extended (beyond 32) MSI-X
5160 * vectors that can be used by 82599
5161 */
5162 for (int i = 0; i < adapter->num_queues; i++, que++)
5163 ixgbe_enable_queue(adapter, que->msix);
5164
5165 IXGBE_WRITE_FLUSH(hw);
5166
5167 } /* ixgbe_enable_intr */
5168
5169 /************************************************************************
5170 * ixgbe_disable_intr_internal
5171 ************************************************************************/
5172 static void
5173 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5174 {
5175 struct ix_queue *que = adapter->queues;
5176
5177 /* disable interrupts other than queues */
5178 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5179
5180 if (adapter->msix_mem)
5181 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5182
5183 for (int i = 0; i < adapter->num_queues; i++, que++)
5184 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5185
5186 IXGBE_WRITE_FLUSH(&adapter->hw);
5187
5188 } /* ixgbe_do_disable_intr_internal */
5189
5190 /************************************************************************
5191 * ixgbe_disable_intr
5192 ************************************************************************/
5193 static void
5194 ixgbe_disable_intr(struct adapter *adapter)
5195 {
5196
5197 ixgbe_disable_intr_internal(adapter, true);
5198 } /* ixgbe_disable_intr */
5199
5200 /************************************************************************
5201 * ixgbe_ensure_disabled_intr
5202 ************************************************************************/
5203 void
5204 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5205 {
5206
5207 ixgbe_disable_intr_internal(adapter, false);
5208 } /* ixgbe_ensure_disabled_intr */
5209
5210 /************************************************************************
5211 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5212 ************************************************************************/
5213 static int
5214 ixgbe_legacy_irq(void *arg)
5215 {
5216 struct ix_queue *que = arg;
5217 struct adapter *adapter = que->adapter;
5218 struct ixgbe_hw *hw = &adapter->hw;
5219 struct tx_ring *txr = adapter->tx_rings;
5220 u32 eicr;
5221 u32 eims_orig;
5222 u32 eims_enable = 0;
5223 u32 eims_disable = 0;
5224
5225 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5226 /*
5227 * Silicon errata #26 on 82598. Disable all interrupts before reading
5228 * EICR.
5229 */
5230 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5231
5232 /* Read and clear EICR */
5233 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5234
5235 adapter->stats.pf.legint.ev_count++;
5236 if (eicr == 0) {
5237 adapter->stats.pf.intzero.ev_count++;
5238 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5239 return 0;
5240 }
5241
5242 /* Queue (0) intr */
5243 if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5244 ++que->irqs.ev_count;
5245
5246 /*
5247 * The same as ixgbe_msix_que() about
5248 * "que->txrx_use_workqueue".
5249 */
5250 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5251
5252 IXGBE_TX_LOCK(txr);
5253 ixgbe_txeof(txr);
5254 #ifdef notyet
5255 if (!ixgbe_ring_empty(ifp, txr->br))
5256 ixgbe_start_locked(ifp, txr);
5257 #endif
5258 IXGBE_TX_UNLOCK(txr);
5259
5260 que->req.ev_count++;
5261 ixgbe_sched_handle_que(adapter, que);
5262 /* Disable queue 0 interrupt */
5263 eims_disable |= 1UL << 0;
5264
5265 } else
5266 eims_enable |= IXGBE_EIMC_RTX_QUEUE;
5267
5268 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
5269
5270 /* Re-enable some interrupts */
5271 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5272 (eims_orig & ~eims_disable) | eims_enable);
5273
5274 return 1;
5275 } /* ixgbe_legacy_irq */
5276
5277 /************************************************************************
5278 * ixgbe_free_pciintr_resources
5279 ************************************************************************/
5280 static void
5281 ixgbe_free_pciintr_resources(struct adapter *adapter)
5282 {
5283 struct ix_queue *que = adapter->queues;
5284 int rid;
5285
5286 /*
5287 * Release all msix queue resources:
5288 */
5289 for (int i = 0; i < adapter->num_queues; i++, que++) {
5290 if (que->res != NULL) {
5291 pci_intr_disestablish(adapter->osdep.pc,
5292 adapter->osdep.ihs[i]);
5293 adapter->osdep.ihs[i] = NULL;
5294 }
5295 }
5296
5297 /* Clean the Legacy or Link interrupt last */
5298 if (adapter->vector) /* we are doing MSIX */
5299 rid = adapter->vector;
5300 else
5301 rid = 0;
5302
5303 if (adapter->osdep.ihs[rid] != NULL) {
5304 pci_intr_disestablish(adapter->osdep.pc,
5305 adapter->osdep.ihs[rid]);
5306 adapter->osdep.ihs[rid] = NULL;
5307 }
5308
5309 if (adapter->osdep.intrs != NULL) {
5310 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5311 adapter->osdep.nintrs);
5312 adapter->osdep.intrs = NULL;
5313 }
5314 } /* ixgbe_free_pciintr_resources */
5315
5316 /************************************************************************
5317 * ixgbe_free_pci_resources
5318 ************************************************************************/
5319 static void
5320 ixgbe_free_pci_resources(struct adapter *adapter)
5321 {
5322
5323 ixgbe_free_pciintr_resources(adapter);
5324
5325 if (adapter->osdep.mem_size != 0) {
5326 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5327 adapter->osdep.mem_bus_space_handle,
5328 adapter->osdep.mem_size);
5329 }
5330
5331 } /* ixgbe_free_pci_resources */
5332
5333 /************************************************************************
5334 * ixgbe_set_sysctl_value
5335 ************************************************************************/
5336 static void
5337 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5338 const char *description, int *limit, int value)
5339 {
5340 device_t dev = adapter->dev;
5341 struct sysctllog **log;
5342 const struct sysctlnode *rnode, *cnode;
5343
5344 /*
5345 * It's not required to check recovery mode because this function never
5346 * touches hardware.
5347 */
5348
5349 log = &adapter->sysctllog;
5350 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5351 aprint_error_dev(dev, "could not create sysctl root\n");
5352 return;
5353 }
5354 if (sysctl_createv(log, 0, &rnode, &cnode,
5355 CTLFLAG_READWRITE, CTLTYPE_INT,
5356 name, SYSCTL_DESCR(description),
5357 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5358 aprint_error_dev(dev, "could not create sysctl\n");
5359 *limit = value;
5360 } /* ixgbe_set_sysctl_value */
5361
5362 /************************************************************************
5363 * ixgbe_sysctl_flowcntl
5364 *
5365 * SYSCTL wrapper around setting Flow Control
5366 ************************************************************************/
5367 static int
5368 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5369 {
5370 struct sysctlnode node = *rnode;
5371 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5372 int error, fc;
5373
5374 if (ixgbe_fw_recovery_mode_swflag(adapter))
5375 return (EPERM);
5376
5377 fc = adapter->hw.fc.current_mode;
5378 node.sysctl_data = &fc;
5379 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5380 if (error != 0 || newp == NULL)
5381 return error;
5382
5383 /* Don't bother if it's not changed */
5384 if (fc == adapter->hw.fc.current_mode)
5385 return (0);
5386
5387 return ixgbe_set_flowcntl(adapter, fc);
5388 } /* ixgbe_sysctl_flowcntl */
5389
5390 /************************************************************************
5391 * ixgbe_set_flowcntl - Set flow control
5392 *
5393 * Flow control values:
5394 * 0 - off
5395 * 1 - rx pause
5396 * 2 - tx pause
5397 * 3 - full
5398 ************************************************************************/
5399 static int
5400 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5401 {
5402 switch (fc) {
5403 case ixgbe_fc_rx_pause:
5404 case ixgbe_fc_tx_pause:
5405 case ixgbe_fc_full:
5406 adapter->hw.fc.requested_mode = fc;
5407 if (adapter->num_queues > 1)
5408 ixgbe_disable_rx_drop(adapter);
5409 break;
5410 case ixgbe_fc_none:
5411 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5412 if (adapter->num_queues > 1)
5413 ixgbe_enable_rx_drop(adapter);
5414 break;
5415 default:
5416 return (EINVAL);
5417 }
5418
5419 #if 0 /* XXX NetBSD */
5420 /* Don't autoneg if forcing a value */
5421 adapter->hw.fc.disable_fc_autoneg = TRUE;
5422 #endif
5423 ixgbe_fc_enable(&adapter->hw);
5424
5425 return (0);
5426 } /* ixgbe_set_flowcntl */
5427
5428 /************************************************************************
5429 * ixgbe_enable_rx_drop
5430 *
5431 * Enable the hardware to drop packets when the buffer is
5432 * full. This is useful with multiqueue, so that no single
5433 * queue being full stalls the entire RX engine. We only
5434 * enable this when Multiqueue is enabled AND Flow Control
5435 * is disabled.
5436 ************************************************************************/
5437 static void
5438 ixgbe_enable_rx_drop(struct adapter *adapter)
5439 {
5440 struct ixgbe_hw *hw = &adapter->hw;
5441 struct rx_ring *rxr;
5442 u32 srrctl;
5443
5444 for (int i = 0; i < adapter->num_queues; i++) {
5445 rxr = &adapter->rx_rings[i];
5446 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5447 srrctl |= IXGBE_SRRCTL_DROP_EN;
5448 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5449 }
5450
5451 /* enable drop for each vf */
5452 for (int i = 0; i < adapter->num_vfs; i++) {
5453 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5454 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5455 IXGBE_QDE_ENABLE));
5456 }
5457 } /* ixgbe_enable_rx_drop */
5458
5459 /************************************************************************
5460 * ixgbe_disable_rx_drop
5461 ************************************************************************/
5462 static void
5463 ixgbe_disable_rx_drop(struct adapter *adapter)
5464 {
5465 struct ixgbe_hw *hw = &adapter->hw;
5466 struct rx_ring *rxr;
5467 u32 srrctl;
5468
5469 for (int i = 0; i < adapter->num_queues; i++) {
5470 rxr = &adapter->rx_rings[i];
5471 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5472 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5473 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5474 }
5475
5476 /* disable drop for each vf */
5477 for (int i = 0; i < adapter->num_vfs; i++) {
5478 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5479 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5480 }
5481 } /* ixgbe_disable_rx_drop */
5482
5483 /************************************************************************
5484 * ixgbe_sysctl_advertise
5485 *
5486 * SYSCTL wrapper around setting advertised speed
5487 ************************************************************************/
5488 static int
5489 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5490 {
5491 struct sysctlnode node = *rnode;
5492 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5493 int error = 0, advertise;
5494
5495 if (ixgbe_fw_recovery_mode_swflag(adapter))
5496 return (EPERM);
5497
5498 advertise = adapter->advertise;
5499 node.sysctl_data = &advertise;
5500 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5501 if (error != 0 || newp == NULL)
5502 return error;
5503
5504 return ixgbe_set_advertise(adapter, advertise);
5505 } /* ixgbe_sysctl_advertise */
5506
5507 /************************************************************************
5508 * ixgbe_set_advertise - Control advertised link speed
5509 *
5510 * Flags:
5511 * 0x00 - Default (all capable link speed)
5512 * 0x01 - advertise 100 Mb
5513 * 0x02 - advertise 1G
5514 * 0x04 - advertise 10G
5515 * 0x08 - advertise 10 Mb
5516 * 0x10 - advertise 2.5G
5517 * 0x20 - advertise 5G
5518 ************************************************************************/
5519 static int
5520 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5521 {
5522 device_t dev;
5523 struct ixgbe_hw *hw;
5524 ixgbe_link_speed speed = 0;
5525 ixgbe_link_speed link_caps = 0;
5526 s32 err = IXGBE_NOT_IMPLEMENTED;
5527 bool negotiate = FALSE;
5528
5529 /* Checks to validate new value */
5530 if (adapter->advertise == advertise) /* no change */
5531 return (0);
5532
5533 dev = adapter->dev;
5534 hw = &adapter->hw;
5535
5536 /* No speed changes for backplane media */
5537 if (hw->phy.media_type == ixgbe_media_type_backplane)
5538 return (ENODEV);
5539
5540 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5541 (hw->phy.multispeed_fiber))) {
5542 device_printf(dev,
5543 "Advertised speed can only be set on copper or "
5544 "multispeed fiber media types.\n");
5545 return (EINVAL);
5546 }
5547
5548 if (advertise < 0x0 || advertise > 0x3f) {
5549 device_printf(dev, "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5550 return (EINVAL);
5551 }
5552
5553 if (hw->mac.ops.get_link_capabilities) {
5554 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5555 &negotiate);
5556 if (err != IXGBE_SUCCESS) {
5557 device_printf(dev, "Unable to determine supported advertise speeds\n");
5558 return (ENODEV);
5559 }
5560 }
5561
5562 /* Set new value and report new advertised mode */
5563 if (advertise & 0x1) {
5564 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5565 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5566 return (EINVAL);
5567 }
5568 speed |= IXGBE_LINK_SPEED_100_FULL;
5569 }
5570 if (advertise & 0x2) {
5571 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5572 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5573 return (EINVAL);
5574 }
5575 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5576 }
5577 if (advertise & 0x4) {
5578 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5579 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5580 return (EINVAL);
5581 }
5582 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5583 }
5584 if (advertise & 0x8) {
5585 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5586 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5587 return (EINVAL);
5588 }
5589 speed |= IXGBE_LINK_SPEED_10_FULL;
5590 }
5591 if (advertise & 0x10) {
5592 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5593 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5594 return (EINVAL);
5595 }
5596 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5597 }
5598 if (advertise & 0x20) {
5599 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5600 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5601 return (EINVAL);
5602 }
5603 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5604 }
5605 if (advertise == 0)
5606 speed = link_caps; /* All capable link speed */
5607
5608 hw->mac.autotry_restart = TRUE;
5609 hw->mac.ops.setup_link(hw, speed, TRUE);
5610 adapter->advertise = advertise;
5611
5612 return (0);
5613 } /* ixgbe_set_advertise */
5614
5615 /************************************************************************
5616 * ixgbe_get_advertise - Get current advertised speed settings
5617 *
5618 * Formatted for sysctl usage.
5619 * Flags:
5620 * 0x01 - advertise 100 Mb
5621 * 0x02 - advertise 1G
5622 * 0x04 - advertise 10G
5623 * 0x08 - advertise 10 Mb (yes, Mb)
5624 * 0x10 - advertise 2.5G
5625 * 0x20 - advertise 5G
5626 ************************************************************************/
5627 static int
5628 ixgbe_get_advertise(struct adapter *adapter)
5629 {
5630 struct ixgbe_hw *hw = &adapter->hw;
5631 int speed;
5632 ixgbe_link_speed link_caps = 0;
5633 s32 err;
5634 bool negotiate = FALSE;
5635
5636 /*
5637 * Advertised speed means nothing unless it's copper or
5638 * multi-speed fiber
5639 */
5640 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5641 !(hw->phy.multispeed_fiber))
5642 return (0);
5643
5644 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5645 if (err != IXGBE_SUCCESS)
5646 return (0);
5647
5648 speed =
5649 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5650 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5651 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5652 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5653 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5654 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5655
5656 return speed;
5657 } /* ixgbe_get_advertise */
5658
5659 /************************************************************************
5660 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5661 *
5662 * Control values:
5663 * 0/1 - off / on (use default value of 1000)
5664 *
5665 * Legal timer values are:
5666 * 50,100,250,500,1000,2000,5000,10000
5667 *
5668 * Turning off interrupt moderation will also turn this off.
5669 ************************************************************************/
5670 static int
5671 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5672 {
5673 struct sysctlnode node = *rnode;
5674 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5675 struct ifnet *ifp = adapter->ifp;
5676 int error;
5677 int newval;
5678
5679 if (ixgbe_fw_recovery_mode_swflag(adapter))
5680 return (EPERM);
5681
5682 newval = adapter->dmac;
5683 node.sysctl_data = &newval;
5684 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5685 if ((error) || (newp == NULL))
5686 return (error);
5687
5688 switch (newval) {
5689 case 0:
5690 /* Disabled */
5691 adapter->dmac = 0;
5692 break;
5693 case 1:
5694 /* Enable and use default */
5695 adapter->dmac = 1000;
5696 break;
5697 case 50:
5698 case 100:
5699 case 250:
5700 case 500:
5701 case 1000:
5702 case 2000:
5703 case 5000:
5704 case 10000:
5705 /* Legal values - allow */
5706 adapter->dmac = newval;
5707 break;
5708 default:
5709 /* Do nothing, illegal value */
5710 return (EINVAL);
5711 }
5712
5713 /* Re-initialize hardware if it's already running */
5714 if (ifp->if_flags & IFF_RUNNING)
5715 ifp->if_init(ifp);
5716
5717 return (0);
5718 }
5719
5720 #ifdef IXGBE_DEBUG
5721 /************************************************************************
5722 * ixgbe_sysctl_power_state
5723 *
5724 * Sysctl to test power states
5725 * Values:
5726 * 0 - set device to D0
5727 * 3 - set device to D3
5728 * (none) - get current device power state
5729 ************************************************************************/
5730 static int
5731 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5732 {
5733 #ifdef notyet
5734 struct sysctlnode node = *rnode;
5735 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5736 device_t dev = adapter->dev;
5737 int curr_ps, new_ps, error = 0;
5738
5739 if (ixgbe_fw_recovery_mode_swflag(adapter))
5740 return (EPERM);
5741
5742 curr_ps = new_ps = pci_get_powerstate(dev);
5743
5744 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5745 if ((error) || (req->newp == NULL))
5746 return (error);
5747
5748 if (new_ps == curr_ps)
5749 return (0);
5750
5751 if (new_ps == 3 && curr_ps == 0)
5752 error = DEVICE_SUSPEND(dev);
5753 else if (new_ps == 0 && curr_ps == 3)
5754 error = DEVICE_RESUME(dev);
5755 else
5756 return (EINVAL);
5757
5758 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5759
5760 return (error);
5761 #else
5762 return 0;
5763 #endif
5764 } /* ixgbe_sysctl_power_state */
5765 #endif
5766
5767 /************************************************************************
5768 * ixgbe_sysctl_wol_enable
5769 *
5770 * Sysctl to enable/disable the WoL capability,
5771 * if supported by the adapter.
5772 *
5773 * Values:
5774 * 0 - disabled
5775 * 1 - enabled
5776 ************************************************************************/
5777 static int
5778 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5779 {
5780 struct sysctlnode node = *rnode;
5781 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5782 struct ixgbe_hw *hw = &adapter->hw;
5783 bool new_wol_enabled;
5784 int error = 0;
5785
5786 /*
5787 * It's not required to check recovery mode because this function never
5788 * touches hardware.
5789 */
5790 new_wol_enabled = hw->wol_enabled;
5791 node.sysctl_data = &new_wol_enabled;
5792 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5793 if ((error) || (newp == NULL))
5794 return (error);
5795 if (new_wol_enabled == hw->wol_enabled)
5796 return (0);
5797
5798 if (new_wol_enabled && !adapter->wol_support)
5799 return (ENODEV);
5800 else
5801 hw->wol_enabled = new_wol_enabled;
5802
5803 return (0);
5804 } /* ixgbe_sysctl_wol_enable */
5805
5806 /************************************************************************
5807 * ixgbe_sysctl_wufc - Wake Up Filter Control
5808 *
5809 * Sysctl to enable/disable the types of packets that the
5810 * adapter will wake up on upon receipt.
5811 * Flags:
5812 * 0x1 - Link Status Change
5813 * 0x2 - Magic Packet
5814 * 0x4 - Direct Exact
5815 * 0x8 - Directed Multicast
5816 * 0x10 - Broadcast
5817 * 0x20 - ARP/IPv4 Request Packet
5818 * 0x40 - Direct IPv4 Packet
5819 * 0x80 - Direct IPv6 Packet
5820 *
5821 * Settings not listed above will cause the sysctl to return an error.
5822 ************************************************************************/
5823 static int
5824 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5825 {
5826 struct sysctlnode node = *rnode;
5827 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5828 int error = 0;
5829 u32 new_wufc;
5830
5831 /*
5832 * It's not required to check recovery mode because this function never
5833 * touches hardware.
5834 */
5835 new_wufc = adapter->wufc;
5836 node.sysctl_data = &new_wufc;
5837 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5838 if ((error) || (newp == NULL))
5839 return (error);
5840 if (new_wufc == adapter->wufc)
5841 return (0);
5842
5843 if (new_wufc & 0xffffff00)
5844 return (EINVAL);
5845
5846 new_wufc &= 0xff;
5847 new_wufc |= (0xffffff & adapter->wufc);
5848 adapter->wufc = new_wufc;
5849
5850 return (0);
5851 } /* ixgbe_sysctl_wufc */
5852
5853 #ifdef IXGBE_DEBUG
5854 /************************************************************************
5855 * ixgbe_sysctl_print_rss_config
5856 ************************************************************************/
5857 static int
5858 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5859 {
5860 #ifdef notyet
5861 struct sysctlnode node = *rnode;
5862 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5863 struct ixgbe_hw *hw = &adapter->hw;
5864 device_t dev = adapter->dev;
5865 struct sbuf *buf;
5866 int error = 0, reta_size;
5867 u32 reg;
5868
5869 if (ixgbe_fw_recovery_mode_swflag(adapter))
5870 return (EPERM);
5871
5872 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5873 if (!buf) {
5874 device_printf(dev, "Could not allocate sbuf for output.\n");
5875 return (ENOMEM);
5876 }
5877
5878 // TODO: use sbufs to make a string to print out
5879 /* Set multiplier for RETA setup and table size based on MAC */
5880 switch (adapter->hw.mac.type) {
5881 case ixgbe_mac_X550:
5882 case ixgbe_mac_X550EM_x:
5883 case ixgbe_mac_X550EM_a:
5884 reta_size = 128;
5885 break;
5886 default:
5887 reta_size = 32;
5888 break;
5889 }
5890
5891 /* Print out the redirection table */
5892 sbuf_cat(buf, "\n");
5893 for (int i = 0; i < reta_size; i++) {
5894 if (i < 32) {
5895 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5896 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5897 } else {
5898 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5899 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5900 }
5901 }
5902
5903 // TODO: print more config
5904
5905 error = sbuf_finish(buf);
5906 if (error)
5907 device_printf(dev, "Error finishing sbuf: %d\n", error);
5908
5909 sbuf_delete(buf);
5910 #endif
5911 return (0);
5912 } /* ixgbe_sysctl_print_rss_config */
5913 #endif /* IXGBE_DEBUG */
5914
5915 /************************************************************************
5916 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5917 *
5918 * For X552/X557-AT devices using an external PHY
5919 ************************************************************************/
5920 static int
5921 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5922 {
5923 struct sysctlnode node = *rnode;
5924 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5925 struct ixgbe_hw *hw = &adapter->hw;
5926 int val;
5927 u16 reg;
5928 int error;
5929
5930 if (ixgbe_fw_recovery_mode_swflag(adapter))
5931 return (EPERM);
5932
5933 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5934 device_printf(adapter->dev,
5935 "Device has no supported external thermal sensor.\n");
5936 return (ENODEV);
5937 }
5938
5939 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5940 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5941 device_printf(adapter->dev,
5942 "Error reading from PHY's current temperature register\n");
5943 return (EAGAIN);
5944 }
5945
5946 node.sysctl_data = &val;
5947
5948 /* Shift temp for output */
5949 val = reg >> 8;
5950
5951 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5952 if ((error) || (newp == NULL))
5953 return (error);
5954
5955 return (0);
5956 } /* ixgbe_sysctl_phy_temp */
5957
5958 /************************************************************************
5959 * ixgbe_sysctl_phy_overtemp_occurred
5960 *
5961 * Reports (directly from the PHY) whether the current PHY
5962 * temperature is over the overtemp threshold.
5963 ************************************************************************/
5964 static int
5965 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5966 {
5967 struct sysctlnode node = *rnode;
5968 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5969 struct ixgbe_hw *hw = &adapter->hw;
5970 int val, error;
5971 u16 reg;
5972
5973 if (ixgbe_fw_recovery_mode_swflag(adapter))
5974 return (EPERM);
5975
5976 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5977 device_printf(adapter->dev,
5978 "Device has no supported external thermal sensor.\n");
5979 return (ENODEV);
5980 }
5981
5982 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5983 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5984 device_printf(adapter->dev,
5985 "Error reading from PHY's temperature status register\n");
5986 return (EAGAIN);
5987 }
5988
5989 node.sysctl_data = &val;
5990
5991 /* Get occurrence bit */
5992 val = !!(reg & 0x4000);
5993
5994 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5995 if ((error) || (newp == NULL))
5996 return (error);
5997
5998 return (0);
5999 } /* ixgbe_sysctl_phy_overtemp_occurred */
6000
6001 /************************************************************************
6002 * ixgbe_sysctl_eee_state
6003 *
6004 * Sysctl to set EEE power saving feature
6005 * Values:
6006 * 0 - disable EEE
6007 * 1 - enable EEE
6008 * (none) - get current device EEE state
6009 ************************************************************************/
6010 static int
6011 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6012 {
6013 struct sysctlnode node = *rnode;
6014 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6015 struct ifnet *ifp = adapter->ifp;
6016 device_t dev = adapter->dev;
6017 int curr_eee, new_eee, error = 0;
6018 s32 retval;
6019
6020 if (ixgbe_fw_recovery_mode_swflag(adapter))
6021 return (EPERM);
6022
6023 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6024 node.sysctl_data = &new_eee;
6025 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6026 if ((error) || (newp == NULL))
6027 return (error);
6028
6029 /* Nothing to do */
6030 if (new_eee == curr_eee)
6031 return (0);
6032
6033 /* Not supported */
6034 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6035 return (EINVAL);
6036
6037 /* Bounds checking */
6038 if ((new_eee < 0) || (new_eee > 1))
6039 return (EINVAL);
6040
6041 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6042 if (retval) {
6043 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6044 return (EINVAL);
6045 }
6046
6047 /* Restart auto-neg */
6048 ifp->if_init(ifp);
6049
6050 device_printf(dev, "New EEE state: %d\n", new_eee);
6051
6052 /* Cache new value */
6053 if (new_eee)
6054 adapter->feat_en |= IXGBE_FEATURE_EEE;
6055 else
6056 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6057
6058 return (error);
6059 } /* ixgbe_sysctl_eee_state */
6060
6061 #define PRINTQS(adapter, regname) \
6062 do { \
6063 struct ixgbe_hw *_hw = &(adapter)->hw; \
6064 int _i; \
6065 \
6066 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6067 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6068 printf((_i == 0) ? "\t" : " "); \
6069 printf("%08x", IXGBE_READ_REG(_hw, \
6070 IXGBE_##regname(_i))); \
6071 } \
6072 printf("\n"); \
6073 } while (0)
6074
6075 /************************************************************************
6076 * ixgbe_print_debug_info
6077 *
6078 * Called only when em_display_debug_stats is enabled.
6079 * Provides a way to take a look at important statistics
6080 * maintained by the driver and hardware.
6081 ************************************************************************/
6082 static void
6083 ixgbe_print_debug_info(struct adapter *adapter)
6084 {
6085 device_t dev = adapter->dev;
6086 struct ixgbe_hw *hw = &adapter->hw;
6087 int table_size;
6088 int i;
6089
6090 switch (adapter->hw.mac.type) {
6091 case ixgbe_mac_X550:
6092 case ixgbe_mac_X550EM_x:
6093 case ixgbe_mac_X550EM_a:
6094 table_size = 128;
6095 break;
6096 default:
6097 table_size = 32;
6098 break;
6099 }
6100
6101 device_printf(dev, "[E]RETA:\n");
6102 for (i = 0; i < table_size; i++) {
6103 if (i < 32)
6104 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6105 IXGBE_RETA(i)));
6106 else
6107 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6108 IXGBE_ERETA(i - 32)));
6109 }
6110
6111 device_printf(dev, "queue:");
6112 for (i = 0; i < adapter->num_queues; i++) {
6113 printf((i == 0) ? "\t" : " ");
6114 printf("%8d", i);
6115 }
6116 printf("\n");
6117 PRINTQS(adapter, RDBAL);
6118 PRINTQS(adapter, RDBAH);
6119 PRINTQS(adapter, RDLEN);
6120 PRINTQS(adapter, SRRCTL);
6121 PRINTQS(adapter, RDH);
6122 PRINTQS(adapter, RDT);
6123 PRINTQS(adapter, RXDCTL);
6124
6125 device_printf(dev, "RQSMR:");
6126 for (i = 0; i < adapter->num_queues / 4; i++) {
6127 printf((i == 0) ? "\t" : " ");
6128 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6129 }
6130 printf("\n");
6131
6132 device_printf(dev, "disabled_count:");
6133 for (i = 0; i < adapter->num_queues; i++) {
6134 printf((i == 0) ? "\t" : " ");
6135 printf("%8d", adapter->queues[i].disabled_count);
6136 }
6137 printf("\n");
6138
6139 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6140 if (hw->mac.type != ixgbe_mac_82598EB) {
6141 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6142 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6143 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6144 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6145 }
6146 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6147 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6148 } /* ixgbe_print_debug_info */
6149
6150 /************************************************************************
6151 * ixgbe_sysctl_debug
6152 ************************************************************************/
6153 static int
6154 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6155 {
6156 struct sysctlnode node = *rnode;
6157 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6158 int error, result = 0;
6159
6160 if (ixgbe_fw_recovery_mode_swflag(adapter))
6161 return (EPERM);
6162
6163 node.sysctl_data = &result;
6164 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6165
6166 if (error || newp == NULL)
6167 return error;
6168
6169 if (result == 1)
6170 ixgbe_print_debug_info(adapter);
6171
6172 return 0;
6173 } /* ixgbe_sysctl_debug */
6174
6175 /************************************************************************
6176 * ixgbe_init_device_features
6177 ************************************************************************/
6178 static void
6179 ixgbe_init_device_features(struct adapter *adapter)
6180 {
6181 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6182 | IXGBE_FEATURE_RSS
6183 | IXGBE_FEATURE_MSI
6184 | IXGBE_FEATURE_MSIX
6185 | IXGBE_FEATURE_LEGACY_IRQ
6186 | IXGBE_FEATURE_LEGACY_TX;
6187
6188 /* Set capabilities first... */
6189 switch (adapter->hw.mac.type) {
6190 case ixgbe_mac_82598EB:
6191 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6192 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6193 break;
6194 case ixgbe_mac_X540:
6195 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6196 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6197 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6198 (adapter->hw.bus.func == 0))
6199 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6200 break;
6201 case ixgbe_mac_X550:
6202 /*
6203 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6204 * NVM Image version.
6205 */
6206 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6207 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6208 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6209 break;
6210 case ixgbe_mac_X550EM_x:
6211 /*
6212 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6213 * NVM Image version.
6214 */
6215 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6216 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6217 break;
6218 case ixgbe_mac_X550EM_a:
6219 /*
6220 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6221 * NVM Image version.
6222 */
6223 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6224 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6225 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6226 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6227 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6228 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6229 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6230 }
6231 break;
6232 case ixgbe_mac_82599EB:
6233 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6234 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6235 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6236 (adapter->hw.bus.func == 0))
6237 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6238 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6239 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6240 break;
6241 default:
6242 break;
6243 }
6244
6245 /* Enabled by default... */
6246 /* Fan failure detection */
6247 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6248 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6249 /* Netmap */
6250 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6251 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6252 /* EEE */
6253 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6254 adapter->feat_en |= IXGBE_FEATURE_EEE;
6255 /* Thermal Sensor */
6256 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6257 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6258 /*
6259 * Recovery mode:
6260 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6261 * NVM Image version.
6262 */
6263
6264 /* Enabled via global sysctl... */
6265 /* Flow Director */
6266 if (ixgbe_enable_fdir) {
6267 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6268 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6269 else
6270 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6271 }
6272 /* Legacy (single queue) transmit */
6273 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6274 ixgbe_enable_legacy_tx)
6275 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6276 /*
6277 * Message Signal Interrupts - Extended (MSI-X)
6278 * Normal MSI is only enabled if MSI-X calls fail.
6279 */
6280 if (!ixgbe_enable_msix)
6281 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6282 /* Receive-Side Scaling (RSS) */
6283 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6284 adapter->feat_en |= IXGBE_FEATURE_RSS;
6285
6286 /* Disable features with unmet dependencies... */
6287 /* No MSI-X */
6288 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6289 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6290 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6291 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6292 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6293 }
6294 } /* ixgbe_init_device_features */
6295
6296 /************************************************************************
6297 * ixgbe_probe - Device identification routine
6298 *
6299 * Determines if the driver should be loaded on
6300 * adapter based on its PCI vendor/device ID.
6301 *
6302 * return BUS_PROBE_DEFAULT on success, positive on failure
6303 ************************************************************************/
6304 static int
6305 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6306 {
6307 const struct pci_attach_args *pa = aux;
6308
6309 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6310 }
6311
6312 static const ixgbe_vendor_info_t *
6313 ixgbe_lookup(const struct pci_attach_args *pa)
6314 {
6315 const ixgbe_vendor_info_t *ent;
6316 pcireg_t subid;
6317
6318 INIT_DEBUGOUT("ixgbe_lookup: begin");
6319
6320 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6321 return NULL;
6322
6323 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6324
6325 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6326 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6327 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6328 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6329 (ent->subvendor_id == 0)) &&
6330 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6331 (ent->subdevice_id == 0))) {
6332 return ent;
6333 }
6334 }
6335 return NULL;
6336 }
6337
6338 static int
6339 ixgbe_ifflags_cb(struct ethercom *ec)
6340 {
6341 struct ifnet *ifp = &ec->ec_if;
6342 struct adapter *adapter = ifp->if_softc;
6343 u_short change;
6344 int rv = 0;
6345
6346 IXGBE_CORE_LOCK(adapter);
6347
6348 change = ifp->if_flags ^ adapter->if_flags;
6349 if (change != 0)
6350 adapter->if_flags = ifp->if_flags;
6351
6352 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6353 rv = ENETRESET;
6354 goto out;
6355 } else if ((change & IFF_PROMISC) != 0)
6356 ixgbe_set_rxfilter(adapter);
6357
6358 /* Check for ec_capenable. */
6359 change = ec->ec_capenable ^ adapter->ec_capenable;
6360 adapter->ec_capenable = ec->ec_capenable;
6361 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6362 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6363 rv = ENETRESET;
6364 goto out;
6365 }
6366
6367 /*
6368 * Special handling is not required for ETHERCAP_VLAN_MTU.
6369 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
6370 */
6371
6372 /* Set up VLAN support and filter */
6373 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6374 ixgbe_setup_vlan_hw_support(adapter);
6375
6376 out:
6377 IXGBE_CORE_UNLOCK(adapter);
6378
6379 return rv;
6380 }
6381
6382 /************************************************************************
6383 * ixgbe_ioctl - Ioctl entry point
6384 *
6385 * Called when the user wants to configure the interface.
6386 *
6387 * return 0 on success, positive on failure
6388 ************************************************************************/
6389 static int
6390 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6391 {
6392 struct adapter *adapter = ifp->if_softc;
6393 struct ixgbe_hw *hw = &adapter->hw;
6394 struct ifcapreq *ifcr = data;
6395 struct ifreq *ifr = data;
6396 int error = 0;
6397 int l4csum_en;
6398 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6399 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6400
6401 if (ixgbe_fw_recovery_mode_swflag(adapter))
6402 return (EPERM);
6403
6404 switch (command) {
6405 case SIOCSIFFLAGS:
6406 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6407 break;
6408 case SIOCADDMULTI:
6409 case SIOCDELMULTI:
6410 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6411 break;
6412 case SIOCSIFMEDIA:
6413 case SIOCGIFMEDIA:
6414 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6415 break;
6416 case SIOCSIFCAP:
6417 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6418 break;
6419 case SIOCSIFMTU:
6420 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6421 break;
6422 #ifdef __NetBSD__
6423 case SIOCINITIFADDR:
6424 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6425 break;
6426 case SIOCGIFFLAGS:
6427 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6428 break;
6429 case SIOCGIFAFLAG_IN:
6430 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6431 break;
6432 case SIOCGIFADDR:
6433 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6434 break;
6435 case SIOCGIFMTU:
6436 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6437 break;
6438 case SIOCGIFCAP:
6439 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6440 break;
6441 case SIOCGETHERCAP:
6442 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6443 break;
6444 case SIOCGLIFADDR:
6445 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6446 break;
6447 case SIOCZIFDATA:
6448 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6449 hw->mac.ops.clear_hw_cntrs(hw);
6450 ixgbe_clear_evcnt(adapter);
6451 break;
6452 case SIOCAIFADDR:
6453 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6454 break;
6455 #endif
6456 default:
6457 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6458 break;
6459 }
6460
6461 switch (command) {
6462 case SIOCGI2C:
6463 {
6464 struct ixgbe_i2c_req i2c;
6465
6466 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6467 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6468 if (error != 0)
6469 break;
6470 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6471 error = EINVAL;
6472 break;
6473 }
6474 if (i2c.len > sizeof(i2c.data)) {
6475 error = EINVAL;
6476 break;
6477 }
6478
6479 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6480 i2c.dev_addr, i2c.data);
6481 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6482 break;
6483 }
6484 case SIOCSIFCAP:
6485 /* Layer-4 Rx checksum offload has to be turned on and
6486 * off as a unit.
6487 */
6488 l4csum_en = ifcr->ifcr_capenable & l4csum;
6489 if (l4csum_en != l4csum && l4csum_en != 0)
6490 return EINVAL;
6491 /*FALLTHROUGH*/
6492 case SIOCADDMULTI:
6493 case SIOCDELMULTI:
6494 case SIOCSIFFLAGS:
6495 case SIOCSIFMTU:
6496 default:
6497 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6498 return error;
6499 if ((ifp->if_flags & IFF_RUNNING) == 0)
6500 ;
6501 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6502 IXGBE_CORE_LOCK(adapter);
6503 if ((ifp->if_flags & IFF_RUNNING) != 0)
6504 ixgbe_init_locked(adapter);
6505 ixgbe_recalculate_max_frame(adapter);
6506 IXGBE_CORE_UNLOCK(adapter);
6507 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6508 /*
6509 * Multicast list has changed; set the hardware filter
6510 * accordingly.
6511 */
6512 IXGBE_CORE_LOCK(adapter);
6513 ixgbe_disable_intr(adapter);
6514 ixgbe_set_rxfilter(adapter);
6515 ixgbe_enable_intr(adapter);
6516 IXGBE_CORE_UNLOCK(adapter);
6517 }
6518 return 0;
6519 }
6520
6521 return error;
6522 } /* ixgbe_ioctl */
6523
6524 /************************************************************************
6525 * ixgbe_check_fan_failure
6526 ************************************************************************/
6527 static int
6528 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6529 {
6530 u32 mask;
6531
6532 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6533 IXGBE_ESDP_SDP1;
6534
6535 if (reg & mask) {
6536 device_printf(adapter->dev,
6537 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6538 return IXGBE_ERR_FAN_FAILURE;
6539 }
6540
6541 return IXGBE_SUCCESS;
6542 } /* ixgbe_check_fan_failure */
6543
6544 /************************************************************************
6545 * ixgbe_handle_que
6546 ************************************************************************/
6547 static void
6548 ixgbe_handle_que(void *context)
6549 {
6550 struct ix_queue *que = context;
6551 struct adapter *adapter = que->adapter;
6552 struct tx_ring *txr = que->txr;
6553 struct ifnet *ifp = adapter->ifp;
6554 bool more = false;
6555
6556 que->handleq.ev_count++;
6557
6558 if (ifp->if_flags & IFF_RUNNING) {
6559 more = ixgbe_rxeof(que);
6560 IXGBE_TX_LOCK(txr);
6561 more |= ixgbe_txeof(txr);
6562 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6563 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6564 ixgbe_mq_start_locked(ifp, txr);
6565 /* Only for queue 0 */
6566 /* NetBSD still needs this for CBQ */
6567 if ((&adapter->queues[0] == que)
6568 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6569 ixgbe_legacy_start_locked(ifp, txr);
6570 IXGBE_TX_UNLOCK(txr);
6571 }
6572
6573 if (more) {
6574 que->req.ev_count++;
6575 ixgbe_sched_handle_que(adapter, que);
6576 } else if (que->res != NULL) {
6577 /* MSIX: Re-enable this interrupt */
6578 ixgbe_enable_queue(adapter, que->msix);
6579 } else {
6580 /* INTx or MSI */
6581 ixgbe_enable_queue(adapter, 0);
6582 }
6583
6584 return;
6585 } /* ixgbe_handle_que */
6586
6587 /************************************************************************
6588 * ixgbe_handle_que_work
6589 ************************************************************************/
6590 static void
6591 ixgbe_handle_que_work(struct work *wk, void *context)
6592 {
6593 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6594
6595 /*
6596 * "enqueued flag" is not required here.
6597 * See ixgbe_msix_que().
6598 */
6599 ixgbe_handle_que(que);
6600 }
6601
6602 /************************************************************************
6603 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6604 ************************************************************************/
6605 static int
6606 ixgbe_allocate_legacy(struct adapter *adapter,
6607 const struct pci_attach_args *pa)
6608 {
6609 device_t dev = adapter->dev;
6610 struct ix_queue *que = adapter->queues;
6611 struct tx_ring *txr = adapter->tx_rings;
6612 int counts[PCI_INTR_TYPE_SIZE];
6613 pci_intr_type_t intr_type, max_type;
6614 char intrbuf[PCI_INTRSTR_LEN];
6615 char wqname[MAXCOMLEN];
6616 const char *intrstr = NULL;
6617 int defertx_error = 0, error;
6618
6619 /* We allocate a single interrupt resource */
6620 max_type = PCI_INTR_TYPE_MSI;
6621 counts[PCI_INTR_TYPE_MSIX] = 0;
6622 counts[PCI_INTR_TYPE_MSI] =
6623 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6624 /* Check not feat_en but feat_cap to fallback to INTx */
6625 counts[PCI_INTR_TYPE_INTX] =
6626 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6627
6628 alloc_retry:
6629 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6630 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6631 return ENXIO;
6632 }
6633 adapter->osdep.nintrs = 1;
6634 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6635 intrbuf, sizeof(intrbuf));
6636 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6637 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6638 device_xname(dev));
6639 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6640 if (adapter->osdep.ihs[0] == NULL) {
6641 aprint_error_dev(dev,"unable to establish %s\n",
6642 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6643 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6644 adapter->osdep.intrs = NULL;
6645 switch (intr_type) {
6646 case PCI_INTR_TYPE_MSI:
6647 /* The next try is for INTx: Disable MSI */
6648 max_type = PCI_INTR_TYPE_INTX;
6649 counts[PCI_INTR_TYPE_INTX] = 1;
6650 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6651 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6652 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6653 goto alloc_retry;
6654 } else
6655 break;
6656 case PCI_INTR_TYPE_INTX:
6657 default:
6658 /* See below */
6659 break;
6660 }
6661 }
6662 if (intr_type == PCI_INTR_TYPE_INTX) {
6663 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6664 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6665 }
6666 if (adapter->osdep.ihs[0] == NULL) {
6667 aprint_error_dev(dev,
6668 "couldn't establish interrupt%s%s\n",
6669 intrstr ? " at " : "", intrstr ? intrstr : "");
6670 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6671 adapter->osdep.intrs = NULL;
6672 return ENXIO;
6673 }
6674 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6675 /*
6676 * Try allocating a fast interrupt and the associated deferred
6677 * processing contexts.
6678 */
6679 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6680 txr->txr_si =
6681 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6682 ixgbe_deferred_mq_start, txr);
6683
6684 snprintf(wqname, sizeof(wqname), "%sdeferTx",
6685 device_xname(dev));
6686 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6687 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6688 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6689 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6690 }
6691 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6692 ixgbe_handle_que, que);
6693 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6694 error = workqueue_create(&adapter->que_wq, wqname,
6695 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6696 IXGBE_WORKQUEUE_FLAGS);
6697
6698 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6699 && ((txr->txr_si == NULL) || defertx_error != 0))
6700 || (que->que_si == NULL) || error != 0) {
6701 aprint_error_dev(dev,
6702 "could not establish software interrupts\n");
6703
6704 return ENXIO;
6705 }
6706 /* For simplicity in the handlers */
6707 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6708
6709 return (0);
6710 } /* ixgbe_allocate_legacy */
6711
6712 /************************************************************************
6713 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6714 ************************************************************************/
6715 static int
6716 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6717 {
6718 device_t dev = adapter->dev;
6719 struct ix_queue *que = adapter->queues;
6720 struct tx_ring *txr = adapter->tx_rings;
6721 pci_chipset_tag_t pc;
6722 char intrbuf[PCI_INTRSTR_LEN];
6723 char intr_xname[32];
6724 char wqname[MAXCOMLEN];
6725 const char *intrstr = NULL;
6726 int error, vector = 0;
6727 int cpu_id = 0;
6728 kcpuset_t *affinity;
6729 #ifdef RSS
6730 unsigned int rss_buckets = 0;
6731 kcpuset_t cpu_mask;
6732 #endif
6733
6734 pc = adapter->osdep.pc;
6735 #ifdef RSS
6736 /*
6737 * If we're doing RSS, the number of queues needs to
6738 * match the number of RSS buckets that are configured.
6739 *
6740 * + If there's more queues than RSS buckets, we'll end
6741 * up with queues that get no traffic.
6742 *
6743 * + If there's more RSS buckets than queues, we'll end
6744 * up having multiple RSS buckets map to the same queue,
6745 * so there'll be some contention.
6746 */
6747 rss_buckets = rss_getnumbuckets();
6748 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6749 (adapter->num_queues != rss_buckets)) {
6750 device_printf(dev,
6751 "%s: number of queues (%d) != number of RSS buckets (%d)"
6752 "; performance will be impacted.\n",
6753 __func__, adapter->num_queues, rss_buckets);
6754 }
6755 #endif
6756
6757 adapter->osdep.nintrs = adapter->num_queues + 1;
6758 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6759 adapter->osdep.nintrs) != 0) {
6760 aprint_error_dev(dev,
6761 "failed to allocate MSI-X interrupt\n");
6762 return (ENXIO);
6763 }
6764
6765 kcpuset_create(&affinity, false);
6766 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6767 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6768 device_xname(dev), i);
6769 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6770 sizeof(intrbuf));
6771 #ifdef IXGBE_MPSAFE
6772 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6773 true);
6774 #endif
6775 /* Set the handler function */
6776 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6777 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6778 intr_xname);
6779 if (que->res == NULL) {
6780 aprint_error_dev(dev,
6781 "Failed to register QUE handler\n");
6782 error = ENXIO;
6783 goto err_out;
6784 }
6785 que->msix = vector;
6786 adapter->active_queues |= 1ULL << que->msix;
6787
6788 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6789 #ifdef RSS
6790 /*
6791 * The queue ID is used as the RSS layer bucket ID.
6792 * We look up the queue ID -> RSS CPU ID and select
6793 * that.
6794 */
6795 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6796 CPU_SETOF(cpu_id, &cpu_mask);
6797 #endif
6798 } else {
6799 /*
6800 * Bind the MSI-X vector, and thus the
6801 * rings to the corresponding CPU.
6802 *
6803 * This just happens to match the default RSS
6804 * round-robin bucket -> queue -> CPU allocation.
6805 */
6806 if (adapter->num_queues > 1)
6807 cpu_id = i;
6808 }
6809 /* Round-robin affinity */
6810 kcpuset_zero(affinity);
6811 kcpuset_set(affinity, cpu_id % ncpu);
6812 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6813 NULL);
6814 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6815 intrstr);
6816 if (error == 0) {
6817 #if 1 /* def IXGBE_DEBUG */
6818 #ifdef RSS
6819 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6820 cpu_id % ncpu);
6821 #else
6822 aprint_normal(", bound queue %d to cpu %d", i,
6823 cpu_id % ncpu);
6824 #endif
6825 #endif /* IXGBE_DEBUG */
6826 }
6827 aprint_normal("\n");
6828
6829 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6830 txr->txr_si = softint_establish(
6831 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6832 ixgbe_deferred_mq_start, txr);
6833 if (txr->txr_si == NULL) {
6834 aprint_error_dev(dev,
6835 "couldn't establish software interrupt\n");
6836 error = ENXIO;
6837 goto err_out;
6838 }
6839 }
6840 que->que_si
6841 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6842 ixgbe_handle_que, que);
6843 if (que->que_si == NULL) {
6844 aprint_error_dev(dev,
6845 "couldn't establish software interrupt\n");
6846 error = ENXIO;
6847 goto err_out;
6848 }
6849 }
6850 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6851 error = workqueue_create(&adapter->txr_wq, wqname,
6852 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6853 IXGBE_WORKQUEUE_FLAGS);
6854 if (error) {
6855 aprint_error_dev(dev,
6856 "couldn't create workqueue for deferred Tx\n");
6857 goto err_out;
6858 }
6859 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6860
6861 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6862 error = workqueue_create(&adapter->que_wq, wqname,
6863 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6864 IXGBE_WORKQUEUE_FLAGS);
6865 if (error) {
6866 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6867 goto err_out;
6868 }
6869
6870 /* and Link */
6871 cpu_id++;
6872 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6873 adapter->vector = vector;
6874 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6875 sizeof(intrbuf));
6876 #ifdef IXGBE_MPSAFE
6877 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6878 true);
6879 #endif
6880 /* Set the link handler function */
6881 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6882 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6883 intr_xname);
6884 if (adapter->osdep.ihs[vector] == NULL) {
6885 aprint_error_dev(dev, "Failed to register LINK handler\n");
6886 error = ENXIO;
6887 goto err_out;
6888 }
6889 /* Round-robin affinity */
6890 kcpuset_zero(affinity);
6891 kcpuset_set(affinity, cpu_id % ncpu);
6892 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6893 NULL);
6894
6895 aprint_normal_dev(dev,
6896 "for link, interrupting at %s", intrstr);
6897 if (error == 0)
6898 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6899 else
6900 aprint_normal("\n");
6901
6902 kcpuset_destroy(affinity);
6903 aprint_normal_dev(dev,
6904 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6905
6906 return (0);
6907
6908 err_out:
6909 kcpuset_destroy(affinity);
6910 ixgbe_free_deferred_handlers(adapter);
6911 ixgbe_free_pciintr_resources(adapter);
6912 return (error);
6913 } /* ixgbe_allocate_msix */
6914
6915 /************************************************************************
6916 * ixgbe_configure_interrupts
6917 *
6918 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6919 * This will also depend on user settings.
6920 ************************************************************************/
6921 static int
6922 ixgbe_configure_interrupts(struct adapter *adapter)
6923 {
6924 device_t dev = adapter->dev;
6925 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6926 int want, queues, msgs;
6927
6928 /* Default to 1 queue if MSI-X setup fails */
6929 adapter->num_queues = 1;
6930
6931 /* Override by tuneable */
6932 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6933 goto msi;
6934
6935 /*
6936 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6937 * interrupt slot.
6938 */
6939 if (ncpu == 1)
6940 goto msi;
6941
6942 /* First try MSI-X */
6943 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6944 msgs = MIN(msgs, IXG_MAX_NINTR);
6945 if (msgs < 2)
6946 goto msi;
6947
6948 adapter->msix_mem = (void *)1; /* XXX */
6949
6950 /* Figure out a reasonable auto config value */
6951 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6952
6953 #ifdef RSS
6954 /* If we're doing RSS, clamp at the number of RSS buckets */
6955 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6956 queues = uimin(queues, rss_getnumbuckets());
6957 #endif
6958 if (ixgbe_num_queues > queues) {
6959 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6960 ixgbe_num_queues = queues;
6961 }
6962
6963 if (ixgbe_num_queues != 0)
6964 queues = ixgbe_num_queues;
6965 else
6966 queues = uimin(queues,
6967 uimin(mac->max_tx_queues, mac->max_rx_queues));
6968
6969 /* reflect correct sysctl value */
6970 ixgbe_num_queues = queues;
6971
6972 /*
6973 * Want one vector (RX/TX pair) per queue
6974 * plus an additional for Link.
6975 */
6976 want = queues + 1;
6977 if (msgs >= want)
6978 msgs = want;
6979 else {
6980 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6981 "%d vectors but %d queues wanted!\n",
6982 msgs, want);
6983 goto msi;
6984 }
6985 adapter->num_queues = queues;
6986 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6987 return (0);
6988
6989 /*
6990 * MSI-X allocation failed or provided us with
6991 * less vectors than needed. Free MSI-X resources
6992 * and we'll try enabling MSI.
6993 */
6994 msi:
6995 /* Without MSI-X, some features are no longer supported */
6996 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6997 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6998 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6999 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
7000
7001 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
7002 adapter->msix_mem = NULL; /* XXX */
7003 if (msgs > 1)
7004 msgs = 1;
7005 if (msgs != 0) {
7006 msgs = 1;
7007 adapter->feat_en |= IXGBE_FEATURE_MSI;
7008 return (0);
7009 }
7010
7011 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7012 aprint_error_dev(dev,
7013 "Device does not support legacy interrupts.\n");
7014 return 1;
7015 }
7016
7017 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7018
7019 return (0);
7020 } /* ixgbe_configure_interrupts */
7021
7022
7023 /************************************************************************
7024 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7025 *
7026 * Done outside of interrupt context since the driver might sleep
7027 ************************************************************************/
7028 static void
7029 ixgbe_handle_link(void *context)
7030 {
7031 struct adapter *adapter = context;
7032 struct ixgbe_hw *hw = &adapter->hw;
7033
7034 KASSERT(mutex_owned(&adapter->core_mtx));
7035
7036 ++adapter->link_workev.ev_count;
7037 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7038 ixgbe_update_link_status(adapter);
7039
7040 /* Re-enable link interrupts */
7041 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7042 } /* ixgbe_handle_link */
7043
7044 #if 0
7045 /************************************************************************
7046 * ixgbe_rearm_queues
7047 ************************************************************************/
7048 static __inline void
7049 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
7050 {
7051 u32 mask;
7052
7053 switch (adapter->hw.mac.type) {
7054 case ixgbe_mac_82598EB:
7055 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7056 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
7057 break;
7058 case ixgbe_mac_82599EB:
7059 case ixgbe_mac_X540:
7060 case ixgbe_mac_X550:
7061 case ixgbe_mac_X550EM_x:
7062 case ixgbe_mac_X550EM_a:
7063 mask = (queues & 0xFFFFFFFF);
7064 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
7065 mask = (queues >> 32);
7066 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
7067 break;
7068 default:
7069 break;
7070 }
7071 } /* ixgbe_rearm_queues */
7072 #endif
7073