ixgbe.c revision 1.281 1 /* $NetBSD: ixgbe.c,v 1.281 2021/04/30 06:55:32 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.281 2021/04/30 06:55:32 msaitoh Exp $");
68
69 #ifdef _KERNEL_OPT
70 #include "opt_inet.h"
71 #include "opt_inet6.h"
72 #include "opt_net_mpsafe.h"
73 #include "opt_ixgbe.h"
74 #endif
75
76 #include "ixgbe.h"
77 #include "ixgbe_phy.h"
78 #include "ixgbe_sriov.h"
79 #include "vlan.h"
80
81 #include <sys/cprng.h>
82 #include <dev/mii/mii.h>
83 #include <dev/mii/miivar.h>
84
85 /************************************************************************
86 * Driver version
87 ************************************************************************/
88 static const char ixgbe_driver_version[] = "4.0.1-k";
89 /* XXX NetBSD: + 3.3.10 */
90
91 /************************************************************************
92 * PCI Device ID Table
93 *
94 * Used by probe to select devices to load on
95 * Last field stores an index into ixgbe_strings
96 * Last entry must be all 0s
97 *
98 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
99 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	/* X550EM_x family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	/* X550EM_a family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
155
156 /************************************************************************
157 * Table of branding strings
158 ************************************************************************/
/* Indexed by the last ("String Index") field of ixgbe_vendor_info_array. */
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
162
163 /************************************************************************
164 * Function prototypes
165 ************************************************************************/
166 static int ixgbe_probe(device_t, cfdata_t, void *);
167 static void ixgbe_quirks(struct adapter *);
168 static void ixgbe_attach(device_t, device_t, void *);
169 static int ixgbe_detach(device_t, int);
170 #if 0
171 static int ixgbe_shutdown(device_t);
172 #endif
173 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
174 static bool ixgbe_resume(device_t, const pmf_qual_t *);
175 static int ixgbe_ifflags_cb(struct ethercom *);
176 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
177 static int ixgbe_init(struct ifnet *);
178 static void ixgbe_init_locked(struct adapter *);
179 static void ixgbe_ifstop(struct ifnet *, int);
180 static void ixgbe_stop_locked(void *);
181 static void ixgbe_init_device_features(struct adapter *);
182 static int ixgbe_check_fan_failure(struct adapter *, u32, bool);
183 static void ixgbe_add_media_types(struct adapter *);
184 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
185 static int ixgbe_media_change(struct ifnet *);
186 static int ixgbe_allocate_pci_resources(struct adapter *,
187 const struct pci_attach_args *);
188 static void ixgbe_free_deferred_handlers(struct adapter *);
189 static void ixgbe_get_slot_info(struct adapter *);
190 static int ixgbe_allocate_msix(struct adapter *,
191 const struct pci_attach_args *);
192 static int ixgbe_allocate_legacy(struct adapter *,
193 const struct pci_attach_args *);
194 static int ixgbe_configure_interrupts(struct adapter *);
195 static void ixgbe_free_pciintr_resources(struct adapter *);
196 static void ixgbe_free_pci_resources(struct adapter *);
197 static void ixgbe_local_timer(void *);
198 static void ixgbe_handle_timer(struct work *, void *);
199 static void ixgbe_recovery_mode_timer(void *);
200 static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
201 static int ixgbe_setup_interface(device_t, struct adapter *);
202 static void ixgbe_config_gpie(struct adapter *);
203 static void ixgbe_config_dmac(struct adapter *);
204 static void ixgbe_config_delay_values(struct adapter *);
205 static void ixgbe_schedule_admin_tasklet(struct adapter *);
206 static void ixgbe_config_link(struct adapter *);
207 static void ixgbe_check_wol_support(struct adapter *);
208 static int ixgbe_setup_low_power_mode(struct adapter *);
209 #if 0
210 static void ixgbe_rearm_queues(struct adapter *, u64);
211 #endif
212
213 static void ixgbe_initialize_transmit_units(struct adapter *);
214 static void ixgbe_initialize_receive_units(struct adapter *);
215 static void ixgbe_enable_rx_drop(struct adapter *);
216 static void ixgbe_disable_rx_drop(struct adapter *);
217 static void ixgbe_initialize_rss_mapping(struct adapter *);
218
219 static void ixgbe_enable_intr(struct adapter *);
220 static void ixgbe_disable_intr(struct adapter *);
221 static void ixgbe_update_stats_counters(struct adapter *);
222 static void ixgbe_set_rxfilter(struct adapter *);
223 static void ixgbe_update_link_status(struct adapter *);
224 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
225 static void ixgbe_configure_ivars(struct adapter *);
226 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
227 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
228
229 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
230 static void ixgbe_setup_vlan_hw_support(struct adapter *);
231 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
232 static int ixgbe_register_vlan(struct adapter *, u16);
233 static int ixgbe_unregister_vlan(struct adapter *, u16);
234
235 static void ixgbe_add_device_sysctls(struct adapter *);
236 static void ixgbe_add_hw_stats(struct adapter *);
237 static void ixgbe_clear_evcnt(struct adapter *);
238 static int ixgbe_set_flowcntl(struct adapter *, int);
239 static int ixgbe_set_advertise(struct adapter *, int);
240 static int ixgbe_get_advertise(struct adapter *);
241
242 /* Sysctl handlers */
243 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
244 const char *, int *, int);
245 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
251 #ifdef IXGBE_DEBUG
252 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
254 #endif
255 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
256 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
257 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
258 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
259 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
260 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
261 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
262 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
263 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
264
265 /* Interrupt functions */
266 static int ixgbe_msix_que(void *);
267 static int ixgbe_msix_admin(void *);
268 static void ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
269 static int ixgbe_legacy_irq(void *);
270
271 /* Event handlers running on workqueue */
272 static void ixgbe_handle_que(void *);
273 static void ixgbe_handle_link(void *);
274 static void ixgbe_handle_msf(void *);
275 static void ixgbe_handle_mod(void *, bool);
276 static void ixgbe_handle_phy(void *);
277
278 /* Deferred workqueue handlers */
279 static void ixgbe_handle_admin(struct work *, void *);
280 static void ixgbe_handle_que_work(struct work *, void *);
281
282 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
283
284 /************************************************************************
285 * NetBSD Device Interface Entry Points
286 ************************************************************************/
287 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
288 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
289 DVF_DETACH_SHUTDOWN);
290
291 #if 0
292 devclass_t ix_devclass;
293 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
294
295 MODULE_DEPEND(ix, pci, 1, 1, 1);
296 MODULE_DEPEND(ix, ether, 1, 1, 1);
297 #ifdef DEV_NETMAP
298 MODULE_DEPEND(ix, netmap, 1, 1, 1);
299 #endif
300 #endif
301
302 /*
303 * TUNEABLE PARAMETERS:
304 */
305
306 /*
307 * AIM: Adaptive Interrupt Moderation
308 * which means that the interrupt rate
309 * is varied over time based on the
310 * traffic for that interrupt vector
311 */
312 static bool ixgbe_enable_aim = true;
313 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
314 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
315 "Enable adaptive interrupt moderation");
316
317 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
318 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
319 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
320
321 /* How many packets rxeof tries to clean at a time */
322 static int ixgbe_rx_process_limit = 256;
323 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
324 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
325
326 /* How many packets txeof tries to clean at a time */
327 static int ixgbe_tx_process_limit = 256;
328 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
329 &ixgbe_tx_process_limit, 0,
330 "Maximum number of sent packets to process at a time, -1 means unlimited");
331
332 /* Flow control setting, default to full */
333 static int ixgbe_flow_control = ixgbe_fc_full;
334 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
335 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
336
337 /* Which packet processing uses workqueue or softint */
338 static bool ixgbe_txrx_workqueue = false;
339
/*
 * Smart speed setting, default to on.
 * This currently works only as a compile-time option because it is
 * applied during attach; set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
347 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
348
349 /*
350 * MSI-X should be the default for best performance,
351 * but this allows it to be forced off for testing.
352 */
353 static int ixgbe_enable_msix = 1;
354 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
355 "Enable MSI-X interrupts");
356
357 /*
358 * Number of Queues, can be set to 0,
359 * it then autoconfigures based on the
360 * number of cpus with a max of 8. This
361 * can be overridden manually here.
362 */
363 static int ixgbe_num_queues = 0;
364 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
365 "Number of queues to configure, 0 indicates autoconfigure");
366
367 /*
368 * Number of TX descriptors per ring,
369 * setting higher than RX as this seems
370 * the better performing choice.
371 */
372 static int ixgbe_txd = PERFORM_TXD;
373 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
374 "Number of transmit descriptors per queue");
375
376 /* Number of RX descriptors per ring */
377 static int ixgbe_rxd = PERFORM_RXD;
378 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
379 "Number of receive descriptors per queue");
380
381 /*
382 * Defining this on will allow the use
383 * of unsupported SFP+ modules, note that
384 * doing so you are on your own :)
385 */
386 static int allow_unsupported_sfp = false;
387 #define TUNABLE_INT(__x, __y)
388 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
389
390 /*
391 * Not sure if Flow Director is fully baked,
392 * so we'll default to turning it off.
393 */
394 static int ixgbe_enable_fdir = 0;
395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
396 "Enable Flow Director");
397
398 /* Legacy Transmit (single queue) */
399 static int ixgbe_enable_legacy_tx = 0;
400 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
401 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
402
403 /* Receive-Side Scaling */
404 static int ixgbe_enable_rss = 1;
405 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
406 "Enable Receive-Side Scaling (RSS)");
407
408 #if 0
409 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
410 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
411 #endif
412
413 #ifdef NET_MPSAFE
414 #define IXGBE_MPSAFE 1
415 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
416 #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
417 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
418 #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
419 #else
420 #define IXGBE_CALLOUT_FLAGS 0
421 #define IXGBE_SOFTINT_FLAGS 0
422 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
423 #define IXGBE_TASKLET_WQ_FLAGS 0
424 #endif
425 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
426
427 /************************************************************************
428 * ixgbe_initialize_rss_mapping
429 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/*
	 * Program the RSS redirection table (RETA/ERETA), the 40-byte RSS
	 * key (RSSRK[0..9]) and the hash-type selection (MRQC) so that
	 * receive traffic is spread over all configured queues.
	 */

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 spaces queue indices in the RETA by 0x11. */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 class has a 512-entry table; entries >= 128 go to ERETA. */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the configured queues. */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then write one 32-bit
		 * register every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the generic hash-type flags into MRQC field bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in the multiple-receive-queue mode bits for SR-IOV. */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
543
544 /************************************************************************
545 * ixgbe_initialize_receive_units - Setup receive registers and features.
546 ************************************************************************/
547 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
548
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames. */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units (1KB granularity, rounded up). */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue setup: ring base/length, SRRCTL, statistics mapping. */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Header split packet types (82599 and later only). */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
673
674 /************************************************************************
675 * ixgbe_initialize_transmit_units - Enable transmit units.
676 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the DMA TX engine (82599 and later). */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
767
768 static void
769 ixgbe_quirks(struct adapter *adapter)
770 {
771 device_t dev = adapter->dev;
772 struct ixgbe_hw *hw = &adapter->hw;
773 const char *vendor, *product;
774
775 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
776 /*
777 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
778 * MA10-ST0.
779 */
780 vendor = pmf_get_platform("system-vendor");
781 product = pmf_get_platform("system-product");
782
783 if ((vendor == NULL) || (product == NULL))
784 return;
785
786 if ((strcmp(vendor, "GIGABYTE") == 0) &&
787 (strcmp(product, "MA10-ST0") == 0)) {
788 aprint_verbose_dev(dev,
789 "Enable SFP+ MOD_ABS inverse quirk\n");
790 adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
791 }
792 }
793 }
794
795 /************************************************************************
796 * ixgbe_attach - Device initialization routine
797 *
798 * Called when the driver is being loaded.
799 * Identifies the type of hardware, allocates all resources
800 * and initializes the hardware.
801 *
802 * return 0 on success, positive on failure
803 ************************************************************************/
804 static void
805 ixgbe_attach(device_t parent, device_t dev, void *aux)
806 {
807 struct adapter *adapter;
808 struct ixgbe_hw *hw;
809 int error = -1;
810 u32 ctrl_ext;
811 u16 high, low, nvmreg;
812 pcireg_t id, subid;
813 const ixgbe_vendor_info_t *ent;
814 struct pci_attach_args *pa = aux;
815 bool unsupported_sfp = false;
816 const char *str;
817 char wqname[MAXCOMLEN];
818 char buf[256];
819
820 INIT_DEBUGOUT("ixgbe_attach: begin");
821
822 /* Allocate, clear, and link in our adapter structure */
823 adapter = device_private(dev);
824 adapter->hw.back = adapter;
825 adapter->dev = dev;
826 hw = &adapter->hw;
827 adapter->osdep.pc = pa->pa_pc;
828 adapter->osdep.tag = pa->pa_tag;
829 if (pci_dma64_available(pa))
830 adapter->osdep.dmat = pa->pa_dmat64;
831 else
832 adapter->osdep.dmat = pa->pa_dmat;
833 adapter->osdep.attached = false;
834 adapter->osdep.detaching = false;
835
836 ent = ixgbe_lookup(pa);
837
838 KASSERT(ent != NULL);
839
840 aprint_normal(": %s, Version - %s\n",
841 ixgbe_strings[ent->index], ixgbe_driver_version);
842
843 /* Core Lock Init */
844 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
845
846 /* Set up the timer callout and workqueue */
847 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
848 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
849 error = workqueue_create(&adapter->timer_wq, wqname,
850 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
851 IXGBE_TASKLET_WQ_FLAGS);
852 if (error) {
853 aprint_error_dev(dev,
854 "could not create timer workqueue (%d)\n", error);
855 goto err_out;
856 }
857
858 /* Determine hardware revision */
859 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
860 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
861
862 hw->vendor_id = PCI_VENDOR(id);
863 hw->device_id = PCI_PRODUCT(id);
864 hw->revision_id =
865 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
866 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
867 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
868
869 /* Set quirk flags */
870 ixgbe_quirks(adapter);
871
872 /*
873 * Make sure BUSMASTER is set
874 */
875 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
876
877 /* Do base PCI setup - map BAR0 */
878 if (ixgbe_allocate_pci_resources(adapter, pa)) {
879 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
880 error = ENXIO;
881 goto err_out;
882 }
883
884 /* let hardware know driver is loaded */
885 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
886 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
887 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
888
889 /*
890 * Initialize the shared code
891 */
892 if (ixgbe_init_shared_code(hw) != 0) {
893 aprint_error_dev(dev, "Unable to initialize the shared code\n");
894 error = ENXIO;
895 goto err_out;
896 }
897
898 switch (hw->mac.type) {
899 case ixgbe_mac_82598EB:
900 str = "82598EB";
901 break;
902 case ixgbe_mac_82599EB:
903 str = "82599EB";
904 break;
905 case ixgbe_mac_X540:
906 str = "X540";
907 break;
908 case ixgbe_mac_X550:
909 str = "X550";
910 break;
911 case ixgbe_mac_X550EM_x:
912 str = "X550EM X";
913 break;
914 case ixgbe_mac_X550EM_a:
915 str = "X550EM A";
916 break;
917 default:
918 str = "Unknown";
919 break;
920 }
921 aprint_normal_dev(dev, "device %s\n", str);
922
923 if (hw->mbx.ops.init_params)
924 hw->mbx.ops.init_params(hw);
925
926 hw->allow_unsupported_sfp = allow_unsupported_sfp;
927
928 /* Pick up the 82599 settings */
929 if (hw->mac.type != ixgbe_mac_82598EB) {
930 hw->phy.smart_speed = ixgbe_smart_speed;
931 adapter->num_segs = IXGBE_82599_SCATTER;
932 } else
933 adapter->num_segs = IXGBE_82598_SCATTER;
934
935 /* Ensure SW/FW semaphore is free */
936 ixgbe_init_swfw_semaphore(hw);
937
938 hw->mac.ops.set_lan_id(hw);
939 ixgbe_init_device_features(adapter);
940
941 if (ixgbe_configure_interrupts(adapter)) {
942 error = ENXIO;
943 goto err_out;
944 }
945
946 /* Allocate multicast array memory. */
947 adapter->mta = malloc(sizeof(*adapter->mta) *
948 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949
950 /* Enable WoL (if supported) */
951 ixgbe_check_wol_support(adapter);
952
953 /* Register for VLAN events */
954 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
955
956 /* Verify adapter fan is still functional (if applicable) */
957 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 ixgbe_check_fan_failure(adapter, esdp, FALSE);
960 }
961
962 /* Set an initial default flow control value */
963 hw->fc.requested_mode = ixgbe_flow_control;
964
965 /* Sysctls for limiting the amount of work done in the taskqueues */
966 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
967 "max number of rx packets to process",
968 &adapter->rx_process_limit, ixgbe_rx_process_limit);
969
970 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
971 "max number of tx packets to process",
972 &adapter->tx_process_limit, ixgbe_tx_process_limit);
973
974 /* Do descriptor calc and sanity checks */
975 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
976 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
977 aprint_error_dev(dev, "TXD config issue, using default!\n");
978 adapter->num_tx_desc = DEFAULT_TXD;
979 } else
980 adapter->num_tx_desc = ixgbe_txd;
981
982 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
983 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
984 aprint_error_dev(dev, "RXD config issue, using default!\n");
985 adapter->num_rx_desc = DEFAULT_RXD;
986 } else
987 adapter->num_rx_desc = ixgbe_rxd;
988
989 adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;
990
991 /* Allocate our TX/RX Queues */
992 if (ixgbe_allocate_queues(adapter)) {
993 error = ENOMEM;
994 goto err_out;
995 }
996
997 hw->phy.reset_if_overtemp = TRUE;
998 error = ixgbe_reset_hw(hw);
999 hw->phy.reset_if_overtemp = FALSE;
1000 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1001 error = IXGBE_SUCCESS;
1002 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1003 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
1004 unsupported_sfp = true;
1005 error = IXGBE_SUCCESS;
1006 } else if (error) {
1007 aprint_error_dev(dev, "Hardware initialization failed\n");
1008 error = EIO;
1009 goto err_late;
1010 }
1011
1012 /* Make sure we have a good EEPROM before we read from it */
1013 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
1014 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1015 error = EIO;
1016 goto err_late;
1017 }
1018
1019 aprint_normal("%s:", device_xname(dev));
1020 /* NVM Image Version */
1021 high = low = 0;
1022 switch (hw->mac.type) {
1023 case ixgbe_mac_X540:
1024 case ixgbe_mac_X550EM_a:
1025 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1026 if (nvmreg == 0xffff)
1027 break;
1028 high = (nvmreg >> 12) & 0x0f;
1029 low = (nvmreg >> 4) & 0xff;
1030 id = nvmreg & 0x0f;
1031 aprint_normal(" NVM Image Version %u.", high);
1032 if (hw->mac.type == ixgbe_mac_X540)
1033 str = "%x";
1034 else
1035 str = "%02x";
1036 aprint_normal(str, low);
1037 aprint_normal(" ID 0x%x,", id);
1038 break;
1039 case ixgbe_mac_X550EM_x:
1040 case ixgbe_mac_X550:
1041 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1042 if (nvmreg == 0xffff)
1043 break;
1044 high = (nvmreg >> 12) & 0x0f;
1045 low = nvmreg & 0xff;
1046 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1047 break;
1048 default:
1049 break;
1050 }
1051 hw->eeprom.nvm_image_ver_high = high;
1052 hw->eeprom.nvm_image_ver_low = low;
1053
1054 /* PHY firmware revision */
1055 switch (hw->mac.type) {
1056 case ixgbe_mac_X540:
1057 case ixgbe_mac_X550:
1058 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1059 if (nvmreg == 0xffff)
1060 break;
1061 high = (nvmreg >> 12) & 0x0f;
1062 low = (nvmreg >> 4) & 0xff;
1063 id = nvmreg & 0x000f;
1064 aprint_normal(" PHY FW Revision %u.", high);
1065 if (hw->mac.type == ixgbe_mac_X540)
1066 str = "%x";
1067 else
1068 str = "%02x";
1069 aprint_normal(str, low);
1070 aprint_normal(" ID 0x%x,", id);
1071 break;
1072 default:
1073 break;
1074 }
1075
1076 /* NVM Map version & OEM NVM Image version */
1077 switch (hw->mac.type) {
1078 case ixgbe_mac_X550:
1079 case ixgbe_mac_X550EM_x:
1080 case ixgbe_mac_X550EM_a:
1081 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1082 if (nvmreg != 0xffff) {
1083 high = (nvmreg >> 12) & 0x0f;
1084 low = nvmreg & 0x00ff;
1085 aprint_normal(" NVM Map version %u.%02x,", high, low);
1086 }
1087 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1088 if (nvmreg != 0xffff) {
1089 high = (nvmreg >> 12) & 0x0f;
1090 low = nvmreg & 0x00ff;
1091 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1092 low);
1093 }
1094 break;
1095 default:
1096 break;
1097 }
1098
1099 /* Print the ETrackID */
1100 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1101 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1102 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1103
1104 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1105 error = ixgbe_allocate_msix(adapter, pa);
1106 if (error) {
1107 /* Free allocated queue structures first */
1108 ixgbe_free_queues(adapter);
1109
1110 /* Fallback to legacy interrupt */
1111 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1112 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1113 adapter->feat_en |= IXGBE_FEATURE_MSI;
1114 adapter->num_queues = 1;
1115
1116 /* Allocate our TX/RX Queues again */
1117 if (ixgbe_allocate_queues(adapter)) {
1118 error = ENOMEM;
1119 goto err_out;
1120 }
1121 }
1122 }
1123 /* Recovery mode */
1124 switch (adapter->hw.mac.type) {
1125 case ixgbe_mac_X550:
1126 case ixgbe_mac_X550EM_x:
1127 case ixgbe_mac_X550EM_a:
1128 /* >= 2.00 */
1129 if (hw->eeprom.nvm_image_ver_high >= 2) {
1130 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1131 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1132 }
1133 break;
1134 default:
1135 break;
1136 }
1137
1138 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1139 error = ixgbe_allocate_legacy(adapter, pa);
1140 if (error)
1141 goto err_late;
1142
1143 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1144 mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1145 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1146 error = workqueue_create(&adapter->admin_wq, wqname,
1147 ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
1148 IXGBE_TASKLET_WQ_FLAGS);
1149 if (error) {
1150 aprint_error_dev(dev,
1151 "could not create admin workqueue (%d)\n", error);
1152 goto err_out;
1153 }
1154
1155 error = ixgbe_start_hw(hw);
1156 switch (error) {
1157 case IXGBE_ERR_EEPROM_VERSION:
1158 aprint_error_dev(dev, "This device is a pre-production adapter/"
1159 "LOM. Please be aware there may be issues associated "
1160 "with your hardware.\nIf you are experiencing problems "
1161 "please contact your Intel or hardware representative "
1162 "who provided you with this hardware.\n");
1163 break;
1164 default:
1165 break;
1166 }
1167
1168 /* Setup OS specific network interface */
1169 if (ixgbe_setup_interface(dev, adapter) != 0)
1170 goto err_late;
1171
1172 /*
1173 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1174 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1175 */
1176 if (hw->phy.media_type == ixgbe_media_type_copper) {
1177 uint16_t id1, id2;
1178 int oui, model, rev;
1179 const char *descr;
1180
1181 id1 = hw->phy.id >> 16;
1182 id2 = hw->phy.id & 0xffff;
1183 oui = MII_OUI(id1, id2);
1184 model = MII_MODEL(id2);
1185 rev = MII_REV(id2);
1186 if ((descr = mii_get_descr(oui, model)) != NULL)
1187 aprint_normal_dev(dev,
1188 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1189 descr, oui, model, rev);
1190 else
1191 aprint_normal_dev(dev,
1192 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1193 oui, model, rev);
1194 }
1195
1196 /* Enable EEE power saving */
1197 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1198 hw->mac.ops.setup_eee(hw,
1199 adapter->feat_en & IXGBE_FEATURE_EEE);
1200
1201 /* Enable power to the phy. */
1202 if (!unsupported_sfp) {
1203 /* Enable the optics for 82599 SFP+ fiber */
1204 ixgbe_enable_tx_laser(hw);
1205
1206 /*
1207 * XXX Currently, ixgbe_set_phy_power() supports only copper
1208 * PHY, so it's not required to test with !unsupported_sfp.
1209 */
1210 ixgbe_set_phy_power(hw, TRUE);
1211 }
1212
1213 /* Initialize statistics */
1214 ixgbe_update_stats_counters(adapter);
1215
1216 /* Check PCIE slot type/speed/width */
1217 ixgbe_get_slot_info(adapter);
1218
1219 /*
1220 * Do time init and sysctl init here, but
1221 * only on the first port of a bypass adapter.
1222 */
1223 ixgbe_bypass_init(adapter);
1224
1225 /* Set an initial dmac value */
1226 adapter->dmac = 0;
1227 /* Set initial advertised speeds (if applicable) */
1228 adapter->advertise = ixgbe_get_advertise(adapter);
1229
1230 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1231 ixgbe_define_iov_schemas(dev, &error);
1232
1233 /* Add sysctls */
1234 ixgbe_add_device_sysctls(adapter);
1235 ixgbe_add_hw_stats(adapter);
1236
1237 /* For Netmap */
1238 adapter->init_locked = ixgbe_init_locked;
1239 adapter->stop_locked = ixgbe_stop_locked;
1240
1241 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1242 ixgbe_netmap_attach(adapter);
1243
1244 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1245 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1246 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1247 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1248
1249 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1250 pmf_class_network_register(dev, adapter->ifp);
1251 else
1252 aprint_error_dev(dev, "couldn't establish power handler\n");
1253
1254 /* Init recovery mode timer and state variable */
1255 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1256 adapter->recovery_mode = 0;
1257
1258 /* Set up the timer callout */
1259 callout_init(&adapter->recovery_mode_timer,
1260 IXGBE_CALLOUT_FLAGS);
1261 snprintf(wqname, sizeof(wqname), "%s-recovery",
1262 device_xname(dev));
1263 error = workqueue_create(&adapter->recovery_mode_timer_wq,
1264 wqname, ixgbe_handle_recovery_mode_timer, adapter,
1265 IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1266 if (error) {
1267 aprint_error_dev(dev, "could not create "
1268 "recovery_mode_timer workqueue (%d)\n", error);
1269 goto err_out;
1270 }
1271
1272 /* Start the task */
1273 callout_reset(&adapter->recovery_mode_timer, hz,
1274 ixgbe_recovery_mode_timer, adapter);
1275 }
1276
1277 INIT_DEBUGOUT("ixgbe_attach: end");
1278 adapter->osdep.attached = true;
1279
1280 return;
1281
1282 err_late:
1283 ixgbe_free_queues(adapter);
1284 err_out:
1285 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1286 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1287 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1288 ixgbe_free_deferred_handlers(adapter);
1289 ixgbe_free_pci_resources(adapter);
1290 if (adapter->mta != NULL)
1291 free(adapter->mta, M_DEVBUF);
1292 mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
1293 IXGBE_CORE_LOCK_DESTROY(adapter);
1294
1295 return;
1296 } /* ixgbe_attach */
1297
1298 /************************************************************************
1299 * ixgbe_check_wol_support
1300 *
1301 * Checks whether the adapter's ports are capable of
1302 * Wake On LAN by reading the adapter's NVM.
1303 *
1304 * Sets each port's hw->wol_enabled value depending
1305 * on the value read here.
1306 ************************************************************************/
1307 static void
1308 ixgbe_check_wol_support(struct adapter *adapter)
1309 {
1310 struct ixgbe_hw *hw = &adapter->hw;
1311 u16 dev_caps = 0;
1312
1313 /* Find out WoL support for port */
1314 adapter->wol_support = hw->wol_enabled = 0;
1315 ixgbe_get_device_caps(hw, &dev_caps);
1316 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1317 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1318 hw->bus.func == 0))
1319 adapter->wol_support = hw->wol_enabled = 1;
1320
1321 /* Save initial wake up filter configuration */
1322 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1323
1324 return;
1325 } /* ixgbe_check_wol_support */
1326
1327 /************************************************************************
1328 * ixgbe_setup_interface
1329 *
1330 * Setup networking device structure and register an interface.
1331 ************************************************************************/
1332 static int
1333 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1334 {
1335 struct ethercom *ec = &adapter->osdep.ec;
1336 struct ifnet *ifp;
1337 int rv;
1338
1339 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1340
1341 ifp = adapter->ifp = &ec->ec_if;
1342 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1343 ifp->if_baudrate = IF_Gbps(10);
1344 ifp->if_init = ixgbe_init;
1345 ifp->if_stop = ixgbe_ifstop;
1346 ifp->if_softc = adapter;
1347 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1348 #ifdef IXGBE_MPSAFE
1349 ifp->if_extflags = IFEF_MPSAFE;
1350 #endif
1351 ifp->if_ioctl = ixgbe_ioctl;
1352 #if __FreeBSD_version >= 1100045
1353 /* TSO parameters */
1354 ifp->if_hw_tsomax = 65518;
1355 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1356 ifp->if_hw_tsomaxsegsize = 2048;
1357 #endif
1358 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1359 #if 0
1360 ixgbe_start_locked = ixgbe_legacy_start_locked;
1361 #endif
1362 } else {
1363 ifp->if_transmit = ixgbe_mq_start;
1364 #if 0
1365 ixgbe_start_locked = ixgbe_mq_start_locked;
1366 #endif
1367 }
1368 ifp->if_start = ixgbe_legacy_start;
1369 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1370 IFQ_SET_READY(&ifp->if_snd);
1371
1372 rv = if_initialize(ifp);
1373 if (rv != 0) {
1374 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1375 return rv;
1376 }
1377 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1378 ether_ifattach(ifp, adapter->hw.mac.addr);
1379 aprint_normal_dev(dev, "Ethernet address %s\n",
1380 ether_sprintf(adapter->hw.mac.addr));
1381 /*
1382 * We use per TX queue softint, so if_deferred_start_init() isn't
1383 * used.
1384 */
1385 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1386
1387 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1388
1389 /*
1390 * Tell the upper layer(s) we support long frames.
1391 */
1392 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1393
1394 /* Set capability flags */
1395 ifp->if_capabilities |= IFCAP_RXCSUM
1396 | IFCAP_TXCSUM
1397 | IFCAP_TSOv4
1398 | IFCAP_TSOv6;
1399 ifp->if_capenable = 0;
1400
1401 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1402 | ETHERCAP_VLAN_HWCSUM
1403 | ETHERCAP_JUMBO_MTU
1404 | ETHERCAP_VLAN_MTU;
1405
1406 /* Enable the above capabilities by default */
1407 ec->ec_capenable = ec->ec_capabilities;
1408
1409 /*
1410 * Don't turn this on by default, if vlans are
1411 * created on another pseudo device (eg. lagg)
1412 * then vlan events are not passed thru, breaking
1413 * operation, but with HW FILTER off it works. If
1414 * using vlans directly on the ixgbe driver you can
1415 * enable this and get full hardware tag filtering.
1416 */
1417 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1418
1419 /*
1420 * Specify the media types supported by this adapter and register
1421 * callbacks to update media and link information
1422 */
1423 ec->ec_ifmedia = &adapter->media;
1424 ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
1425 ixgbe_media_status, &adapter->core_mtx);
1426
1427 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1428 ixgbe_add_media_types(adapter);
1429
1430 /* Set autoselect media by default */
1431 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1432
1433 if_register(ifp);
1434
1435 return (0);
1436 } /* ixgbe_setup_interface */
1437
1438 /************************************************************************
1439 * ixgbe_add_media_types
1440 ************************************************************************/
1441 static void
1442 ixgbe_add_media_types(struct adapter *adapter)
1443 {
1444 struct ixgbe_hw *hw = &adapter->hw;
1445 u64 layer;
1446
1447 layer = adapter->phy_layer;
1448
1449 #define ADD(mm, dd) \
1450 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1451
1452 ADD(IFM_NONE, 0);
1453
1454 /* Media types with matching NetBSD media defines */
1455 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1456 ADD(IFM_10G_T | IFM_FDX, 0);
1457 }
1458 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1459 ADD(IFM_1000_T | IFM_FDX, 0);
1460 }
1461 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1462 ADD(IFM_100_TX | IFM_FDX, 0);
1463 }
1464 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1465 ADD(IFM_10_T | IFM_FDX, 0);
1466 }
1467
1468 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1469 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1470 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1471 }
1472
1473 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1474 ADD(IFM_10G_LR | IFM_FDX, 0);
1475 if (hw->phy.multispeed_fiber) {
1476 ADD(IFM_1000_LX | IFM_FDX, 0);
1477 }
1478 }
1479 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1480 ADD(IFM_10G_SR | IFM_FDX, 0);
1481 if (hw->phy.multispeed_fiber) {
1482 ADD(IFM_1000_SX | IFM_FDX, 0);
1483 }
1484 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1485 ADD(IFM_1000_SX | IFM_FDX, 0);
1486 }
1487 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1488 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1489 }
1490
1491 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1492 ADD(IFM_10G_KR | IFM_FDX, 0);
1493 }
1494 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1495 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1496 }
1497 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1498 ADD(IFM_1000_KX | IFM_FDX, 0);
1499 }
1500 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1501 ADD(IFM_2500_KX | IFM_FDX, 0);
1502 }
1503 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1504 ADD(IFM_2500_T | IFM_FDX, 0);
1505 }
1506 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1507 ADD(IFM_5000_T | IFM_FDX, 0);
1508 }
1509 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1510 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1511 /* XXX no ifmedia_set? */
1512
1513 ADD(IFM_AUTO, 0);
1514
1515 #undef ADD
1516 } /* ixgbe_add_media_types */
1517
1518 /************************************************************************
1519 * ixgbe_is_sfp
1520 ************************************************************************/
1521 static inline bool
1522 ixgbe_is_sfp(struct ixgbe_hw *hw)
1523 {
1524 switch (hw->mac.type) {
1525 case ixgbe_mac_82598EB:
1526 if (hw->phy.type == ixgbe_phy_nl)
1527 return (TRUE);
1528 return (FALSE);
1529 case ixgbe_mac_82599EB:
1530 case ixgbe_mac_X550EM_x:
1531 case ixgbe_mac_X550EM_a:
1532 switch (hw->mac.ops.get_media_type(hw)) {
1533 case ixgbe_media_type_fiber:
1534 case ixgbe_media_type_fiber_qsfp:
1535 return (TRUE);
1536 default:
1537 return (FALSE);
1538 }
1539 default:
1540 return (FALSE);
1541 }
1542 } /* ixgbe_is_sfp */
1543
1544 static void
1545 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1546 {
1547
1548 KASSERT(mutex_owned(&adapter->admin_mtx));
1549
1550 if (__predict_true(adapter->osdep.detaching == false)) {
1551 if (adapter->admin_pending == 0)
1552 workqueue_enqueue(adapter->admin_wq,
1553 &adapter->admin_wc, NULL);
1554 adapter->admin_pending = 1;
1555 }
1556 }
1557
1558 /************************************************************************
1559 * ixgbe_config_link
1560 ************************************************************************/
1561 static void
1562 ixgbe_config_link(struct adapter *adapter)
1563 {
1564 struct ixgbe_hw *hw = &adapter->hw;
1565 u32 autoneg, err = 0;
1566 u32 task_requests = 0;
1567 bool sfp, negotiate = false;
1568
1569 sfp = ixgbe_is_sfp(hw);
1570
1571 if (sfp) {
1572 if (hw->phy.multispeed_fiber) {
1573 ixgbe_enable_tx_laser(hw);
1574 task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1575 }
1576 task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1577
1578 mutex_enter(&adapter->admin_mtx);
1579 adapter->task_requests |= task_requests;
1580 ixgbe_schedule_admin_tasklet(adapter);
1581 mutex_exit(&adapter->admin_mtx);
1582 } else {
1583 struct ifmedia *ifm = &adapter->media;
1584
1585 if (hw->mac.ops.check_link)
1586 err = ixgbe_check_link(hw, &adapter->link_speed,
1587 &adapter->link_up, FALSE);
1588 if (err)
1589 return;
1590
1591 /*
1592 * Check if it's the first call. If it's the first call,
1593 * get value for auto negotiation.
1594 */
1595 autoneg = hw->phy.autoneg_advertised;
1596 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1597 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1598 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1599 &negotiate);
1600 if (err)
1601 return;
1602 if (hw->mac.ops.setup_link)
1603 err = hw->mac.ops.setup_link(hw, autoneg,
1604 adapter->link_up);
1605 }
1606
1607 } /* ixgbe_config_link */
1608
1609 /************************************************************************
1610 * ixgbe_update_stats_counters - Update board statistics counters.
1611 ************************************************************************/
1612 static void
1613 ixgbe_update_stats_counters(struct adapter *adapter)
1614 {
1615 struct ifnet *ifp = adapter->ifp;
1616 struct ixgbe_hw *hw = &adapter->hw;
1617 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1618 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1619 u64 total_missed_rx = 0;
1620 uint64_t crcerrs, rlec;
1621 unsigned int queue_counters;
1622 int i;
1623
1624 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1625 stats->crcerrs.ev_count += crcerrs;
1626 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1627 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1628 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1629 if (hw->mac.type >= ixgbe_mac_X550)
1630 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1631
1632 /* 16 registers exist */
1633 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1634 for (i = 0; i < queue_counters; i++) {
1635 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1636 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1637 if (hw->mac.type >= ixgbe_mac_82599EB) {
1638 stats->qprdc[i].ev_count
1639 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1640 }
1641 }
1642
1643 /* 8 registers exist */
1644 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1645 uint32_t mp;
1646
1647 /* MPC */
1648 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1649 /* global total per queue */
1650 stats->mpc[i].ev_count += mp;
1651 /* running comprehensive total for stats display */
1652 total_missed_rx += mp;
1653
1654 if (hw->mac.type == ixgbe_mac_82598EB)
1655 stats->rnbc[i].ev_count
1656 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1657
1658 stats->pxontxc[i].ev_count
1659 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1660 stats->pxofftxc[i].ev_count
1661 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1662 if (hw->mac.type >= ixgbe_mac_82599EB) {
1663 stats->pxonrxc[i].ev_count
1664 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1665 stats->pxoffrxc[i].ev_count
1666 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1667 stats->pxon2offc[i].ev_count
1668 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1669 } else {
1670 stats->pxonrxc[i].ev_count
1671 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1672 stats->pxoffrxc[i].ev_count
1673 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1674 }
1675 }
1676 stats->mpctotal.ev_count += total_missed_rx;
1677
1678 /* Document says M[LR]FC are valid when link is up and 10Gbps */
1679 if ((adapter->link_active == LINK_STATE_UP)
1680 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1681 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1682 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1683 }
1684 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1685 stats->rlec.ev_count += rlec;
1686
1687 /* Hardware workaround, gprc counts missed packets */
1688 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1689
1690 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1691 stats->lxontxc.ev_count += lxon;
1692 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1693 stats->lxofftxc.ev_count += lxoff;
1694 total = lxon + lxoff;
1695
1696 if (hw->mac.type != ixgbe_mac_82598EB) {
1697 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1698 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1699 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1700 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1701 - total * ETHER_MIN_LEN;
1702 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1703 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1704 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1705 stats->lxoffrxc.ev_count
1706 += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1707 } else {
1708 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1709 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1710 /* 82598 only has a counter in the high register */
1711 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1712 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH)
1713 - total * ETHER_MIN_LEN;
1714 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1715 }
1716
1717 /*
1718 * Workaround: mprc hardware is incorrectly counting
1719 * broadcasts, so for now we subtract those.
1720 */
1721 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1722 stats->bprc.ev_count += bprc;
1723 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1724 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1725
1726 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1727 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1728 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1729 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1730 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1731 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1732
1733 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1734 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1735 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1736
1737 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1738 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1739 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1740 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1741 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1742 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1743 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1744 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1745 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1746 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1747 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1748 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1749 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1750 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1751 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1752 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1753 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1754 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1755 /* Only read FCOE on 82599 */
1756 if (hw->mac.type != ixgbe_mac_82598EB) {
1757 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1758 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1759 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1760 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1761 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1762 }
1763
1764 /*
1765 * Fill out the OS statistics structure. Only RX errors are required
1766 * here because all TX counters are incremented in the TX path and
1767 * normal RX counters are prepared in ether_input().
1768 */
1769 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1770 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1771 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1772 IF_STAT_PUTREF(ifp);
1773 } /* ixgbe_update_stats_counters */
1774
1775 /************************************************************************
1776 * ixgbe_add_hw_stats
1777 *
1778 * Add sysctl variables, one per statistic, to the system.
1779 ************************************************************************/
1780 static void
1781 ixgbe_add_hw_stats(struct adapter *adapter)
1782 {
1783 device_t dev = adapter->dev;
1784 const struct sysctlnode *rnode, *cnode;
1785 struct sysctllog **log = &adapter->sysctllog;
1786 struct tx_ring *txr = adapter->tx_rings;
1787 struct rx_ring *rxr = adapter->rx_rings;
1788 struct ixgbe_hw *hw = &adapter->hw;
1789 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1790 const char *xname = device_xname(dev);
1791 int i;
1792
1793 /* Driver Statistics */
1794 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1795 NULL, xname, "Driver tx dma soft fail EFBIG");
1796 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1797 NULL, xname, "m_defrag() failed");
1798 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1799 NULL, xname, "Driver tx dma hard fail EFBIG");
1800 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1801 NULL, xname, "Driver tx dma hard fail EINVAL");
1802 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1803 NULL, xname, "Driver tx dma hard fail other");
1804 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1805 NULL, xname, "Driver tx dma soft fail EAGAIN");
1806 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1807 NULL, xname, "Driver tx dma soft fail ENOMEM");
1808 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1809 NULL, xname, "Watchdog timeouts");
1810 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1811 NULL, xname, "TSO errors");
1812 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1813 NULL, xname, "Admin MSI-X IRQ Handled");
1814 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1815 NULL, xname, "Link event");
1816 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1817 NULL, xname, "SFP+ module event");
1818 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1819 NULL, xname, "Multispeed event");
1820 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1821 NULL, xname, "External PHY event");
1822
1823 /* Max number of traffic class is 8 */
1824 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1825 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1826 snprintf(adapter->tcs[i].evnamebuf,
1827 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1828 xname, i);
1829 if (i < __arraycount(stats->mpc)) {
1830 evcnt_attach_dynamic(&stats->mpc[i],
1831 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1832 "RX Missed Packet Count");
1833 if (hw->mac.type == ixgbe_mac_82598EB)
1834 evcnt_attach_dynamic(&stats->rnbc[i],
1835 EVCNT_TYPE_MISC, NULL,
1836 adapter->tcs[i].evnamebuf,
1837 "Receive No Buffers");
1838 }
1839 if (i < __arraycount(stats->pxontxc)) {
1840 evcnt_attach_dynamic(&stats->pxontxc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1842 "pxontxc");
1843 evcnt_attach_dynamic(&stats->pxonrxc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1845 "pxonrxc");
1846 evcnt_attach_dynamic(&stats->pxofftxc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1848 "pxofftxc");
1849 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1850 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1851 "pxoffrxc");
1852 if (hw->mac.type >= ixgbe_mac_82599EB)
1853 evcnt_attach_dynamic(&stats->pxon2offc[i],
1854 EVCNT_TYPE_MISC, NULL,
1855 adapter->tcs[i].evnamebuf,
1856 "pxon2offc");
1857 }
1858 }
1859
1860 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1861 #ifdef LRO
1862 struct lro_ctrl *lro = &rxr->lro;
1863 #endif /* LRO */
1864
1865 snprintf(adapter->queues[i].evnamebuf,
1866 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1867 xname, i);
1868 snprintf(adapter->queues[i].namebuf,
1869 sizeof(adapter->queues[i].namebuf), "q%d", i);
1870
1871 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1872 aprint_error_dev(dev, "could not create sysctl root\n");
1873 break;
1874 }
1875
1876 if (sysctl_createv(log, 0, &rnode, &rnode,
1877 0, CTLTYPE_NODE,
1878 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1879 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1880 break;
1881
1882 if (sysctl_createv(log, 0, &rnode, &cnode,
1883 CTLFLAG_READWRITE, CTLTYPE_INT,
1884 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1885 ixgbe_sysctl_interrupt_rate_handler, 0,
1886 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1887 break;
1888
1889 if (sysctl_createv(log, 0, &rnode, &cnode,
1890 CTLFLAG_READONLY, CTLTYPE_INT,
1891 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1892 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1893 0, CTL_CREATE, CTL_EOL) != 0)
1894 break;
1895
1896 if (sysctl_createv(log, 0, &rnode, &cnode,
1897 CTLFLAG_READONLY, CTLTYPE_INT,
1898 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1899 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1900 0, CTL_CREATE, CTL_EOL) != 0)
1901 break;
1902
1903 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1904 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1905 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1906 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1907 "Handled queue in softint");
1908 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1909 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1910 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1911 NULL, adapter->queues[i].evnamebuf, "TSO");
1912 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1913 NULL, adapter->queues[i].evnamebuf,
1914 "TX Queue No Descriptor Available");
1915 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1916 NULL, adapter->queues[i].evnamebuf,
1917 "Queue Packets Transmitted");
1918 #ifndef IXGBE_LEGACY_TX
1919 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1920 NULL, adapter->queues[i].evnamebuf,
1921 "Packets dropped in pcq");
1922 #endif
1923
1924 if (sysctl_createv(log, 0, &rnode, &cnode,
1925 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1926 SYSCTL_DESCR("Receive Descriptor next to check"),
1927 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1928 CTL_CREATE, CTL_EOL) != 0)
1929 break;
1930
1931 if (sysctl_createv(log, 0, &rnode, &cnode,
1932 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1933 SYSCTL_DESCR("Receive Descriptor Head"),
1934 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1935 CTL_CREATE, CTL_EOL) != 0)
1936 break;
1937
1938 if (sysctl_createv(log, 0, &rnode, &cnode,
1939 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1940 SYSCTL_DESCR("Receive Descriptor Tail"),
1941 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1942 CTL_CREATE, CTL_EOL) != 0)
1943 break;
1944
1945 if (i < __arraycount(stats->qprc)) {
1946 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1947 NULL, adapter->queues[i].evnamebuf, "qprc");
1948 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1949 NULL, adapter->queues[i].evnamebuf, "qptc");
1950 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1951 NULL, adapter->queues[i].evnamebuf, "qbrc");
1952 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1953 NULL, adapter->queues[i].evnamebuf, "qbtc");
1954 if (hw->mac.type >= ixgbe_mac_82599EB)
1955 evcnt_attach_dynamic(&stats->qprdc[i],
1956 EVCNT_TYPE_MISC, NULL,
1957 adapter->queues[i].evnamebuf, "qprdc");
1958 }
1959
1960 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1961 NULL, adapter->queues[i].evnamebuf,
1962 "Queue Packets Received");
1963 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1964 NULL, adapter->queues[i].evnamebuf,
1965 "Queue Bytes Received");
1966 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1967 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1968 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1969 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1970 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1971 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1972 #ifdef LRO
1973 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1974 CTLFLAG_RD, &lro->lro_queued, 0,
1975 "LRO Queued");
1976 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1977 CTLFLAG_RD, &lro->lro_flushed, 0,
1978 "LRO Flushed");
1979 #endif /* LRO */
1980 }
1981
1982 /* MAC stats get their own sub node */
1983
1984 snprintf(stats->namebuf,
1985 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1986
1987 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "rx csum offload - IP");
1989 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "rx csum offload - L4");
1991 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "rx csum offload - IP bad");
1993 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "rx csum offload - L4 bad");
1995 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Interrupt conditions zero");
1997 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "Legacy interrupts");
1999
2000 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "CRC Errors");
2002 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Illegal Byte Errors");
2004 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Byte Errors");
2006 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "MAC Short Packets Discarded");
2008 if (hw->mac.type >= ixgbe_mac_X550)
2009 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Bad SFD");
2011 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Total Packets Missed");
2013 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "MAC Local Faults");
2015 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "MAC Remote Faults");
2017 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Receive Length Errors");
2019 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Link XON Transmitted");
2021 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Link XON Received");
2023 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2024 stats->namebuf, "Link XOFF Transmitted");
2025 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Link XOFF Received");
2027
2028 /* Packet Reception Stats */
2029 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Total Octets Received");
2031 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Good Octets Received");
2033 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "Total Packets Received");
2035 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "Good Packets Received");
2037 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "Multicast Packets Received");
2039 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "Broadcast Packets Received");
2041 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "64 byte frames received ");
2043 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "65-127 byte frames received");
2045 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "128-255 byte frames received");
2047 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2048 stats->namebuf, "256-511 byte frames received");
2049 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2050 stats->namebuf, "512-1023 byte frames received");
2051 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2052 stats->namebuf, "1023-1522 byte frames received");
2053 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2054 stats->namebuf, "Receive Undersized");
2055 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2056 stats->namebuf, "Fragmented Packets Received ");
2057 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2058 stats->namebuf, "Oversized Packets Received");
2059 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2060 stats->namebuf, "Received Jabber");
2061 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2062 stats->namebuf, "Management Packets Received");
2063 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2064 stats->namebuf, "Management Packets Dropped");
2065 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2066 stats->namebuf, "Checksum Errors");
2067
2068 /* Packet Transmission Stats */
2069 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2070 stats->namebuf, "Good Octets Transmitted");
2071 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2072 stats->namebuf, "Total Packets Transmitted");
2073 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2074 stats->namebuf, "Good Packets Transmitted");
2075 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2076 stats->namebuf, "Broadcast Packets Transmitted");
2077 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2078 stats->namebuf, "Multicast Packets Transmitted");
2079 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2080 stats->namebuf, "Management Packets Transmitted");
2081 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2082 stats->namebuf, "64 byte frames transmitted ");
2083 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2084 stats->namebuf, "65-127 byte frames transmitted");
2085 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2086 stats->namebuf, "128-255 byte frames transmitted");
2087 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2088 stats->namebuf, "256-511 byte frames transmitted");
2089 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2090 stats->namebuf, "512-1023 byte frames transmitted");
2091 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2092 stats->namebuf, "1024-1522 byte frames transmitted");
2093 } /* ixgbe_add_hw_stats */
2094
/************************************************************************
 * ixgbe_clear_evcnt
 *
 *   Reset every event counter attached by ixgbe_add_hw_stats() -- the
 *   driver's software counters, the per-traffic-class counters, the
 *   per-queue TX/RX counters, and the accumulated MAC statistics --
 *   back to zero.  The set of counters cleared here mirrors, and must
 *   stay in sync with, the set attached in ixgbe_add_hw_stats().
 *   NOTE(review): no lock is taken here; assumes the caller serializes
 *   against concurrent counter updates -- TODO confirm.
 ************************************************************************/
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver software counters */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->admin_irqev.ev_count = 0;
	adapter->link_workev.ev_count = 0;
	adapter->mod_workev.ev_count = 0;
	adapter->msf_workev.ev_count = 0;
	adapter->phy_workev.ev_count = 0;

	/*
	 * Per-traffic-class counters.  The __arraycount() and mac.type
	 * guards match the conditions under which each counter was
	 * attached in ixgbe_add_hw_stats().
	 */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			stats->mpc[i].ev_count = 0;
			if (hw->mac.type == ixgbe_mac_82598EB)
				stats->rnbc[i].ev_count = 0;
		}
		if (i < __arraycount(stats->pxontxc)) {
			stats->pxontxc[i].ev_count = 0;
			stats->pxonrxc[i].ev_count = 0;
			stats->pxofftxc[i].ev_count = 0;
			stats->pxoffrxc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->pxon2offc[i].ev_count = 0;
		}
	}

	/*
	 * Per-queue counters.  (txr was already initialized to
	 * adapter->tx_rings above; this re-assignment is redundant but
	 * harmless.)
	 */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
		txr->tso_tx.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		/* Plain per-ring tallies (not evcnts) */
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	/* MAC statistics */
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}
2226
2227 /************************************************************************
2228 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2229 *
2230 * Retrieves the TDH value from the hardware
2231 ************************************************************************/
2232 static int
2233 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2234 {
2235 struct sysctlnode node = *rnode;
2236 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2237 struct adapter *adapter;
2238 uint32_t val;
2239
2240 if (!txr)
2241 return (0);
2242
2243 adapter = txr->adapter;
2244 if (ixgbe_fw_recovery_mode_swflag(adapter))
2245 return (EPERM);
2246
2247 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2248 node.sysctl_data = &val;
2249 return sysctl_lookup(SYSCTLFN_CALL(&node));
2250 } /* ixgbe_sysctl_tdh_handler */
2251
2252 /************************************************************************
2253 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2254 *
2255 * Retrieves the TDT value from the hardware
2256 ************************************************************************/
2257 static int
2258 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2259 {
2260 struct sysctlnode node = *rnode;
2261 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2262 struct adapter *adapter;
2263 uint32_t val;
2264
2265 if (!txr)
2266 return (0);
2267
2268 adapter = txr->adapter;
2269 if (ixgbe_fw_recovery_mode_swflag(adapter))
2270 return (EPERM);
2271
2272 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2273 node.sysctl_data = &val;
2274 return sysctl_lookup(SYSCTLFN_CALL(&node));
2275 } /* ixgbe_sysctl_tdt_handler */
2276
2277 /************************************************************************
2278 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2279 * handler function
2280 *
2281 * Retrieves the next_to_check value
2282 ************************************************************************/
2283 static int
2284 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2285 {
2286 struct sysctlnode node = *rnode;
2287 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2288 struct adapter *adapter;
2289 uint32_t val;
2290
2291 if (!rxr)
2292 return (0);
2293
2294 adapter = rxr->adapter;
2295 if (ixgbe_fw_recovery_mode_swflag(adapter))
2296 return (EPERM);
2297
2298 val = rxr->next_to_check;
2299 node.sysctl_data = &val;
2300 return sysctl_lookup(SYSCTLFN_CALL(&node));
2301 } /* ixgbe_sysctl_next_to_check_handler */
2302
2303 /************************************************************************
2304 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2305 *
2306 * Retrieves the RDH value from the hardware
2307 ************************************************************************/
2308 static int
2309 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2310 {
2311 struct sysctlnode node = *rnode;
2312 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2313 struct adapter *adapter;
2314 uint32_t val;
2315
2316 if (!rxr)
2317 return (0);
2318
2319 adapter = rxr->adapter;
2320 if (ixgbe_fw_recovery_mode_swflag(adapter))
2321 return (EPERM);
2322
2323 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2324 node.sysctl_data = &val;
2325 return sysctl_lookup(SYSCTLFN_CALL(&node));
2326 } /* ixgbe_sysctl_rdh_handler */
2327
2328 /************************************************************************
2329 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2330 *
2331 * Retrieves the RDT value from the hardware
2332 ************************************************************************/
2333 static int
2334 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2335 {
2336 struct sysctlnode node = *rnode;
2337 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2338 struct adapter *adapter;
2339 uint32_t val;
2340
2341 if (!rxr)
2342 return (0);
2343
2344 adapter = rxr->adapter;
2345 if (ixgbe_fw_recovery_mode_swflag(adapter))
2346 return (EPERM);
2347
2348 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2349 node.sysctl_data = &val;
2350 return sysctl_lookup(SYSCTLFN_CALL(&node));
2351 } /* ixgbe_sysctl_rdt_handler */
2352
2353 static int
2354 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2355 {
2356 struct ifnet *ifp = &ec->ec_if;
2357 struct adapter *adapter = ifp->if_softc;
2358 int rv;
2359
2360 if (set)
2361 rv = ixgbe_register_vlan(adapter, vid);
2362 else
2363 rv = ixgbe_unregister_vlan(adapter, vid);
2364
2365 if (rv != 0)
2366 return rv;
2367
2368 /*
2369 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2370 * or 0 to 1.
2371 */
2372 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2373 ixgbe_setup_vlan_hw_tagging(adapter);
2374
2375 return rv;
2376 }
2377
2378 /************************************************************************
2379 * ixgbe_register_vlan
2380 *
2381 * Run via vlan config EVENT, it enables us to use the
2382 * HW Filter table since we can get the vlan id. This
2383 * just creates the entry in the soft version of the
2384 * VFTA, init will repopulate the real table.
2385 ************************************************************************/
2386 static int
2387 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2388 {
2389 u16 index, bit;
2390 int error;
2391
2392 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2393 return EINVAL;
2394
2395 IXGBE_CORE_LOCK(adapter);
2396 index = (vtag >> 5) & 0x7F;
2397 bit = vtag & 0x1F;
2398 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2399 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2400 true);
2401 IXGBE_CORE_UNLOCK(adapter);
2402 if (error != 0)
2403 error = EACCES;
2404
2405 return error;
2406 } /* ixgbe_register_vlan */
2407
2408 /************************************************************************
2409 * ixgbe_unregister_vlan
2410 *
2411 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2412 ************************************************************************/
2413 static int
2414 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2415 {
2416 u16 index, bit;
2417 int error;
2418
2419 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2420 return EINVAL;
2421
2422 IXGBE_CORE_LOCK(adapter);
2423 index = (vtag >> 5) & 0x7F;
2424 bit = vtag & 0x1F;
2425 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2426 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2427 true);
2428 IXGBE_CORE_UNLOCK(adapter);
2429 if (error != 0)
2430 error = EACCES;
2431
2432 return error;
2433 } /* ixgbe_unregister_vlan */
2434
2435 static void
2436 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2437 {
2438 struct ethercom *ec = &adapter->osdep.ec;
2439 struct ixgbe_hw *hw = &adapter->hw;
2440 struct rx_ring *rxr;
2441 u32 ctrl;
2442 int i;
2443 bool hwtagging;
2444
2445 /* Enable HW tagging only if any vlan is attached */
2446 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2447 && VLAN_ATTACHED(ec);
2448
2449 /* Setup the queues for vlans */
2450 for (i = 0; i < adapter->num_queues; i++) {
2451 rxr = &adapter->rx_rings[i];
2452 /*
2453 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2454 */
2455 if (hw->mac.type != ixgbe_mac_82598EB) {
2456 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2457 if (hwtagging)
2458 ctrl |= IXGBE_RXDCTL_VME;
2459 else
2460 ctrl &= ~IXGBE_RXDCTL_VME;
2461 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2462 }
2463 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2464 }
2465
2466 /* VLAN hw tagging for 82598 */
2467 if (hw->mac.type == ixgbe_mac_82598EB) {
2468 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2469 if (hwtagging)
2470 ctrl |= IXGBE_VLNCTRL_VME;
2471 else
2472 ctrl &= ~IXGBE_VLNCTRL_VME;
2473 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2474 }
2475 } /* ixgbe_setup_vlan_hw_tagging */
2476
/*
 * Program all VLAN-related hardware state: tag stripping (part 1) and
 * the VLAN filter table plus its global enable bit (part 2).  The
 * shadow_vfta array is rebuilt from scratch out of the ethercom's
 * current VLAN ID list and then written to the HW VFTA registers.
 */
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 ctrl;
	struct vlanid_list *vlanidp;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixgbe_setup_vlan_hw_tagging(adapter);

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		/* 32 VLAN IDs per VFTA word. */
		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);
	/* Push the rebuilt soft table into the HW VFTA registers. */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
2525
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into, print them, and warn when
 *   the slot's PCIe bandwidth is insufficient for the adapter.
 *   Quad-port devices sit behind an internal PCIe bridge, so for those
 *   the parent bridge's link status is read instead of the device's.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	/* Normal case: query bus info directly from the device. */
	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	if (bus_info_valid) {
		/* Non-QP parts want at least x8, or x4 Gen2. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		/* The QP part wants a x8 Gen3 slot. */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2633
2634 /************************************************************************
2635 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2636 ************************************************************************/
2637 static inline void
2638 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2639 {
2640 struct ixgbe_hw *hw = &adapter->hw;
2641 struct ix_queue *que = &adapter->queues[vector];
2642 u64 queue = 1ULL << vector;
2643 u32 mask;
2644
2645 mutex_enter(&que->dc_mtx);
2646 if (que->disabled_count > 0 && --que->disabled_count > 0)
2647 goto out;
2648
2649 if (hw->mac.type == ixgbe_mac_82598EB) {
2650 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2651 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2652 } else {
2653 mask = (queue & 0xFFFFFFFF);
2654 if (mask)
2655 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2656 mask = (queue >> 32);
2657 if (mask)
2658 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2659 }
2660 out:
2661 mutex_exit(&que->dc_mtx);
2662 } /* ixgbe_enable_queue */
2663
2664 /************************************************************************
2665 * ixgbe_disable_queue_internal
2666 ************************************************************************/
2667 static inline void
2668 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2669 {
2670 struct ixgbe_hw *hw = &adapter->hw;
2671 struct ix_queue *que = &adapter->queues[vector];
2672 u64 queue = 1ULL << vector;
2673 u32 mask;
2674
2675 mutex_enter(&que->dc_mtx);
2676
2677 if (que->disabled_count > 0) {
2678 if (nestok)
2679 que->disabled_count++;
2680 goto out;
2681 }
2682 que->disabled_count++;
2683
2684 if (hw->mac.type == ixgbe_mac_82598EB) {
2685 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2686 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2687 } else {
2688 mask = (queue & 0xFFFFFFFF);
2689 if (mask)
2690 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2691 mask = (queue >> 32);
2692 if (mask)
2693 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2694 }
2695 out:
2696 mutex_exit(&que->dc_mtx);
2697 } /* ixgbe_disable_queue_internal */
2698
2699 /************************************************************************
2700 * ixgbe_disable_queue
2701 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/*
	 * Nesting-allowed variant: repeated disables stack and must be
	 * balanced by the same number of ixgbe_enable_queue() calls.
	 */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2708
2709 /************************************************************************
2710 * ixgbe_sched_handle_que - schedule deferred packet processing
2711 ************************************************************************/
2712 static inline void
2713 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2714 {
2715
2716 if (que->txrx_use_workqueue) {
2717 /*
2718 * adapter->que_wq is bound to each CPU instead of
2719 * each NIC queue to reduce workqueue kthread. As we
2720 * should consider about interrupt affinity in this
2721 * function, the workqueue kthread must be WQ_PERCPU.
2722 * If create WQ_PERCPU workqueue kthread for each NIC
2723 * queue, that number of created workqueue kthread is
2724 * (number of used NIC queue) * (number of CPUs) =
2725 * (number of CPUs) ^ 2 most often.
2726 *
2727 * The same NIC queue's interrupts are avoided by
2728 * masking the queue's interrupt. And different
2729 * NIC queue's interrupts use different struct work
2730 * (que->wq_cookie). So, "enqueued flag" to avoid
2731 * twice workqueue_enqueue() is not required .
2732 */
2733 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2734 } else {
2735 softint_schedule(que->que_si);
2736 }
2737 }
2738
2739 /************************************************************************
2740 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2741 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until deferred processing re-enables it. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr: average bytes/packet over the interval, TX vs RX max. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * If RX work remains, defer it (queue stays masked); otherwise
	 * re-enable this vector now.
	 */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2840
2841 /************************************************************************
2842 * ixgbe_media_status - Media Ioctl callback
2843 *
2844 * Called whenever the user queries the status of
2845 * the interface using ifconfig.
2846 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE without IFM_ACTIVE. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map (supported physical layer, current link speed) to an
	 * ifmedia subtype.  Copper/BASE-T variants first.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper. */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Optical modules; LR/LRM/SR can also link at 1G (1000BASE-LX/SX). */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	return;
} /* ixgbe_media_status */
2984
2985 /************************************************************************
2986 * ixgbe_media_change - Media Ioctl callback
2987 *
2988 * Called when the user changes speed/duplex using
2989 * media/mediopt option with ifconfig.
2990 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; speed changes are not permitted. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware reports it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the new advertise mask (bit 0 = 100M, 1 = 1G, 2 = 10G,
	 * 3 = 10M, 4 = 2.5G, 5 = 5G); 0 means "autonegotiate all".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
3086
3087 /************************************************************************
3088 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3089 ************************************************************************/
static int
ixgbe_msix_admin(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 eims_orig;
	u32 eims_disable = 0;

	++adapter->admin_irqev.ev_count;

	/* Remember which vectors were enabled so we can restore them. */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);

	/*
	 * First get the cause.
	 *
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause.
	 * At least, reading EICR clears lower 16bits of EIMS on 82598.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear all OTHER interrupts with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Decode the cause; deferred tasks may mask some sources. */
	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some OTHER interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);

	return 1;
} /* ixgbe_msix_admin */
3126
/*
 * Decode the admin/"other" interrupt cause bits in 'eicr', request the
 * matching deferred tasks, and accumulate into *eims_disable the EIMS
 * bits the caller must keep masked until those tasks have run.
 * Shared by the MSI-X admin handler and the legacy interrupt handler.
 */
static void
ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr_mask;
	u32 task_requests = 0;
	s32 retval;

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		task_requests |= IXGBE_REQUEST_TASK_LSC;
		*eims_disable |= IXGBE_EIMS_LSC;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/*
		 * An interrupt might not arrive when a module is inserted.
		 * When an link status change interrupt occurred and the driver
		 * still regard SFP as unplugged, issue the module softint
		 * and then issue LSC interrupt.
		 */
		if ((eicr & eicr_mask)
		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
			&& (eicr & IXGBE_EICR_LSC))) {
			task_requests |= IXGBE_REQUEST_TASK_MOD;
			*eims_disable |= IXGBE_EIMS_LSC;
		}

		/* 82599 signals multi-speed fiber events on SDP1. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			task_requests |= IXGBE_REQUEST_TASK_MSF;
			*eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* Causes below do not exist on 82598. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director reinit; cas guards against double-schedule */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
				task_requests |= IXGBE_REQUEST_TASK_FDIR;
				/* Disable the interrupt */
				*eims_disable |= IXGBE_EIMS_FLOW_DIR;
			}
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			/* Disable interrupt to prevent log spam */
			*eims_disable |= IXGBE_EICR_ECC;
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a reports over-temp on SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				/* Disable interrupt to prevent log spam */
				*eims_disable |= IXGBE_EICR_GPI_SDP0_X550EM_a;

				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev,
				    "CRITICAL: OVER TEMP!! "
				    "PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev,
				    "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				/* Disable interrupt to prevent log spam */
				*eims_disable |= IXGBE_EIMS_TS;

				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev,
				    "CRITICAL: OVER TEMP!! "
				    "PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev,
				    "System shutdown required!\n");
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX)) {
			task_requests |= IXGBE_REQUEST_TASK_MBX;
			*eims_disable |= IXGBE_EIMS_MAILBOX;
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		retval = ixgbe_check_fan_failure(adapter, eicr, true);
		if (retval == IXGBE_ERR_FAN_FAILURE) {
			/* Disable interrupt to prevent log spam */
			*eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		task_requests |= IXGBE_REQUEST_TASK_PHY;
		*eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
	}

	/* Hand all collected work to the admin tasklet in one shot. */
	if (task_requests != 0) {
		mutex_enter(&adapter->admin_mtx);
		adapter->task_requests |= task_requests;
		ixgbe_schedule_admin_tasklet(adapter);
		mutex_exit(&adapter->admin_mtx);
	}

}
3253
3254 static void
3255 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3256 {
3257
3258 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3259 itr |= itr << 16;
3260 else
3261 itr |= IXGBE_EITR_CNT_WDIS;
3262
3263 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3264 }
3265
3266
3267 /************************************************************************
3268 * ixgbe_sysctl_interrupt_rate_handler
3269 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* EITR interval field lives in bits 3..11; value is in 2us units. */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	/* Report the current rate; on write, 'rate' holds the new value. */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;
		/* Convert interrupts/sec back into the shifted 2us field. */
		reg |= ((4000000 / rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3320
3321 const struct sysctlnode *
3322 ixgbe_sysctl_instance(struct adapter *adapter)
3323 {
3324 const char *dvname;
3325 struct sysctllog **log;
3326 int rc;
3327 const struct sysctlnode *rnode;
3328
3329 if (adapter->sysctltop != NULL)
3330 return adapter->sysctltop;
3331
3332 log = &adapter->sysctllog;
3333 dvname = device_xname(adapter->dev);
3334
3335 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3336 0, CTLTYPE_NODE, dvname,
3337 SYSCTL_DESCR("ixgbe information and settings"),
3338 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3339 goto err;
3340
3341 return rnode;
3342 err:
3343 device_printf(adapter->dev,
3344 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3345 return NULL;
3346 }
3347
3348 /************************************************************************
3349 * ixgbe_add_device_sysctls
3350 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Debug/informational read-only nodes. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL)
	    != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
	    SYSCTL_DESCR("Number of jumbo buffers per queue"),
	    NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* External PHY sysctls live under their own "phy" subtree. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR(
			    "External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3515
3516 /************************************************************************
3517 * ixgbe_allocate_pci_resources
3518 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0 (the device register window); must be memory-mapped. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3565
3566 static void
3567 ixgbe_free_deferred_handlers(struct adapter *adapter)
3568 {
3569 struct ix_queue *que = adapter->queues;
3570 struct tx_ring *txr = adapter->tx_rings;
3571 int i;
3572
3573 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3574 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3575 if (txr->txr_si != NULL)
3576 softint_disestablish(txr->txr_si);
3577 }
3578 if (que->que_si != NULL)
3579 softint_disestablish(que->que_si);
3580 }
3581 if (adapter->txr_wq != NULL)
3582 workqueue_destroy(adapter->txr_wq);
3583 if (adapter->txr_wq_enqueued != NULL)
3584 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3585 if (adapter->que_wq != NULL)
3586 workqueue_destroy(adapter->que_wq);
3587
3588 if (adapter->admin_wq != NULL) {
3589 workqueue_destroy(adapter->admin_wq);
3590 adapter->admin_wq = NULL;
3591 }
3592 if (adapter->timer_wq != NULL) {
3593 workqueue_destroy(adapter->timer_wq);
3594 adapter->timer_wq = NULL;
3595 }
3596 if (adapter->recovery_mode_timer_wq != NULL) {
3597 /*
3598 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3599 * the recovery_mode_timer workqueue, so call it here.
3600 */
3601 workqueue_wait(adapter->recovery_mode_timer_wq,
3602 &adapter->recovery_mode_timer_wc);
3603 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3604 workqueue_destroy(adapter->recovery_mode_timer_wq);
3605 adapter->recovery_mode_timer_wq = NULL;
3606 }
3607 } /* ixgbe_free_deferred_handlers */
3608
3609 /************************************************************************
3610 * ixgbe_detach - Device removal routine
3611 *
3612 * Called when the driver is being removed.
3613 * Stops the adapter and deallocates all the resources
3614 * that were allocated for driver operation.
3615 *
3616 * return 0 on success, positive on failure
3617 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;
	int	i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while VFs are still bound to this PF. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	/* From here on, no new work may be scheduled for this device. */
	adapter->osdep.detaching = true;
	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls
	 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
	 * directly.
	 */
	ixgbe_setup_low_power_mode(adapter);

	/* Wait for any in-flight callouts to finish before freeing state. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	/* Drain pending admin/timer work items and clear their flags. */
	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
	atomic_store_relaxed(&adapter->admin_pending, 0);
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Softints and workqueues can now be destroyed safely. */
	ixgbe_free_deferred_handlers(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered in attach, in kind order. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irqev);
	evcnt_detach(&adapter->link_workev);
	evcnt_detach(&adapter->mod_workev);
	evcnt_detach(&adapter->msf_workev);
	evcnt_detach(&adapter->phy_workev);

	/* Per-TC counters; some exist only on certain MAC generations. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters (irq, Tx ring, Rx ring, queue stats). */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3812
3813 /************************************************************************
3814 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3815 *
3816 * Prepare the adapter/port for LPLU and/or WoL
3817 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ifnet *ifp = adapter->ifp;
	s32 error = 0;

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/*
		 * Disable PHY resets around the stop so enter_lplu()
		 * operates on a PHY that has not just been reset.
		 */
		hw->phy.reset_disable = true;
		ixgbe_ifstop(ifp, 1);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_ifstop(ifp, 1);
	}

	IXGBE_CORE_LOCK(adapter);

	if (!hw->wol_enabled) {
		/* No WoL: cut PHY power and clear all wakeup controls. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	IXGBE_CORE_UNLOCK(adapter);

	/* 0 on success, or the enter_lplu() error code. */
	return error;
} /* ixgbe_setup_low_power_mode */
3875
3876 /************************************************************************
3877 * ixgbe_shutdown - Shutdown entry point
3878 ************************************************************************/
3879 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Same low-power transition as suspend; see note at the #if 0. */
	error = ixgbe_setup_low_power_mode(adapter);

	return (error);
} /* ixgbe_shutdown */
3892 #endif
3893
3894 /************************************************************************
3895 * ixgbe_suspend
3896 *
3897 * From D0 to D3
3898 ************************************************************************/
3899 static bool
3900 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3901 {
3902 struct adapter *adapter = device_private(dev);
3903 int error = 0;
3904
3905 INIT_DEBUGOUT("ixgbe_suspend: begin");
3906
3907 error = ixgbe_setup_low_power_mode(adapter);
3908
3909 return (error);
3910 } /* ixgbe_suspend */
3911
3912 /************************************************************************
3913 * ixgbe_resume
3914 *
3915 * From D3 to D0
3916 ************************************************************************/
3917 static bool
3918 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3919 {
3920 struct adapter *adapter = device_private(dev);
3921 struct ifnet *ifp = adapter->ifp;
3922 struct ixgbe_hw *hw = &adapter->hw;
3923 u32 wus;
3924
3925 INIT_DEBUGOUT("ixgbe_resume: begin");
3926
3927 IXGBE_CORE_LOCK(adapter);
3928
3929 /* Read & clear WUS register */
3930 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3931 if (wus)
3932 device_printf(dev, "Woken up by (WUS): %#010x\n",
3933 IXGBE_READ_REG(hw, IXGBE_WUS));
3934 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3935 /* And clear WUFC until next low-power transition */
3936 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3937
3938 /*
3939 * Required after D3->D0 transition;
3940 * will re-advertise all previous advertised speeds
3941 */
3942 if (ifp->if_flags & IFF_UP)
3943 ixgbe_init_locked(adapter);
3944
3945 IXGBE_CORE_UNLOCK(adapter);
3946
3947 return true;
3948 } /* ixgbe_resume */
3949
3950 /*
3951 * Set the various hardware offload abilities.
3952 *
3953 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3954 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3955 * mbuf offload flags the driver will understand.
3956 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty on NetBSD: there is no if_hwassist
	 * field to mirror if_capenable into (that is a FreeBSD concept).
	 * Kept as a placeholder so the init flow matches the FreeBSD
	 * driver structure.
	 */
}
3962
3963 /************************************************************************
3964 * ixgbe_init_locked - Init entry point
3965 *
3966 * Used in two ways: It is used by the stack as an init
3967 * entry point in network interface structure. It is also
3968 * used by the driver as a hw/sw initialization routine to
3969 * get to a consistent state.
3970 *
3971 * return 0 on success, positive on failure
3972 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32	txdctl, mhadd;
	u32	rxdctl, rxctrl;
	u32	ctrl_ext;
	bool	unsupported_sfp = false;
	int	i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and driver timers before reprogramming. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_stop(&adapter->recovery_mode_timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the queue-enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic timers stopped above. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4235
4236 /************************************************************************
4237 * ixgbe_init
4238 ************************************************************************/
4239 static int
4240 ixgbe_init(struct ifnet *ifp)
4241 {
4242 struct adapter *adapter = ifp->if_softc;
4243
4244 IXGBE_CORE_LOCK(adapter);
4245 ixgbe_init_locked(adapter);
4246 IXGBE_CORE_UNLOCK(adapter);
4247
4248 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4249 } /* ixgbe_init */
4250
4251 /************************************************************************
4252 * ixgbe_set_ivar
4253 *
4254 * Setup the correct IVAR register for a particular MSI-X interrupt
4255 * (yes this is all very magic and confusing :)
4256 * - entry is the register array entry
4257 * - vector is the MSI-X vector for this queue
4258 * - type is RX/TX/MISC
4259 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the IVAR entry valid in hardware. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: one flat IVAR array; Tx entries live 64 slots
		 * above Rx entries, four 8-bit entries per register.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* Two 8-bit misc entries, selected by entry bit 0. */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			/*
			 * Each IVAR register holds two queues; Rx (type 0)
			 * uses the low byte of each half, Tx (type 1) the
			 * next byte.
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
4303
4304 /************************************************************************
4305 * ixgbe_configure_ivars
4306 ************************************************************************/
4307 static void
4308 ixgbe_configure_ivars(struct adapter *adapter)
4309 {
4310 struct ix_queue *que = adapter->queues;
4311 u32 newitr;
4312
4313 if (ixgbe_max_interrupt_rate > 0)
4314 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4315 else {
4316 /*
4317 * Disable DMA coalescing if interrupt moderation is
4318 * disabled.
4319 */
4320 adapter->dmac = 0;
4321 newitr = 0;
4322 }
4323
4324 for (int i = 0; i < adapter->num_queues; i++, que++) {
4325 struct rx_ring *rxr = &adapter->rx_rings[i];
4326 struct tx_ring *txr = &adapter->tx_rings[i];
4327 /* First the RX queue entry */
4328 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4329 /* ... and the TX */
4330 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4331 /* Set an Initial EITR value */
4332 ixgbe_eitr_write(adapter, que->msix, newitr);
4333 /*
4334 * To eliminate influence of the previous state.
4335 * At this point, Tx/Rx interrupt handler
4336 * (ixgbe_msix_que()) cannot be called, so both
4337 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4338 */
4339 que->eitr_setting = 0;
4340 }
4341
4342 /* For the Link interrupt */
4343 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4344 } /* ixgbe_configure_ivars */
4345
4346 /************************************************************************
4347 * ixgbe_config_gpie
4348 ************************************************************************/
4349 static void
4350 ixgbe_config_gpie(struct adapter *adapter)
4351 {
4352 struct ixgbe_hw *hw = &adapter->hw;
4353 u32 gpie;
4354
4355 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4356
4357 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4358 /* Enable Enhanced MSI-X mode */
4359 gpie |= IXGBE_GPIE_MSIX_MODE
4360 | IXGBE_GPIE_EIAME
4361 | IXGBE_GPIE_PBA_SUPPORT
4362 | IXGBE_GPIE_OCD;
4363 }
4364
4365 /* Fan Failure Interrupt */
4366 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4367 gpie |= IXGBE_SDP1_GPIEN;
4368
4369 /* Thermal Sensor Interrupt */
4370 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4371 gpie |= IXGBE_SDP0_GPIEN_X540;
4372
4373 /* Link detection */
4374 switch (hw->mac.type) {
4375 case ixgbe_mac_82599EB:
4376 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4377 break;
4378 case ixgbe_mac_X550EM_x:
4379 case ixgbe_mac_X550EM_a:
4380 gpie |= IXGBE_SDP0_GPIEN_X540;
4381 break;
4382 default:
4383 break;
4384 }
4385
4386 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4387
4388 } /* ixgbe_config_gpie */
4389
4390 /************************************************************************
4391 * ixgbe_config_delay_values
4392 *
4393 * Requires adapter->max_frame_size to be set.
4394 ************************************************************************/
4395 static void
4396 ixgbe_config_delay_values(struct adapter *adapter)
4397 {
4398 struct ixgbe_hw *hw = &adapter->hw;
4399 u32 rxpb, frame, size, tmp;
4400
4401 frame = adapter->max_frame_size;
4402
4403 /* Calculate High Water */
4404 switch (hw->mac.type) {
4405 case ixgbe_mac_X540:
4406 case ixgbe_mac_X550:
4407 case ixgbe_mac_X550EM_x:
4408 case ixgbe_mac_X550EM_a:
4409 tmp = IXGBE_DV_X540(frame, frame);
4410 break;
4411 default:
4412 tmp = IXGBE_DV(frame, frame);
4413 break;
4414 }
4415 size = IXGBE_BT2KB(tmp);
4416 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4417 hw->fc.high_water[0] = rxpb - size;
4418
4419 /* Now calculate Low Water */
4420 switch (hw->mac.type) {
4421 case ixgbe_mac_X540:
4422 case ixgbe_mac_X550:
4423 case ixgbe_mac_X550EM_x:
4424 case ixgbe_mac_X550EM_a:
4425 tmp = IXGBE_LOW_DV_X540(frame);
4426 break;
4427 default:
4428 tmp = IXGBE_LOW_DV(frame);
4429 break;
4430 }
4431 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4432
4433 hw->fc.pause_time = IXGBE_FC_PAUSE;
4434 hw->fc.send_xon = TRUE;
4435 } /* ixgbe_config_delay_values */
4436
4437 /************************************************************************
4438 * ixgbe_set_rxfilter - Multicast Update
4439 *
4440 * Called whenever multicast address list is updated.
4441 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the multicast address table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI if the table overflows or a
		 * range (addrlo != addrhi) is requested, since ranges
		 * cannot be expressed in the hardware filter.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Program promiscuous/allmulti bits in the Filter Control reg. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/*
		 * The ethercom lock can be dropped here: mta is private
		 * to this adapter and the list walk is complete.
		 */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4497
4498 /************************************************************************
4499 * ixgbe_mc_array_itr
4500 *
4501 * An iterator function needed by the multicast shared code.
4502 * It feeds the shared code routine the addresses in the
4503 * array of ixgbe_set_rxfilter() one by one.
4504 ************************************************************************/
4505 static u8 *
4506 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4507 {
4508 struct ixgbe_mc_addr *mta;
4509
4510 mta = (struct ixgbe_mc_addr *)*update_ptr;
4511 *vmdq = mta->vmdq;
4512
4513 *update_ptr = (u8*)(mta + 1);
4514
4515 return (mta->addr);
4516 } /* ixgbe_mc_array_itr */
4517
4518 /************************************************************************
4519 * ixgbe_local_timer - Timer routine
4520 *
4521 * Checks for link status, updates statistics,
4522 * and runs the watchdog check.
4523 ************************************************************************/
4524 static void
4525 ixgbe_local_timer(void *arg)
4526 {
4527 struct adapter *adapter = arg;
4528
4529 if (adapter->schedule_wqs_ok) {
4530 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4531 workqueue_enqueue(adapter->timer_wq,
4532 &adapter->timer_wc, NULL);
4533 }
4534 }
4535
/*
 * Workqueue body for the 1Hz local timer: probes SFP cage changes,
 * refreshes link state and statistics, aggregates per-queue Tx error
 * counters, and runs the Tx watchdog.  Reschedules itself via
 * callout_reset() unless a watchdog reset is performed.
 */
static void
ixgbe_handle_timer(struct work *wk, void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	IXGBE_CORE_LOCK(adapter);

	/* Check for pluggable optics */
	if (ixgbe_is_sfp(hw)) {
		bool sched_mod_task = false;

		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
			 * any GPIO(SDP). So just schedule TASK_MOD.
			 */
			sched_mod_task = true;
		} else {
			bool was_full, is_full;

			was_full =
			    hw->phy.sfp_type != ixgbe_sfp_type_not_present;
			is_full = ixgbe_sfp_cage_full(hw);

			/* Do probe if cage state changed */
			if (was_full ^ is_full)
				sched_mod_task = true;
		}
		if (sched_mod_task) {
			mutex_enter(&adapter->admin_mtx);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
			ixgbe_schedule_admin_tasklet(adapter);
			mutex_exit(&adapter->admin_mtx);
		}
	}

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum the per-queue Tx DMA setup error counters into the evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

	/* Allow the next timer tick to enqueue us again. */
	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/*
	 * Full reset: ixgbe_init_locked() restarts the callout, so no
	 * callout_reset() is done on this path.
	 */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_timer */
4667
4668 /************************************************************************
4669 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4670 ************************************************************************/
4671 static void
4672 ixgbe_recovery_mode_timer(void *arg)
4673 {
4674 struct adapter *adapter = arg;
4675
4676 if (__predict_true(adapter->osdep.detaching == false)) {
4677 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4678 0, 1) == 0) {
4679 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4680 &adapter->recovery_mode_timer_wc, NULL);
4681 }
4682 }
4683 }
4684
4685 static void
4686 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4687 {
4688 struct adapter *adapter = context;
4689 struct ixgbe_hw *hw = &adapter->hw;
4690
4691 IXGBE_CORE_LOCK(adapter);
4692 if (ixgbe_fw_recovery_mode(hw)) {
4693 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4694 /* Firmware error detected, entering recovery mode */
4695 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4696
4697 if (hw->adapter_stopped == FALSE)
4698 ixgbe_stop_locked(adapter);
4699 }
4700 } else
4701 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4702
4703 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4704 callout_reset(&adapter->recovery_mode_timer, hz,
4705 ixgbe_recovery_mode_timer, adapter);
4706 IXGBE_CORE_UNLOCK(adapter);
4707 } /* ixgbe_handle_recovery_mode_timer */
4708
4709 /************************************************************************
4710 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4711 * bool int_en: true if it's called when the interrupt is enabled.
4712 ************************************************************************/
static void
ixgbe_handle_mod(void *context, bool int_en)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	enum ixgbe_sfp_type last_sfp_type;
	u32 err;
	bool last_unsupported_sfp_recovery;

	/* Caller holds the core lock; it is dropped and retaken below. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Snapshot state so changes made by identify_sfp() can be detected. */
	last_sfp_type = hw->phy.sfp_type;
	last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
	++adapter->mod_workev.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/* Skip the probe while the SFP+ cage is empty. */
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw))
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		/* Print only on the transition into the unsupported state. */
		if (last_unsupported_sfp_recovery == false)
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
	    (hw->phy.sfp_type != last_sfp_type)) {
		/* A module is inserted and changed. */

		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}

out:
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * Adjust media types shown in ifconfig.
	 * The core lock must be dropped here because the ifmedia calls
	 * take their own lock (see comment in ixgbe_handle_admin()).
	 */
	IXGBE_CORE_UNLOCK(adapter);
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_LOCK(adapter);

	/*
	 * Don't schedule MSF event if the chip is 82598. 82598 doesn't support
	 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
	 * flap because the function calls setup_link().
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		mutex_enter(&adapter->admin_mtx);
		if (int_en)
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
		else
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
		mutex_exit(&adapter->admin_mtx);
	}

	/*
	 * Don't call ixgbe_schedule_admin_tasklet() because we are on
	 * the workqueue now.
	 */
} /* ixgbe_handle_mod */
4799
4800
4801 /************************************************************************
4802 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4803 ************************************************************************/
4804 static void
4805 ixgbe_handle_msf(void *context)
4806 {
4807 struct adapter *adapter = context;
4808 struct ixgbe_hw *hw = &adapter->hw;
4809 u32 autoneg;
4810 bool negotiate;
4811
4812 KASSERT(mutex_owned(&adapter->core_mtx));
4813
4814 ++adapter->msf_workev.ev_count;
4815
4816 autoneg = hw->phy.autoneg_advertised;
4817 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4818 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4819 if (hw->mac.ops.setup_link)
4820 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4821 } /* ixgbe_handle_msf */
4822
4823 /************************************************************************
4824 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4825 ************************************************************************/
4826 static void
4827 ixgbe_handle_phy(void *context)
4828 {
4829 struct adapter *adapter = context;
4830 struct ixgbe_hw *hw = &adapter->hw;
4831 int error;
4832
4833 KASSERT(mutex_owned(&adapter->core_mtx));
4834
4835 ++adapter->phy_workev.ev_count;
4836 error = hw->phy.ops.handle_lasi(hw);
4837 if (error == IXGBE_ERR_OVERTEMP)
4838 device_printf(adapter->dev,
4839 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4840 " PHY will downshift to lower power state!\n");
4841 else if (error)
4842 device_printf(adapter->dev,
4843 "Error handling LASI interrupt: %d\n", error);
4844 } /* ixgbe_handle_phy */
4845
/*
 * Workqueue handler that services the deferred "admin" events (link,
 * SFP module, MSF, PHY, FDIR) requested via adapter->task_requests,
 * then re-enables the corresponding interrupt sources in one EIMS write.
 */
static void
ixgbe_handle_admin(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 task_requests;
	u32 eims_enable = 0;

	/* Atomically take the pending request bits and clear them. */
	mutex_enter(&adapter->admin_mtx);
	adapter->admin_pending = 0;
	task_requests = adapter->task_requests;
	adapter->task_requests = 0;
	mutex_exit(&adapter->admin_mtx);

	/*
	 * Hold the IFNET_LOCK across this entire call. This will
	 * prevent additional changes to adapter->phy_layer
	 * and serialize calls to this tasklet. We cannot hold the
	 * CORE_LOCK while calling into the ifmedia functions as
	 * they call ifmedia_lock() and the lock is CORE_LOCK.
	 */
	IFNET_LOCK(ifp);
	IXGBE_CORE_LOCK(adapter);
	if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
		ixgbe_handle_link(adapter);
		eims_enable |= IXGBE_EIMS_LSC;
	}
	/* "WOI" = without interrupt: don't re-enable the source afterward. */
	if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) {
		ixgbe_handle_mod(adapter, false);
	}
	if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
		ixgbe_handle_mod(adapter, true);
		/* The MOD_ABS GPI differs between pre- and post-X540 MACs. */
		if (hw->mac.type >= ixgbe_mac_X540)
			eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
		else
			eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
	}
	if ((task_requests
	    & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
		ixgbe_handle_msf(adapter);
		if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
		    (hw->mac.type == ixgbe_mac_82599EB))
			eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
	}
	if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
		ixgbe_handle_phy(adapter);
		eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
	}
	if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
		ixgbe_reinit_fdir(adapter);
		eims_enable |= IXGBE_EIMS_FLOW_DIR;
	}
#if 0 /* notyet */
	if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
		ixgbe_handle_mbx(adapter);
		eims_enable |= IXGBE_EIMS_MAILBOX;
	}
#endif
	/* Re-enable all serviced interrupt sources in a single write. */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);

	IXGBE_CORE_UNLOCK(adapter);
	IFNET_UNLOCK(ifp);
} /* ixgbe_handle_admin */
4910
4911 static void
4912 ixgbe_ifstop(struct ifnet *ifp, int disable)
4913 {
4914 struct adapter *adapter = ifp->if_softc;
4915
4916 IXGBE_CORE_LOCK(adapter);
4917 ixgbe_stop_locked(adapter);
4918 IXGBE_CORE_UNLOCK(adapter);
4919
4920 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4921 atomic_store_relaxed(&adapter->timer_pending, 0);
4922 }
4923
4924 /************************************************************************
4925 * ixgbe_stop_locked - Stop the hardware
4926 *
4927 * Disables all traffic on the adapter by issuing a
4928 * global reset on the MAC and deallocates TX/RX buffers.
4929 ************************************************************************/
static void
ixgbe_stop_locked(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
	/* Mask interrupts and stop the periodic timer first. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Reset the MAC, then stop it. NOTE(review): adapter_stopped is
	 * cleared to FALSE right before ixgbe_stop_adapter() — presumably
	 * so that call does not short-circuit on an already-stopped flag;
	 * confirm against ixgbe_stop_adapter()'s implementation.
	 */
	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop_locked */
4968
4969 /************************************************************************
4970 * ixgbe_update_link_status - Update OS on link state
4971 *
4972 * Note: Only updates the OS on the cached link state.
4973 * The real check of the hardware only happens with
4974 * a link interrupt.
4975 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a transition into the UP state. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* Notify the network stack of the new state. */
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
5062
5063 /************************************************************************
5064 * ixgbe_config_dmac - Configure DMA Coalescing
5065 ************************************************************************/
5066 static void
5067 ixgbe_config_dmac(struct adapter *adapter)
5068 {
5069 struct ixgbe_hw *hw = &adapter->hw;
5070 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5071
5072 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5073 return;
5074
5075 if (dcfg->watchdog_timer ^ adapter->dmac ||
5076 dcfg->link_speed ^ adapter->link_speed) {
5077 dcfg->watchdog_timer = adapter->dmac;
5078 dcfg->fcoe_en = false;
5079 dcfg->link_speed = adapter->link_speed;
5080 dcfg->num_tcs = 1;
5081
5082 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5083 dcfg->watchdog_timer, dcfg->link_speed);
5084
5085 hw->mac.ops.dmac_config(hw);
5086 }
5087 } /* ixgbe_config_dmac */
5088
5089 /************************************************************************
5090 * ixgbe_enable_intr
5091 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from all enable bits except the per-queue RX/TX sources. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific interrupt sources. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		/*
		 * It's not required to set TCP_TIMER because we don't use
		 * it.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	/* Flush posted register writes before returning. */
	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5170
5171 /************************************************************************
5172 * ixgbe_disable_intr_internal
5173 ************************************************************************/
5174 static void
5175 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5176 {
5177 struct ix_queue *que = adapter->queues;
5178
5179 /* disable interrupts other than queues */
5180 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5181
5182 if (adapter->msix_mem)
5183 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5184
5185 for (int i = 0; i < adapter->num_queues; i++, que++)
5186 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5187
5188 IXGBE_WRITE_FLUSH(&adapter->hw);
5189
5190 } /* ixgbe_do_disable_intr_internal */
5191
5192 /************************************************************************
5193 * ixgbe_disable_intr
5194 ************************************************************************/
/* Disable all interrupts; nested disabling of a queue is permitted. */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5201
5202 /************************************************************************
5203 * ixgbe_ensure_disabled_intr
5204 ************************************************************************/
/* Disable all interrupts; nested disabling of a queue is NOT permitted. */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5211
5212 /************************************************************************
5213 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5214 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr = adapter->tx_rings;
	u32 eicr;
	u32 eims_orig;
	u32 eims_enable = 0;
	u32 eims_disable = 0;

	/* Save the current mask so it can be restored on exit. */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/*
	 * Silicon errata #26 on 82598. Disable all interrupts before reading
	 * EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Read and clear EICR */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	if (eicr == 0) {
		/* Not ours; restore the saved mask and decline the IRQ. */
		adapter->stats.pf.intzero.ev_count++;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
		return 0;
	}

	/* Queue (0) intr */
	if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
		++que->irqs.ev_count;

		/*
		 * The same as ixgbe_msix_que() about
		 * "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);

		/* Defer RX processing; keep queue 0 masked until it runs. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
		/* Disable queue 0 interrupt */
		eims_disable |= 1UL << 0;

	} else
		eims_enable |= IXGBE_EIMC_RTX_QUEUE;

	/* Handle admin-class causes (link, SFP, PHY, ...) in eicr. */
	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS,
	    (eims_orig & ~eims_disable) | eims_enable);

	return 1;
} /* ixgbe_legacy_irq */
5278
5279 /************************************************************************
5280 * ixgbe_free_pciintr_resources
5281 ************************************************************************/
5282 static void
5283 ixgbe_free_pciintr_resources(struct adapter *adapter)
5284 {
5285 struct ix_queue *que = adapter->queues;
5286 int rid;
5287
5288 /*
5289 * Release all msix queue resources:
5290 */
5291 for (int i = 0; i < adapter->num_queues; i++, que++) {
5292 if (que->res != NULL) {
5293 pci_intr_disestablish(adapter->osdep.pc,
5294 adapter->osdep.ihs[i]);
5295 adapter->osdep.ihs[i] = NULL;
5296 }
5297 }
5298
5299 /* Clean the Legacy or Link interrupt last */
5300 if (adapter->vector) /* we are doing MSIX */
5301 rid = adapter->vector;
5302 else
5303 rid = 0;
5304
5305 if (adapter->osdep.ihs[rid] != NULL) {
5306 pci_intr_disestablish(adapter->osdep.pc,
5307 adapter->osdep.ihs[rid]);
5308 adapter->osdep.ihs[rid] = NULL;
5309 }
5310
5311 if (adapter->osdep.intrs != NULL) {
5312 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5313 adapter->osdep.nintrs);
5314 adapter->osdep.intrs = NULL;
5315 }
5316 } /* ixgbe_free_pciintr_resources */
5317
5318 /************************************************************************
5319 * ixgbe_free_pci_resources
5320 ************************************************************************/
5321 static void
5322 ixgbe_free_pci_resources(struct adapter *adapter)
5323 {
5324
5325 ixgbe_free_pciintr_resources(adapter);
5326
5327 if (adapter->osdep.mem_size != 0) {
5328 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5329 adapter->osdep.mem_bus_space_handle,
5330 adapter->osdep.mem_size);
5331 }
5332
5333 } /* ixgbe_free_pci_resources */
5334
5335 /************************************************************************
5336 * ixgbe_set_sysctl_value
5337 ************************************************************************/
5338 static void
5339 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5340 const char *description, int *limit, int value)
5341 {
5342 device_t dev = adapter->dev;
5343 struct sysctllog **log;
5344 const struct sysctlnode *rnode, *cnode;
5345
5346 /*
5347 * It's not required to check recovery mode because this function never
5348 * touches hardware.
5349 */
5350
5351 log = &adapter->sysctllog;
5352 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5353 aprint_error_dev(dev, "could not create sysctl root\n");
5354 return;
5355 }
5356 if (sysctl_createv(log, 0, &rnode, &cnode,
5357 CTLFLAG_READWRITE, CTLTYPE_INT,
5358 name, SYSCTL_DESCR(description),
5359 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5360 aprint_error_dev(dev, "could not create sysctl\n");
5361 *limit = value;
5362 } /* ixgbe_set_sysctl_value */
5363
5364 /************************************************************************
5365 * ixgbe_sysctl_flowcntl
5366 *
5367 * SYSCTL wrapper around setting Flow Control
5368 ************************************************************************/
5369 static int
5370 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5371 {
5372 struct sysctlnode node = *rnode;
5373 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5374 int error, fc;
5375
5376 if (ixgbe_fw_recovery_mode_swflag(adapter))
5377 return (EPERM);
5378
5379 fc = adapter->hw.fc.current_mode;
5380 node.sysctl_data = &fc;
5381 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5382 if (error != 0 || newp == NULL)
5383 return error;
5384
5385 /* Don't bother if it's not changed */
5386 if (fc == adapter->hw.fc.current_mode)
5387 return (0);
5388
5389 return ixgbe_set_flowcntl(adapter, fc);
5390 } /* ixgbe_sysctl_flowcntl */
5391
5392 /************************************************************************
5393 * ixgbe_set_flowcntl - Set flow control
5394 *
5395 * Flow control values:
5396 * 0 - off
5397 * 1 - rx pause
5398 * 2 - tx pause
5399 * 3 - full
5400 ************************************************************************/
5401 static int
5402 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5403 {
5404 switch (fc) {
5405 case ixgbe_fc_rx_pause:
5406 case ixgbe_fc_tx_pause:
5407 case ixgbe_fc_full:
5408 adapter->hw.fc.requested_mode = fc;
5409 if (adapter->num_queues > 1)
5410 ixgbe_disable_rx_drop(adapter);
5411 break;
5412 case ixgbe_fc_none:
5413 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5414 if (adapter->num_queues > 1)
5415 ixgbe_enable_rx_drop(adapter);
5416 break;
5417 default:
5418 return (EINVAL);
5419 }
5420
5421 #if 0 /* XXX NetBSD */
5422 /* Don't autoneg if forcing a value */
5423 adapter->hw.fc.disable_fc_autoneg = TRUE;
5424 #endif
5425 ixgbe_fc_enable(&adapter->hw);
5426
5427 return (0);
5428 } /* ixgbe_set_flowcntl */
5429
5430 /************************************************************************
5431 * ixgbe_enable_rx_drop
5432 *
5433 * Enable the hardware to drop packets when the buffer is
5434 * full. This is useful with multiqueue, so that no single
5435 * queue being full stalls the entire RX engine. We only
5436 * enable this when Multiqueue is enabled AND Flow Control
5437 * is disabled.
5438 ************************************************************************/
5439 static void
5440 ixgbe_enable_rx_drop(struct adapter *adapter)
5441 {
5442 struct ixgbe_hw *hw = &adapter->hw;
5443 struct rx_ring *rxr;
5444 u32 srrctl;
5445
5446 for (int i = 0; i < adapter->num_queues; i++) {
5447 rxr = &adapter->rx_rings[i];
5448 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5449 srrctl |= IXGBE_SRRCTL_DROP_EN;
5450 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5451 }
5452
5453 /* enable drop for each vf */
5454 for (int i = 0; i < adapter->num_vfs; i++) {
5455 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5456 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5457 IXGBE_QDE_ENABLE));
5458 }
5459 } /* ixgbe_enable_rx_drop */
5460
5461 /************************************************************************
5462 * ixgbe_disable_rx_drop
5463 ************************************************************************/
5464 static void
5465 ixgbe_disable_rx_drop(struct adapter *adapter)
5466 {
5467 struct ixgbe_hw *hw = &adapter->hw;
5468 struct rx_ring *rxr;
5469 u32 srrctl;
5470
5471 for (int i = 0; i < adapter->num_queues; i++) {
5472 rxr = &adapter->rx_rings[i];
5473 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5474 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5475 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5476 }
5477
5478 /* disable drop for each vf */
5479 for (int i = 0; i < adapter->num_vfs; i++) {
5480 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5481 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5482 }
5483 } /* ixgbe_disable_rx_drop */
5484
5485 /************************************************************************
5486 * ixgbe_sysctl_advertise
5487 *
5488 * SYSCTL wrapper around setting advertised speed
5489 ************************************************************************/
5490 static int
5491 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5492 {
5493 struct sysctlnode node = *rnode;
5494 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5495 int error = 0, advertise;
5496
5497 if (ixgbe_fw_recovery_mode_swflag(adapter))
5498 return (EPERM);
5499
5500 advertise = adapter->advertise;
5501 node.sysctl_data = &advertise;
5502 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5503 if (error != 0 || newp == NULL)
5504 return error;
5505
5506 return ixgbe_set_advertise(adapter, advertise);
5507 } /* ixgbe_sysctl_advertise */
5508
5509 /************************************************************************
5510 * ixgbe_set_advertise - Control advertised link speed
5511 *
5512 * Flags:
5513 * 0x00 - Default (all capable link speed)
5514 * 0x01 - advertise 100 Mb
5515 * 0x02 - advertise 1G
5516 * 0x04 - advertise 10G
5517 * 0x08 - advertise 10 Mb
5518 * 0x10 - advertise 2.5G
5519 * 0x20 - advertise 5G
5520 ************************************************************************/
5521 static int
5522 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5523 {
5524 device_t dev;
5525 struct ixgbe_hw *hw;
5526 ixgbe_link_speed speed = 0;
5527 ixgbe_link_speed link_caps = 0;
5528 s32 err = IXGBE_NOT_IMPLEMENTED;
5529 bool negotiate = FALSE;
5530
5531 /* Checks to validate new value */
5532 if (adapter->advertise == advertise) /* no change */
5533 return (0);
5534
5535 dev = adapter->dev;
5536 hw = &adapter->hw;
5537
5538 /* No speed changes for backplane media */
5539 if (hw->phy.media_type == ixgbe_media_type_backplane)
5540 return (ENODEV);
5541
5542 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5543 (hw->phy.multispeed_fiber))) {
5544 device_printf(dev,
5545 "Advertised speed can only be set on copper or "
5546 "multispeed fiber media types.\n");
5547 return (EINVAL);
5548 }
5549
5550 if (advertise < 0x0 || advertise > 0x3f) {
5551 device_printf(dev, "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5552 return (EINVAL);
5553 }
5554
5555 if (hw->mac.ops.get_link_capabilities) {
5556 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5557 &negotiate);
5558 if (err != IXGBE_SUCCESS) {
5559 device_printf(dev, "Unable to determine supported advertise speeds\n");
5560 return (ENODEV);
5561 }
5562 }
5563
5564 /* Set new value and report new advertised mode */
5565 if (advertise & 0x1) {
5566 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5567 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5568 return (EINVAL);
5569 }
5570 speed |= IXGBE_LINK_SPEED_100_FULL;
5571 }
5572 if (advertise & 0x2) {
5573 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5574 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5575 return (EINVAL);
5576 }
5577 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5578 }
5579 if (advertise & 0x4) {
5580 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5581 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5582 return (EINVAL);
5583 }
5584 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5585 }
5586 if (advertise & 0x8) {
5587 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5588 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5589 return (EINVAL);
5590 }
5591 speed |= IXGBE_LINK_SPEED_10_FULL;
5592 }
5593 if (advertise & 0x10) {
5594 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5595 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5596 return (EINVAL);
5597 }
5598 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5599 }
5600 if (advertise & 0x20) {
5601 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5602 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5603 return (EINVAL);
5604 }
5605 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5606 }
5607 if (advertise == 0)
5608 speed = link_caps; /* All capable link speed */
5609
5610 hw->mac.autotry_restart = TRUE;
5611 hw->mac.ops.setup_link(hw, speed, TRUE);
5612 adapter->advertise = advertise;
5613
5614 return (0);
5615 } /* ixgbe_set_advertise */
5616
5617 /************************************************************************
5618 * ixgbe_get_advertise - Get current advertised speed settings
5619 *
5620 * Formatted for sysctl usage.
5621 * Flags:
5622 * 0x01 - advertise 100 Mb
5623 * 0x02 - advertise 1G
5624 * 0x04 - advertise 10G
5625 * 0x08 - advertise 10 Mb (yes, Mb)
5626 * 0x10 - advertise 2.5G
5627 * 0x20 - advertise 5G
5628 ************************************************************************/
5629 static int
5630 ixgbe_get_advertise(struct adapter *adapter)
5631 {
5632 struct ixgbe_hw *hw = &adapter->hw;
5633 int speed;
5634 ixgbe_link_speed link_caps = 0;
5635 s32 err;
5636 bool negotiate = FALSE;
5637
5638 /*
5639 * Advertised speed means nothing unless it's copper or
5640 * multi-speed fiber
5641 */
5642 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5643 !(hw->phy.multispeed_fiber))
5644 return (0);
5645
5646 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5647 if (err != IXGBE_SUCCESS)
5648 return (0);
5649
5650 speed =
5651 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5652 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5653 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5654 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5655 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5656 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5657
5658 return speed;
5659 } /* ixgbe_get_advertise */
5660
5661 /************************************************************************
5662 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5663 *
5664 * Control values:
5665 * 0/1 - off / on (use default value of 1000)
5666 *
5667 * Legal timer values are:
5668 * 50,100,250,500,1000,2000,5000,10000
5669 *
5670 * Turning off interrupt moderation will also turn this off.
5671 ************************************************************************/
5672 static int
5673 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5674 {
5675 struct sysctlnode node = *rnode;
5676 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5677 struct ifnet *ifp = adapter->ifp;
5678 int error;
5679 int newval;
5680
5681 if (ixgbe_fw_recovery_mode_swflag(adapter))
5682 return (EPERM);
5683
5684 newval = adapter->dmac;
5685 node.sysctl_data = &newval;
5686 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5687 if ((error) || (newp == NULL))
5688 return (error);
5689
5690 switch (newval) {
5691 case 0:
5692 /* Disabled */
5693 adapter->dmac = 0;
5694 break;
5695 case 1:
5696 /* Enable and use default */
5697 adapter->dmac = 1000;
5698 break;
5699 case 50:
5700 case 100:
5701 case 250:
5702 case 500:
5703 case 1000:
5704 case 2000:
5705 case 5000:
5706 case 10000:
5707 /* Legal values - allow */
5708 adapter->dmac = newval;
5709 break;
5710 default:
5711 /* Do nothing, illegal value */
5712 return (EINVAL);
5713 }
5714
5715 /* Re-initialize hardware if it's already running */
5716 if (ifp->if_flags & IFF_RUNNING)
5717 ifp->if_init(ifp);
5718
5719 return (0);
5720 }
5721
5722 #ifdef IXGBE_DEBUG
5723 /************************************************************************
5724 * ixgbe_sysctl_power_state
5725 *
5726 * Sysctl to test power states
5727 * Values:
5728 * 0 - set device to D0
5729 * 3 - set device to D3
5730 * (none) - get current device power state
5731 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Not yet ported to NetBSD: this body still uses the FreeBSD
	 * power interfaces (pci_get_powerstate(), DEVICE_SUSPEND(),
	 * DEVICE_RESUME()) and the FreeBSD sysctl "req" argument, so it
	 * does not compile here and is kept only as a porting reference.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	/* No transition requested. */
	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state control is not implemented on NetBSD yet. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5767 #endif
5768
5769 /************************************************************************
5770 * ixgbe_sysctl_wol_enable
5771 *
5772 * Sysctl to enable/disable the WoL capability,
5773 * if supported by the adapter.
5774 *
5775 * Values:
5776 * 0 - disabled
5777 * 1 - enabled
5778 ************************************************************************/
5779 static int
5780 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5781 {
5782 struct sysctlnode node = *rnode;
5783 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5784 struct ixgbe_hw *hw = &adapter->hw;
5785 bool new_wol_enabled;
5786 int error = 0;
5787
5788 /*
5789 * It's not required to check recovery mode because this function never
5790 * touches hardware.
5791 */
5792 new_wol_enabled = hw->wol_enabled;
5793 node.sysctl_data = &new_wol_enabled;
5794 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5795 if ((error) || (newp == NULL))
5796 return (error);
5797 if (new_wol_enabled == hw->wol_enabled)
5798 return (0);
5799
5800 if (new_wol_enabled && !adapter->wol_support)
5801 return (ENODEV);
5802 else
5803 hw->wol_enabled = new_wol_enabled;
5804
5805 return (0);
5806 } /* ixgbe_sysctl_wol_enable */
5807
5808 /************************************************************************
5809 * ixgbe_sysctl_wufc - Wake Up Filter Control
5810 *
5811 * Sysctl to enable/disable the types of packets that the
5812 * adapter will wake up on upon receipt.
5813 * Flags:
5814 * 0x1 - Link Status Change
5815 * 0x2 - Magic Packet
5816 * 0x4 - Direct Exact
5817 * 0x8 - Directed Multicast
5818 * 0x10 - Broadcast
5819 * 0x20 - ARP/IPv4 Request Packet
5820 * 0x40 - Direct IPv4 Packet
5821 * 0x80 - Direct IPv6 Packet
5822 *
5823 * Settings not listed above will cause the sysctl to return an error.
5824 ************************************************************************/
5825 static int
5826 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5827 {
5828 struct sysctlnode node = *rnode;
5829 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5830 int error = 0;
5831 u32 new_wufc;
5832
5833 /*
5834 * It's not required to check recovery mode because this function never
5835 * touches hardware.
5836 */
5837 new_wufc = adapter->wufc;
5838 node.sysctl_data = &new_wufc;
5839 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5840 if ((error) || (newp == NULL))
5841 return (error);
5842 if (new_wufc == adapter->wufc)
5843 return (0);
5844
5845 if (new_wufc & 0xffffff00)
5846 return (EINVAL);
5847
5848 new_wufc &= 0xff;
5849 new_wufc |= (0xffffff & adapter->wufc);
5850 adapter->wufc = new_wufc;
5851
5852 return (0);
5853 } /* ixgbe_sysctl_wufc */
5854
5855 #ifdef IXGBE_DEBUG
5856 /************************************************************************
5857 * ixgbe_sysctl_print_rss_config
5858 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Not yet ported to NetBSD: this body still relies on the FreeBSD
	 * sbuf(9) sysctl helpers (sbuf_new_for_sysctl() and the "req"
	 * argument), so it does not compile here and is kept only as a
	 * porting reference.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550-class MACs have the larger 128-entry table. */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			/* First 32 entries live in RETA... */
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* ...the remainder in the extended ERETA. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5915 #endif /* IXGBE_DEBUG */
5916
5917 /************************************************************************
5918 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5919 *
5920 * For X552/X557-AT devices using an external PHY
5921 ************************************************************************/
5922 static int
5923 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5924 {
5925 struct sysctlnode node = *rnode;
5926 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5927 struct ixgbe_hw *hw = &adapter->hw;
5928 int val;
5929 u16 reg;
5930 int error;
5931
5932 if (ixgbe_fw_recovery_mode_swflag(adapter))
5933 return (EPERM);
5934
5935 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5936 device_printf(adapter->dev,
5937 "Device has no supported external thermal sensor.\n");
5938 return (ENODEV);
5939 }
5940
5941 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5942 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5943 device_printf(adapter->dev,
5944 "Error reading from PHY's current temperature register\n");
5945 return (EAGAIN);
5946 }
5947
5948 node.sysctl_data = &val;
5949
5950 /* Shift temp for output */
5951 val = reg >> 8;
5952
5953 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5954 if ((error) || (newp == NULL))
5955 return (error);
5956
5957 return (0);
5958 } /* ixgbe_sysctl_phy_temp */
5959
5960 /************************************************************************
5961 * ixgbe_sysctl_phy_overtemp_occurred
5962 *
5963 * Reports (directly from the PHY) whether the current PHY
5964 * temperature is over the overtemp threshold.
5965 ************************************************************************/
5966 static int
5967 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5968 {
5969 struct sysctlnode node = *rnode;
5970 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5971 struct ixgbe_hw *hw = &adapter->hw;
5972 int val, error;
5973 u16 reg;
5974
5975 if (ixgbe_fw_recovery_mode_swflag(adapter))
5976 return (EPERM);
5977
5978 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5979 device_printf(adapter->dev,
5980 "Device has no supported external thermal sensor.\n");
5981 return (ENODEV);
5982 }
5983
5984 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5985 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5986 device_printf(adapter->dev,
5987 "Error reading from PHY's temperature status register\n");
5988 return (EAGAIN);
5989 }
5990
5991 node.sysctl_data = &val;
5992
5993 /* Get occurrence bit */
5994 val = !!(reg & 0x4000);
5995
5996 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5997 if ((error) || (newp == NULL))
5998 return (error);
5999
6000 return (0);
6001 } /* ixgbe_sysctl_phy_overtemp_occurred */
6002
6003 /************************************************************************
6004 * ixgbe_sysctl_eee_state
6005 *
6006 * Sysctl to set EEE power saving feature
6007 * Values:
6008 * 0 - disable EEE
6009 * 1 - enable EEE
6010 * (none) - get current device EEE state
6011 ************************************************************************/
6012 static int
6013 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6014 {
6015 struct sysctlnode node = *rnode;
6016 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6017 struct ifnet *ifp = adapter->ifp;
6018 device_t dev = adapter->dev;
6019 int curr_eee, new_eee, error = 0;
6020 s32 retval;
6021
6022 if (ixgbe_fw_recovery_mode_swflag(adapter))
6023 return (EPERM);
6024
6025 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6026 node.sysctl_data = &new_eee;
6027 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6028 if ((error) || (newp == NULL))
6029 return (error);
6030
6031 /* Nothing to do */
6032 if (new_eee == curr_eee)
6033 return (0);
6034
6035 /* Not supported */
6036 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6037 return (EINVAL);
6038
6039 /* Bounds checking */
6040 if ((new_eee < 0) || (new_eee > 1))
6041 return (EINVAL);
6042
6043 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6044 if (retval) {
6045 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6046 return (EINVAL);
6047 }
6048
6049 /* Restart auto-neg */
6050 ifp->if_init(ifp);
6051
6052 device_printf(dev, "New EEE state: %d\n", new_eee);
6053
6054 /* Cache new value */
6055 if (new_eee)
6056 adapter->feat_en |= IXGBE_FEATURE_EEE;
6057 else
6058 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6059
6060 return (error);
6061 } /* ixgbe_sysctl_eee_state */
6062
/*
 * PRINTQS(adapter, regname):
 *	Debug helper: print the value of per-queue register "regname"
 *	for every queue of "adapter" on one line, prefixed with the
 *	device name.  The first value is tab-separated from the label;
 *	subsequent values are space-separated.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
6076
6077 /************************************************************************
6078 * ixgbe_print_debug_info
6079 *
6080 * Called only when em_display_debug_stats is enabled.
6081 * Provides a way to take a look at important statistics
6082 * maintained by the driver and hardware.
6083 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* The RSS redirection table is larger on X550-class MACs. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the redirection table: RETA first, then the extended ERETA. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Column header (queue indices), then one line per RX ring register. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* RQSMR covers four queues per register, hence num_queues / 4. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter: how often each queue's interrupt was disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; the EX registers don't exist on 82598. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
	device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
	device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
} /* ixgbe_print_debug_info */
6151
6152 /************************************************************************
6153 * ixgbe_sysctl_debug
6154 ************************************************************************/
6155 static int
6156 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6157 {
6158 struct sysctlnode node = *rnode;
6159 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6160 int error, result = 0;
6161
6162 if (ixgbe_fw_recovery_mode_swflag(adapter))
6163 return (EPERM);
6164
6165 node.sysctl_data = &result;
6166 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6167
6168 if (error || newp == NULL)
6169 return error;
6170
6171 if (result == 1)
6172 ixgbe_print_debug_info(adapter);
6173
6174 return 0;
6175 } /* ixgbe_sysctl_debug */
6176
6177 /************************************************************************
6178 * ixgbe_init_device_features
6179 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Baseline capabilities common to every supported MAC. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass capability is limited to function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		/* The 1G copper variants add a temp sensor and EEE. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass capability is limited to function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		/* RSS and SR-IOV both require MSI-X vectors. */
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6297
6298 /************************************************************************
6299 * ixgbe_probe - Device identification routine
6300 *
6301 * Determines if the driver should be loaded on
6302 * adapter based on its PCI vendor/device ID.
6303 *
6304 * return BUS_PROBE_DEFAULT on success, positive on failure
6305 ************************************************************************/
6306 static int
6307 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6308 {
6309 const struct pci_attach_args *pa = aux;
6310
6311 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6312 }
6313
6314 static const ixgbe_vendor_info_t *
6315 ixgbe_lookup(const struct pci_attach_args *pa)
6316 {
6317 const ixgbe_vendor_info_t *ent;
6318 pcireg_t subid;
6319
6320 INIT_DEBUGOUT("ixgbe_lookup: begin");
6321
6322 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6323 return NULL;
6324
6325 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6326
6327 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6328 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6329 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6330 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6331 (ent->subvendor_id == 0)) &&
6332 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6333 (ent->subdevice_id == 0))) {
6334 return ent;
6335 }
6336 }
6337 return NULL;
6338 }
6339
/*
 * Callback from the ethernet layer when interface flags or ethernet
 * capabilities change.  Returns 0 when the change was handled in place,
 * or ENETRESET when the caller must fully re-initialize the interface.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Remember the new flags so the next call sees a fresh delta. */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		/* Anything beyond PROMISC/DEBUG needs a full re-init. */
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0)
		/* PROMISC alone only needs the RX filter reprogrammed. */
		ixgbe_set_rxfilter(adapter);

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		/* Unhandled capability changes also force a re-init. */
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		ixgbe_setup_vlan_hw_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
6383
6384 /************************************************************************
6385 * ixgbe_ioctl - Ioctl entry point
6386 *
6387 * Called when the user wants to configure the interface.
6388 *
6389 * return 0 on success, positive on failure
6390 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* L4 RX checksum offloads that must be toggled as a unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug tracing only, except SIOCZIFDATA which also
	 * clears hardware and software counters.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both hardware statistics and driver event counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Reject requests larger than the embedded data buffer. */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code do the base work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: apply the change to running hardware. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6525
6526 /************************************************************************
6527 * ixgbe_check_fan_failure
6528 ************************************************************************/
6529 static int
6530 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6531 {
6532 u32 mask;
6533
6534 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6535 IXGBE_ESDP_SDP1;
6536
6537 if (reg & mask) {
6538 device_printf(adapter->dev,
6539 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6540 return IXGBE_ERR_FAN_FAILURE;
6541 }
6542
6543 return IXGBE_SUCCESS;
6544 } /* ixgbe_check_fan_failure */
6545
6546 /************************************************************************
6547 * ixgbe_handle_que
6548 ************************************************************************/
/*
 * Deferred per-queue service routine: drain RX/TX completions and
 * restart transmission, then either reschedule itself (work pending)
 * or re-enable this queue's interrupt.
 */
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* "more" becomes true if either ring still has work left. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule instead of re-enabling the intr. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* MSIX: Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else {
		/* INTx or MSI */
		ixgbe_enable_queue(adapter, 0);
	}

	return;
} /* ixgbe_handle_que */
6588
6589 /************************************************************************
6590 * ixgbe_handle_que_work
6591 ************************************************************************/
/*
 * workqueue(9) adapter: recover the queue from the embedded work
 * cookie and hand it to the common service routine.
 */
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
6603
6604 /************************************************************************
6605 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6606 ************************************************************************/
6607 static int
6608 ixgbe_allocate_legacy(struct adapter *adapter,
6609 const struct pci_attach_args *pa)
6610 {
6611 device_t dev = adapter->dev;
6612 struct ix_queue *que = adapter->queues;
6613 struct tx_ring *txr = adapter->tx_rings;
6614 int counts[PCI_INTR_TYPE_SIZE];
6615 pci_intr_type_t intr_type, max_type;
6616 char intrbuf[PCI_INTRSTR_LEN];
6617 char wqname[MAXCOMLEN];
6618 const char *intrstr = NULL;
6619 int defertx_error = 0, error;
6620
6621 /* We allocate a single interrupt resource */
6622 max_type = PCI_INTR_TYPE_MSI;
6623 counts[PCI_INTR_TYPE_MSIX] = 0;
6624 counts[PCI_INTR_TYPE_MSI] =
6625 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6626 /* Check not feat_en but feat_cap to fallback to INTx */
6627 counts[PCI_INTR_TYPE_INTX] =
6628 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6629
6630 alloc_retry:
6631 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6632 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6633 return ENXIO;
6634 }
6635 adapter->osdep.nintrs = 1;
6636 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6637 intrbuf, sizeof(intrbuf));
6638 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6639 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6640 device_xname(dev));
6641 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6642 if (adapter->osdep.ihs[0] == NULL) {
6643 aprint_error_dev(dev,"unable to establish %s\n",
6644 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6645 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6646 adapter->osdep.intrs = NULL;
6647 switch (intr_type) {
6648 case PCI_INTR_TYPE_MSI:
6649 /* The next try is for INTx: Disable MSI */
6650 max_type = PCI_INTR_TYPE_INTX;
6651 counts[PCI_INTR_TYPE_INTX] = 1;
6652 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6653 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6654 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6655 goto alloc_retry;
6656 } else
6657 break;
6658 case PCI_INTR_TYPE_INTX:
6659 default:
6660 /* See below */
6661 break;
6662 }
6663 }
6664 if (intr_type == PCI_INTR_TYPE_INTX) {
6665 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6666 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6667 }
6668 if (adapter->osdep.ihs[0] == NULL) {
6669 aprint_error_dev(dev,
6670 "couldn't establish interrupt%s%s\n",
6671 intrstr ? " at " : "", intrstr ? intrstr : "");
6672 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6673 adapter->osdep.intrs = NULL;
6674 return ENXIO;
6675 }
6676 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6677 /*
6678 * Try allocating a fast interrupt and the associated deferred
6679 * processing contexts.
6680 */
6681 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6682 txr->txr_si =
6683 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6684 ixgbe_deferred_mq_start, txr);
6685
6686 snprintf(wqname, sizeof(wqname), "%sdeferTx",
6687 device_xname(dev));
6688 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6689 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6690 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6691 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6692 }
6693 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6694 ixgbe_handle_que, que);
6695 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6696 error = workqueue_create(&adapter->que_wq, wqname,
6697 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6698 IXGBE_WORKQUEUE_FLAGS);
6699
6700 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6701 && ((txr->txr_si == NULL) || defertx_error != 0))
6702 || (que->que_si == NULL) || error != 0) {
6703 aprint_error_dev(dev,
6704 "could not establish software interrupts\n");
6705
6706 return ENXIO;
6707 }
6708 /* For simplicity in the handlers */
6709 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6710
6711 return (0);
6712 } /* ixgbe_allocate_legacy */
6713
6714 /************************************************************************
6715 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6716 ************************************************************************/
6717 static int
6718 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6719 {
6720 device_t dev = adapter->dev;
6721 struct ix_queue *que = adapter->queues;
6722 struct tx_ring *txr = adapter->tx_rings;
6723 pci_chipset_tag_t pc;
6724 char intrbuf[PCI_INTRSTR_LEN];
6725 char intr_xname[32];
6726 char wqname[MAXCOMLEN];
6727 const char *intrstr = NULL;
6728 int error, vector = 0;
6729 int cpu_id = 0;
6730 kcpuset_t *affinity;
6731 #ifdef RSS
6732 unsigned int rss_buckets = 0;
6733 kcpuset_t cpu_mask;
6734 #endif
6735
6736 pc = adapter->osdep.pc;
6737 #ifdef RSS
6738 /*
6739 * If we're doing RSS, the number of queues needs to
6740 * match the number of RSS buckets that are configured.
6741 *
6742 * + If there's more queues than RSS buckets, we'll end
6743 * up with queues that get no traffic.
6744 *
6745 * + If there's more RSS buckets than queues, we'll end
6746 * up having multiple RSS buckets map to the same queue,
6747 * so there'll be some contention.
6748 */
6749 rss_buckets = rss_getnumbuckets();
6750 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6751 (adapter->num_queues != rss_buckets)) {
6752 device_printf(dev,
6753 "%s: number of queues (%d) != number of RSS buckets (%d)"
6754 "; performance will be impacted.\n",
6755 __func__, adapter->num_queues, rss_buckets);
6756 }
6757 #endif
6758
6759 adapter->osdep.nintrs = adapter->num_queues + 1;
6760 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6761 adapter->osdep.nintrs) != 0) {
6762 aprint_error_dev(dev,
6763 "failed to allocate MSI-X interrupt\n");
6764 return (ENXIO);
6765 }
6766
6767 kcpuset_create(&affinity, false);
6768 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6769 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6770 device_xname(dev), i);
6771 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6772 sizeof(intrbuf));
6773 #ifdef IXGBE_MPSAFE
6774 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6775 true);
6776 #endif
6777 /* Set the handler function */
6778 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6779 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6780 intr_xname);
6781 if (que->res == NULL) {
6782 aprint_error_dev(dev,
6783 "Failed to register QUE handler\n");
6784 error = ENXIO;
6785 goto err_out;
6786 }
6787 que->msix = vector;
6788 adapter->active_queues |= 1ULL << que->msix;
6789
6790 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6791 #ifdef RSS
6792 /*
6793 * The queue ID is used as the RSS layer bucket ID.
6794 * We look up the queue ID -> RSS CPU ID and select
6795 * that.
6796 */
6797 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6798 CPU_SETOF(cpu_id, &cpu_mask);
6799 #endif
6800 } else {
6801 /*
6802 * Bind the MSI-X vector, and thus the
6803 * rings to the corresponding CPU.
6804 *
6805 * This just happens to match the default RSS
6806 * round-robin bucket -> queue -> CPU allocation.
6807 */
6808 if (adapter->num_queues > 1)
6809 cpu_id = i;
6810 }
6811 /* Round-robin affinity */
6812 kcpuset_zero(affinity);
6813 kcpuset_set(affinity, cpu_id % ncpu);
6814 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6815 NULL);
6816 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6817 intrstr);
6818 if (error == 0) {
6819 #if 1 /* def IXGBE_DEBUG */
6820 #ifdef RSS
6821 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6822 cpu_id % ncpu);
6823 #else
6824 aprint_normal(", bound queue %d to cpu %d", i,
6825 cpu_id % ncpu);
6826 #endif
6827 #endif /* IXGBE_DEBUG */
6828 }
6829 aprint_normal("\n");
6830
6831 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6832 txr->txr_si = softint_establish(
6833 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6834 ixgbe_deferred_mq_start, txr);
6835 if (txr->txr_si == NULL) {
6836 aprint_error_dev(dev,
6837 "couldn't establish software interrupt\n");
6838 error = ENXIO;
6839 goto err_out;
6840 }
6841 }
6842 que->que_si
6843 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6844 ixgbe_handle_que, que);
6845 if (que->que_si == NULL) {
6846 aprint_error_dev(dev,
6847 "couldn't establish software interrupt\n");
6848 error = ENXIO;
6849 goto err_out;
6850 }
6851 }
6852 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6853 error = workqueue_create(&adapter->txr_wq, wqname,
6854 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6855 IXGBE_WORKQUEUE_FLAGS);
6856 if (error) {
6857 aprint_error_dev(dev,
6858 "couldn't create workqueue for deferred Tx\n");
6859 goto err_out;
6860 }
6861 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6862
6863 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6864 error = workqueue_create(&adapter->que_wq, wqname,
6865 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6866 IXGBE_WORKQUEUE_FLAGS);
6867 if (error) {
6868 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6869 goto err_out;
6870 }
6871
6872 /* and Link */
6873 cpu_id++;
6874 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6875 adapter->vector = vector;
6876 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6877 sizeof(intrbuf));
6878 #ifdef IXGBE_MPSAFE
6879 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6880 true);
6881 #endif
6882 /* Set the link handler function */
6883 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6884 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6885 intr_xname);
6886 if (adapter->osdep.ihs[vector] == NULL) {
6887 aprint_error_dev(dev, "Failed to register LINK handler\n");
6888 error = ENXIO;
6889 goto err_out;
6890 }
6891 /* Round-robin affinity */
6892 kcpuset_zero(affinity);
6893 kcpuset_set(affinity, cpu_id % ncpu);
6894 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6895 NULL);
6896
6897 aprint_normal_dev(dev,
6898 "for link, interrupting at %s", intrstr);
6899 if (error == 0)
6900 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6901 else
6902 aprint_normal("\n");
6903
6904 kcpuset_destroy(affinity);
6905 aprint_normal_dev(dev,
6906 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6907
6908 return (0);
6909
6910 err_out:
6911 kcpuset_destroy(affinity);
6912 ixgbe_free_deferred_handlers(adapter);
6913 ixgbe_free_pciintr_resources(adapter);
6914 return (error);
6915 } /* ixgbe_allocate_msix */
6916
6917 /************************************************************************
6918 * ixgbe_configure_interrupts
6919 *
6920 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6921 * This will also depend on user settings.
6922 ************************************************************************/
6923 static int
6924 ixgbe_configure_interrupts(struct adapter *adapter)
6925 {
6926 device_t dev = adapter->dev;
6927 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6928 int want, queues, msgs;
6929
6930 /* Default to 1 queue if MSI-X setup fails */
6931 adapter->num_queues = 1;
6932
6933 /* Override by tuneable */
6934 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6935 goto msi;
6936
6937 /*
6938 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6939 * interrupt slot.
6940 */
6941 if (ncpu == 1)
6942 goto msi;
6943
6944 /* First try MSI-X */
6945 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6946 msgs = MIN(msgs, IXG_MAX_NINTR);
6947 if (msgs < 2)
6948 goto msi;
6949
6950 adapter->msix_mem = (void *)1; /* XXX */
6951
6952 /* Figure out a reasonable auto config value */
6953 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6954
6955 #ifdef RSS
6956 /* If we're doing RSS, clamp at the number of RSS buckets */
6957 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6958 queues = uimin(queues, rss_getnumbuckets());
6959 #endif
6960 if (ixgbe_num_queues > queues) {
6961 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6962 ixgbe_num_queues = queues;
6963 }
6964
6965 if (ixgbe_num_queues != 0)
6966 queues = ixgbe_num_queues;
6967 else
6968 queues = uimin(queues,
6969 uimin(mac->max_tx_queues, mac->max_rx_queues));
6970
6971 /* reflect correct sysctl value */
6972 ixgbe_num_queues = queues;
6973
6974 /*
6975 * Want one vector (RX/TX pair) per queue
6976 * plus an additional for Link.
6977 */
6978 want = queues + 1;
6979 if (msgs >= want)
6980 msgs = want;
6981 else {
6982 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6983 "%d vectors but %d queues wanted!\n",
6984 msgs, want);
6985 goto msi;
6986 }
6987 adapter->num_queues = queues;
6988 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6989 return (0);
6990
6991 /*
6992 * MSI-X allocation failed or provided us with
6993 * less vectors than needed. Free MSI-X resources
6994 * and we'll try enabling MSI.
6995 */
6996 msi:
6997 /* Without MSI-X, some features are no longer supported */
6998 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6999 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
7000 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7001 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
7002
7003 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
7004 adapter->msix_mem = NULL; /* XXX */
7005 if (msgs > 1)
7006 msgs = 1;
7007 if (msgs != 0) {
7008 msgs = 1;
7009 adapter->feat_en |= IXGBE_FEATURE_MSI;
7010 return (0);
7011 }
7012
7013 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7014 aprint_error_dev(dev,
7015 "Device does not support legacy interrupts.\n");
7016 return 1;
7017 }
7018
7019 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7020
7021 return (0);
7022 } /* ixgbe_configure_interrupts */
7023
7024
7025 /************************************************************************
7026 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7027 *
7028 * Done outside of interrupt context since the driver might sleep
7029 ************************************************************************/
7030 static void
7031 ixgbe_handle_link(void *context)
7032 {
7033 struct adapter *adapter = context;
7034 struct ixgbe_hw *hw = &adapter->hw;
7035
7036 KASSERT(mutex_owned(&adapter->core_mtx));
7037
7038 ++adapter->link_workev.ev_count;
7039 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7040 ixgbe_update_link_status(adapter);
7041
7042 /* Re-enable link interrupts */
7043 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7044 } /* ixgbe_handle_link */
7045
7046 #if 0
7047 /************************************************************************
7048 * ixgbe_rearm_queues
7049 ************************************************************************/
/*
 * Trigger a software interrupt for each queue set in "queues" by
 * writing the EICS (Extended Interrupt Cause Set) register(s).
 * Currently compiled out (#if 0) — kept for reference.
 */
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single 32-bit EICS register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/*
		 * Later MACs split the 64-bit queue bitmap across the
		 * two EICS_EX registers (low 32 bits, then high 32).
		 */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
7074 #endif
7075