ixgbe.c revision 1.199.2.25 1 /* $NetBSD: ixgbe.c,v 1.199.2.25 2023/01/23 14:04:42 martin Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.199.2.25 2023/01/23 14:04:42 martin Exp $");
68
69 #ifdef _KERNEL_OPT
70 #include "opt_inet.h"
71 #include "opt_inet6.h"
72 #include "opt_net_mpsafe.h"
73 #endif
74
75 #include "ixgbe.h"
76 #include "ixgbe_sriov.h"
77 #include "vlan.h"
78
79 #include <sys/cprng.h>
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82
83 /************************************************************************
84 * Driver version
85 ************************************************************************/
86 static const char ixgbe_driver_version[] = "4.0.1-k";
87 /* XXX NetBSD: + 3.3.24 */
88
89 /************************************************************************
90 * PCI Device ID Table
91 *
92 * Used by probe to select devices to load on
93 * Last field stores an index into ixgbe_strings
94 * Last entry must be all 0s
95 *
96 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass adapters */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
153
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last ("String Index") field of the entries in
 *   ixgbe_vendor_info_array above; printed at attach time.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
160
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* Autoconf entry points */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
/* pmf(9) power-management hooks */
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
/* ifnet / ethercom callbacks */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
/* PCI resource and interrupt setup/teardown */
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_softint(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
/* Periodic timers (watchdog / recovery-mode poll) */
static void	ixgbe_local_timer(void *);
static void	ixgbe_local_timer1(void *);
static void	ixgbe_recovery_mode_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

/* TX/RX hardware unit initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt, filter, and link management */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN hardware offload */
static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

/* sysctl tree and event-counter setup */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_default_advertise(struct adapter *);

/* Sysctl handlers */
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);

/* Interrupt functions */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);
static void	ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
static int	ixgbe_legacy_irq(void *);

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
282
/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/*
 * FreeBSD driver/devclass module glue; unused on NetBSD but kept
 * (disabled) to ease merging with the upstream if_ix.c driver.
 */
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
300
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * NOTE: on NetBSD SYSCTL_INT is defined away to nothing; the FreeBSD
 * SYSCTL_INT() invocations below are compile-time no-ops retained only
 * for merge parity with the upstream driver.  The real sysctl nodes are
 * created in ixgbe_add_device_sysctls().
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* TUNABLE_INT is likewise stubbed out on NetBSD (FreeBSD merge parity) */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/* MP-safe flag sets for callouts, softints and workqueues */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/* Interval between reports of errors */
static const struct timeval ixgbe_errlog_intrvl = { 60, 0 };	/* 60s */
426
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS (Receive-Side Scaling) state of the MAC: the
 *   redirection table (RETA, plus ERETA on the larger-table MACs),
 *   the 40-byte hash key (ten RSSRK registers) and the set of packet
 *   types to hash on (MRQC register).
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses a 0x11 multiplier on each table entry */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry redirection table */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the configured queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Shift the accumulator right a byte and insert the new
		 * entry in the top byte; flush to hardware once four
		 * entries have been packed.  Entries 128..511 live in
		 * the extended ERETA register bank.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (40-byte key, 10 registers) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash config into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in the queueing mode required by SR-IOV, if any */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
543
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs per-queue RX descriptor rings (base/length/head/tail),
 *   buffer sizing (SRRCTL), statistics mapping (RQSMR), drop policy,
 *   jumbo/CRC handling (HLREG0), RSS and RX checksum offload (RXCSUM).
 *   Called with receives disabled; does not re-enable them.
 ************************************************************************/
/* Round-up amount to convert a buffer size into SRRCTL.BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: set additional filter-control bits (DPF/PMCF) */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Convert the RX mbuf buffer size into SRRCTL.BSIZEPKT units */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware queue index backing software queue i */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		/* Advanced descriptors, one buffer per packet */
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if ((adapter->num_queues > 1) &&
		    (adapter->hw.fc.requested_mode == ixgbe_fc_none))
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		else
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split receive type selection (non-82598 MACs) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
672
673 /************************************************************************
674 * ixgbe_initialize_transmit_units - Enable transmit units.
675 ************************************************************************/
676 static void
677 ixgbe_initialize_transmit_units(struct adapter *adapter)
678 {
679 struct tx_ring *txr = adapter->tx_rings;
680 struct ixgbe_hw *hw = &adapter->hw;
681 int i;
682
683 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
684
685 /* Setup the Base and Length of the Tx Descriptor Ring */
686 for (i = 0; i < adapter->num_queues; i++, txr++) {
687 u64 tdba = txr->txdma.dma_paddr;
688 u32 txctrl = 0;
689 u32 tqsmreg, reg;
690 int regnum = i / 4; /* 1 register per 4 queues */
691 int regshift = i % 4; /* 4 bits per 1 queue */
692 int j = txr->me;
693
694 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
695 (tdba & 0x00000000ffffffffULL));
696 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
697 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
698 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
699
700 /*
701 * Set TQSMR (Transmit Queue Statistic Mapping) register.
702 * Register location is different between 82598 and others.
703 */
704 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
705 tqsmreg = IXGBE_TQSMR(regnum);
706 else
707 tqsmreg = IXGBE_TQSM(regnum);
708 reg = IXGBE_READ_REG(hw, tqsmreg);
709 reg &= ~(0x000000ffUL << (regshift * 8));
710 reg |= i << (regshift * 8);
711 IXGBE_WRITE_REG(hw, tqsmreg, reg);
712
713 /* Setup the HW Tx Head and Tail descriptor pointers */
714 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
715 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
716
717 /* Cache the tail address */
718 txr->tail = IXGBE_TDT(j);
719
720 txr->txr_no_space = false;
721
722 /* Disable Head Writeback */
723 /*
724 * Note: for X550 series devices, these registers are actually
725 * prefixed with TPH_ instead of DCA_, but the addresses and
726 * fields remain the same.
727 */
728 switch (hw->mac.type) {
729 case ixgbe_mac_82598EB:
730 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
731 break;
732 default:
733 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
734 break;
735 }
736 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
737 switch (hw->mac.type) {
738 case ixgbe_mac_82598EB:
739 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
740 break;
741 default:
742 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
743 break;
744 }
745
746 }
747
748 if (hw->mac.type != ixgbe_mac_82598EB) {
749 u32 dmatxctl, rttdcs;
750
751 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
752 dmatxctl |= IXGBE_DMATXCTL_TE;
753 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
754 /* Disable arbiter to set MTQC */
755 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
756 rttdcs |= IXGBE_RTTDCS_ARBDIS;
757 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
758 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
759 ixgbe_get_mtqc(adapter->iov_mode));
760 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 }
763
764 return;
765 } /* ixgbe_initialize_transmit_units */
766
767 /************************************************************************
768 * ixgbe_attach - Device initialization routine
769 *
770 * Called when the driver is being loaded.
771 * Identifies the type of hardware, allocates all resources
772 * and initializes the hardware.
773 *
774 * return 0 on success, positive on failure
775 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = -1;
	u32 ctrl_ext;
	u16 high, low, nvmreg;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool unsupported_sfp = false;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the platform provides one. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	/* Match already succeeded in probe, so the lookup cannot fail. */
	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		/*
		 * NOTE(review): err_out touches IXGBE_CTRL_EXT, which needs
		 * the BAR mapping that just failed here -- confirm the
		 * register macros tolerate an unmapped BAR on this path.
		 */
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev,
		    "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Pretty-print the MAC type determined by the shared code. */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	/* Set the right number of segments */
	KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
	adapter->num_segs = IXGBE_SCATTER_DEFAULT;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/*
	 * Do descriptor calc and sanity checks: the tunables must keep the
	 * descriptor rings a multiple of DBA_ALIGN and within HW limits,
	 * otherwise fall back to the defaults.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	adapter->rx_process_limit
	    = (ixgbe_rx_process_limit <= adapter->num_rx_desc)
	    ? ixgbe_rx_process_limit : adapter->num_rx_desc;
	adapter->tx_process_limit
	    = (ixgbe_tx_process_limit <= adapter->num_tx_desc)
	    ? ixgbe_tx_process_limit : adapter->num_tx_desc;

	/* Set default high limit of copying mbuf in rxeof */
	adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	 * Reset the hardware.  reset_if_overtemp is set only around this
	 * first reset so an overheated PHY powers down instead of attaching.
	 */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		adapter->sfp_probe = TRUE;
		error = IXGBE_SUCCESS;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev,
		    "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev,
		    "Hardware initialization failed(error = %d)\n", error);
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * Print version from the dev starter version (0x29). The
		 * location is the same as newer device's IXGBE_NVM_MAP_VER.
		 */
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		/*
		 * The following output might not be correct. Some 82598 cards
		 * have 0x1070 or 0x2090. 82598 spec update notes about 2.9.0.
		 */
		aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		/* X540 prints the minor in hex, X550EM_a zero-padded. */
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	/* Remember the image version; used below for recovery-mode check. */
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	/* Printed Board Assembly number */
	error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
	aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}

	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	/*
	 * NOTE(review): if the MSI-X fallback above succeeded, "error" may
	 * still hold ixgbe_allocate_msix()'s return -- presumably that
	 * helper clears IXGBE_FEATURE_MSIX from feat_en on failure so the
	 * legacy path below overwrites it; confirm against its definition.
	 */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_link, adapter);
	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_mod, adapter);
	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_msf, adapter);
	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_phy, adapter);
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		adapter->fdir_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_reinit_fdir, adapter);
	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
		&& (adapter->fdir_si == NULL))) {
		aprint_error_dev(dev,
		    "could not establish software interrupts ()\n");
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev,
		    "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_default_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop_locked;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the feature capability/enable bit maps at verbose level. */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

	/*
	 * Error unwind.  autoconf attach routines return void, so failures
	 * are only logged; resources acquired so far are released here.
	 */
err_late:
	ixgbe_free_queues(adapter);
err_out:
	/* Tell the firmware the driver is no longer loaded. */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_softint(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
1288
1289 /************************************************************************
1290 * ixgbe_check_wol_support
1291 *
1292 * Checks whether the adapter's ports are capable of
1293 * Wake On LAN by reading the adapter's NVM.
1294 *
1295 * Sets each port's hw->wol_enabled value depending
1296 * on the value read here.
1297 ************************************************************************/
1298 static void
1299 ixgbe_check_wol_support(struct adapter *adapter)
1300 {
1301 struct ixgbe_hw *hw = &adapter->hw;
1302 u16 dev_caps = 0;
1303
1304 /* Find out WoL support for port */
1305 adapter->wol_support = hw->wol_enabled = 0;
1306 ixgbe_get_device_caps(hw, &dev_caps);
1307 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1308 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1309 hw->bus.func == 0))
1310 adapter->wol_support = hw->wol_enabled = 1;
1311
1312 /* Save initial wake up filter configuration */
1313 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1314
1315 return;
1316 } /* ixgbe_check_wol_support */
1317
1318 /************************************************************************
1319 * ixgbe_setup_interface
1320 *
1321 * Setup networking device structure and register an interface.
1322 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; just wire up callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit only when LEGACY_TX is not forced. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* if_initialize() must precede if_percpuq_create/ether_ifattach. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Checksum offload/TSO start disabled; user enables via ifconfig. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	/* Final step: make the interface visible to the stack. */
	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1428
1429 /************************************************************************
1430 * ixgbe_add_media_types
1431 ************************************************************************/
1432 static void
1433 ixgbe_add_media_types(struct adapter *adapter)
1434 {
1435 struct ixgbe_hw *hw = &adapter->hw;
1436 u64 layer;
1437
1438 layer = adapter->phy_layer;
1439
1440 #define ADD(mm, dd) \
1441 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1442
1443 ADD(IFM_NONE, 0);
1444
1445 /* Media types with matching NetBSD media defines */
1446 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1447 ADD(IFM_10G_T | IFM_FDX, 0);
1448 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1449 ADD(IFM_1000_T | IFM_FDX, 0);
1450 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1451 ADD(IFM_100_TX | IFM_FDX, 0);
1452 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1453 ADD(IFM_10_T | IFM_FDX, 0);
1454
1455 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1456 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1457 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1458
1459 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1460 ADD(IFM_10G_LR | IFM_FDX, 0);
1461 if (hw->phy.multispeed_fiber)
1462 ADD(IFM_1000_LX | IFM_FDX, 0);
1463 }
1464 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1465 ADD(IFM_10G_SR | IFM_FDX, 0);
1466 if (hw->phy.multispeed_fiber)
1467 ADD(IFM_1000_SX | IFM_FDX, 0);
1468 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1469 ADD(IFM_1000_SX | IFM_FDX, 0);
1470 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1471 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1472
1473 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1474 ADD(IFM_10G_KR | IFM_FDX, 0);
1475 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1476 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1477 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1478 ADD(IFM_1000_KX | IFM_FDX, 0);
1479 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1480 ADD(IFM_2500_KX | IFM_FDX, 0);
1481 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1482 ADD(IFM_2500_T | IFM_FDX, 0);
1483 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T)
1484 ADD(IFM_5000_T | IFM_FDX, 0);
1485 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1486 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1487 /* XXX no ifmedia_set? */
1488
1489 ADD(IFM_AUTO, 0);
1490
1491 #undef ADD
1492 } /* ixgbe_add_media_types */
1493
1494 /************************************************************************
1495 * ixgbe_is_sfp
1496 ************************************************************************/
1497 static inline bool
1498 ixgbe_is_sfp(struct ixgbe_hw *hw)
1499 {
1500 switch (hw->mac.type) {
1501 case ixgbe_mac_82598EB:
1502 if (hw->phy.type == ixgbe_phy_nl)
1503 return (TRUE);
1504 return (FALSE);
1505 case ixgbe_mac_82599EB:
1506 case ixgbe_mac_X550EM_x:
1507 case ixgbe_mac_X550EM_a:
1508 switch (hw->mac.ops.get_media_type(hw)) {
1509 case ixgbe_media_type_fiber:
1510 case ixgbe_media_type_fiber_qsfp:
1511 return (TRUE);
1512 default:
1513 return (FALSE);
1514 }
1515 default:
1516 return (FALSE);
1517 }
1518 } /* ixgbe_is_sfp */
1519
1520 /************************************************************************
1521 * ixgbe_config_link
1522 ************************************************************************/
1523 static void
1524 ixgbe_config_link(struct adapter *adapter)
1525 {
1526 struct ixgbe_hw *hw = &adapter->hw;
1527 u32 autoneg, err = 0;
1528 bool sfp, negotiate = false;
1529
1530 sfp = ixgbe_is_sfp(hw);
1531
1532 if (sfp) {
1533 if (hw->phy.multispeed_fiber) {
1534 ixgbe_enable_tx_laser(hw);
1535 kpreempt_disable();
1536 softint_schedule(adapter->msf_si);
1537 kpreempt_enable();
1538 }
1539 kpreempt_disable();
1540 softint_schedule(adapter->mod_si);
1541 kpreempt_enable();
1542 } else {
1543 struct ifmedia *ifm = &adapter->media;
1544
1545 if (hw->mac.ops.check_link)
1546 err = ixgbe_check_link(hw, &adapter->link_speed,
1547 &adapter->link_up, FALSE);
1548 if (err)
1549 return;
1550
1551 /*
1552 * Check if it's the first call. If it's the first call,
1553 * get value for auto negotiation.
1554 */
1555 autoneg = hw->phy.autoneg_advertised;
1556 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1557 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1558 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1559 &negotiate);
1560 if (err)
1561 return;
1562 if (hw->mac.ops.setup_link)
1563 err = hw->mac.ops.setup_link(hw, autoneg,
1564 adapter->link_up);
1565 }
1566 } /* ixgbe_config_link */
1567
1568 /************************************************************************
1569 * ixgbe_update_stats_counters - Update board statistics counters.
1570 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
	u64 total, total_missed_rx = 0;
	uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
	unsigned int queue_counters;
	int i;

	/*
	 * IXGBE_EVC_REGADD2 reads the register, accumulates it into the
	 * named evcnt and also leaves the delta in the local variable of
	 * the same name for the ifp error totals below.
	 */
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);

	IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
		if (hw->mac.type >= ixgbe_mac_82599EB)
			IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
	}

	/* 8 registers exist */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		IXGBE_EVC_ADD(&stats->mpc[i], mp);
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);

		/* Per-TC priority flow control on/off counters. */
		IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXONRXCNT(i), pxonrxc[i]);
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXON2OFFCNT(i), pxon2offc[i]);
		} else {
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXONRXC(i), pxonrxc[i]);
			IXGBE_EVC_REGADD(hw, stats,
			    IXGBE_PXOFFRXC(i), pxoffrxc[i]);
		}
	}
	IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
	}
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);

	/*
	 * Hardware workaround, gprc counts missed packets
	 *
	 * NOTE(review): missed_rx is never updated in this function (the
	 * per-TC loop above only feeds total_missed_rx), so the subtraction
	 * below is a no-op -- confirm whether it should use total_missed_rx.
	 */
	IXGBE_EVC_ADD(&stats->gprc,
	    IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);

	IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
	/* Flow-control frames are excluded from the good-octet counters. */
	total = lxontxc + lxofftxc;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit counters split across low/high register pairs. */
		IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
		IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
		    - total * ETHER_MIN_LEN);
		IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
	} else {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
		/* 82598 only has a counter in the high register */
		IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
		IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
		    - total * ETHER_MIN_LEN);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
	IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));

	/* RX size-bucket histogram counters. */
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);

	IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
	IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
	IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);

	/* Undersize / fragment / oversize / jabber error counters. */
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);

	IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
	IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
		IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	ifp->if_ierrors +=
	    crcerrs + illerrc + rlec + ruc + rfc + roc + rjc;
} /* ixgbe_update_stats_counters */
1739
1740 /************************************************************************
1741 * ixgbe_add_hw_stats
1742 *
1743 * Add sysctl variables, one per statistic, to the system.
1744 ************************************************************************/
1745 static void
1746 ixgbe_add_hw_stats(struct adapter *adapter)
1747 {
1748 device_t dev = adapter->dev;
1749 const struct sysctlnode *rnode, *cnode;
1750 struct sysctllog **log = &adapter->sysctllog;
1751 struct tx_ring *txr = adapter->tx_rings;
1752 struct rx_ring *rxr = adapter->rx_rings;
1753 struct ixgbe_hw *hw = &adapter->hw;
1754 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1755 const char *xname = device_xname(dev);
1756 int i;
1757
1758 /* Driver Statistics */
1759 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1760 NULL, xname, "Driver tx dma soft fail EFBIG");
1761 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1762 NULL, xname, "m_defrag() failed");
1763 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1764 NULL, xname, "Driver tx dma hard fail EFBIG");
1765 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1766 NULL, xname, "Driver tx dma hard fail EINVAL");
1767 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1768 NULL, xname, "Driver tx dma hard fail other");
1769 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1770 NULL, xname, "Driver tx dma soft fail EAGAIN");
1771 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1772 NULL, xname, "Driver tx dma soft fail ENOMEM");
1773 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1774 NULL, xname, "Watchdog timeouts");
1775 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1776 NULL, xname, "TSO errors");
1777 evcnt_attach_dynamic(&adapter->admin_irq, EVCNT_TYPE_INTR,
1778 NULL, xname, "Admin MSI-X IRQ Handled");
1779 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1780 NULL, xname, "Link softint");
1781 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1782 NULL, xname, "module softint");
1783 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1784 NULL, xname, "multimode softint");
1785 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1786 NULL, xname, "external PHY softint");
1787
1788 /* Max number of traffic class is 8 */
1789 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1790 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1791 snprintf(adapter->tcs[i].evnamebuf,
1792 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", xname, i);
1793 if (i < __arraycount(stats->mpc)) {
1794 evcnt_attach_dynamic(&stats->mpc[i],
1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1796 "RX Missed Packet Count");
1797 if (hw->mac.type == ixgbe_mac_82598EB)
1798 evcnt_attach_dynamic(&stats->rnbc[i],
1799 EVCNT_TYPE_MISC, NULL,
1800 adapter->tcs[i].evnamebuf,
1801 "Receive No Buffers");
1802 }
1803 if (i < __arraycount(stats->pxontxc)) {
1804 evcnt_attach_dynamic(&stats->pxontxc[i],
1805 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1806 "pxontxc");
1807 evcnt_attach_dynamic(&stats->pxonrxc[i],
1808 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1809 "pxonrxc");
1810 evcnt_attach_dynamic(&stats->pxofftxc[i],
1811 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1812 "pxofftxc");
1813 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1814 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1815 "pxoffrxc");
1816 if (hw->mac.type >= ixgbe_mac_82599EB)
1817 evcnt_attach_dynamic(&stats->pxon2offc[i],
1818 EVCNT_TYPE_MISC, NULL,
1819 adapter->tcs[i].evnamebuf,
1820 "pxon2offc");
1821 }
1822 }
1823
1824 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1825 #ifdef LRO
1826 struct lro_ctrl *lro = &rxr->lro;
1827 #endif /* LRO */
1828
1829 snprintf(adapter->queues[i].evnamebuf,
1830 sizeof(adapter->queues[i].evnamebuf), "%s q%d", xname, i);
1831 snprintf(adapter->queues[i].namebuf,
1832 sizeof(adapter->queues[i].namebuf), "q%d", i);
1833
1834 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1835 aprint_error_dev(dev,
1836 "could not create sysctl root\n");
1837 break;
1838 }
1839
1840 if (sysctl_createv(log, 0, &rnode, &rnode,
1841 0, CTLTYPE_NODE,
1842 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1843 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1844 break;
1845
1846 if (sysctl_createv(log, 0, &rnode, &cnode,
1847 CTLFLAG_READWRITE, CTLTYPE_INT,
1848 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1849 ixgbe_sysctl_interrupt_rate_handler, 0,
1850 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1851 break;
1852
1853 if (sysctl_createv(log, 0, &rnode, &cnode,
1854 CTLFLAG_READONLY, CTLTYPE_INT,
1855 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1856 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1857 0, CTL_CREATE, CTL_EOL) != 0)
1858 break;
1859
1860 if (sysctl_createv(log, 0, &rnode, &cnode,
1861 CTLFLAG_READONLY, CTLTYPE_INT,
1862 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1863 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1864 0, CTL_CREATE, CTL_EOL) != 0)
1865 break;
1866
1867 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1868 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1869 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1870 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1871 "Handled queue in softint");
1872 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1873 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1874 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1875 NULL, adapter->queues[i].evnamebuf, "TSO");
1876 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1877 NULL, adapter->queues[i].evnamebuf,
1878 "TX Queue No Descriptor Available");
1879 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1880 NULL, adapter->queues[i].evnamebuf,
1881 "Queue Packets Transmitted");
1882 #ifndef IXGBE_LEGACY_TX
1883 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1884 NULL, adapter->queues[i].evnamebuf,
1885 "Packets dropped in pcq");
1886 #endif
1887
1888 if (sysctl_createv(log, 0, &rnode, &cnode,
1889 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1890 SYSCTL_DESCR("Receive Descriptor next to check"),
1891 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1892 CTL_CREATE, CTL_EOL) != 0)
1893 break;
1894
1895 if (sysctl_createv(log, 0, &rnode, &cnode,
1896 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1897 SYSCTL_DESCR("Receive Descriptor next to refresh"),
1898 ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1899 CTL_CREATE, CTL_EOL) != 0)
1900 break;
1901
1902 if (sysctl_createv(log, 0, &rnode, &cnode,
1903 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1904 SYSCTL_DESCR("Receive Descriptor Head"),
1905 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1906 CTL_CREATE, CTL_EOL) != 0)
1907 break;
1908
1909 if (sysctl_createv(log, 0, &rnode, &cnode,
1910 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1911 SYSCTL_DESCR("Receive Descriptor Tail"),
1912 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1913 CTL_CREATE, CTL_EOL) != 0)
1914 break;
1915
1916 if (i < __arraycount(stats->qprc)) {
1917 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1918 NULL, adapter->queues[i].evnamebuf, "qprc");
1919 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1920 NULL, adapter->queues[i].evnamebuf, "qptc");
1921 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1922 NULL, adapter->queues[i].evnamebuf, "qbrc");
1923 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1924 NULL, adapter->queues[i].evnamebuf, "qbtc");
1925 if (hw->mac.type >= ixgbe_mac_82599EB)
1926 evcnt_attach_dynamic(&stats->qprdc[i],
1927 EVCNT_TYPE_MISC, NULL,
1928 adapter->queues[i].evnamebuf, "qprdc");
1929 }
1930
1931 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1932 NULL, adapter->queues[i].evnamebuf,
1933 "Queue Packets Received");
1934 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1935 NULL, adapter->queues[i].evnamebuf,
1936 "Queue Bytes Received");
1937 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1938 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1939 evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
1940 NULL, adapter->queues[i].evnamebuf, "Rx no mbuf");
1941 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1942 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1943 #ifdef LRO
1944 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1945 CTLFLAG_RD, &lro->lro_queued, 0,
1946 "LRO Queued");
1947 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1948 CTLFLAG_RD, &lro->lro_flushed, 0,
1949 "LRO Flushed");
1950 #endif /* LRO */
1951 }
1952
1953 /* MAC stats get their own sub node */
1954
1955 snprintf(stats->namebuf,
1956 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1957
1958 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "rx csum offload - IP");
1960 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "rx csum offload - L4");
1962 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "rx csum offload - IP bad");
1964 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "rx csum offload - L4 bad");
1966 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "Interrupt conditions zero");
1968 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "Legacy interrupts");
1970
1971 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "CRC Errors");
1973 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Illegal Byte Errors");
1975 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Byte Errors");
1977 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "MAC Short Packets Discarded");
1979 if (hw->mac.type >= ixgbe_mac_X550)
1980 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "Bad SFD");
1982 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "Total Packets Missed");
1984 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "MAC Local Faults");
1986 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "MAC Remote Faults");
1988 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Receive Length Errors");
1990 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Link XON Transmitted");
1992 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Link XON Received");
1994 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Link XOFF Transmitted");
1996 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Link XOFF Received");
1998
1999 /* Packet Reception Stats */
2000 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Total Octets Received");
2002 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Good Octets Received");
2004 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Total Packets Received");
2006 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Good Packets Received");
2008 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Multicast Packets Received");
2010 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Broadcast Packets Received");
2012 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "64 byte frames received ");
2014 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "65-127 byte frames received");
2016 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "128-255 byte frames received");
2018 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "256-511 byte frames received");
2020 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "512-1023 byte frames received");
2022 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "1023-1522 byte frames received");
2024 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Receive Undersized");
2026 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Fragmented Packets Received ");
2028 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Oversized Packets Received");
2030 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Received Jabber");
2032 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Management Packets Received");
2034 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Management Packets Dropped");
2036 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Checksum Errors");
2038
2039 /* Packet Transmission Stats */
2040 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "Good Octets Transmitted");
2042 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "Total Packets Transmitted");
2044 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "Good Packets Transmitted");
2046 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "Broadcast Packets Transmitted");
2048 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "Multicast Packets Transmitted");
2050 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Management Packets Transmitted");
2052 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "64 byte frames transmitted ");
2054 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "65-127 byte frames transmitted");
2056 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "128-255 byte frames transmitted");
2058 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "256-511 byte frames transmitted");
2060 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "512-1023 byte frames transmitted");
2062 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "1024-1522 byte frames transmitted");
2064 } /* ixgbe_add_hw_stats */
2065
/*
 * ixgbe_clear_evcnt
 *
 * Reset every driver evcnt(9) counter and the per-ring software error
 * tallies back to zero.  The set of counters cleared here mirrors the
 * set attached in ixgbe_add_hw_stats(); the two functions must be kept
 * in sync when counters are added or removed.
 */
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver software counters */
	IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0);
	IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0);
	IXGBE_EVC_STORE(&adapter->tso_err, 0);
	IXGBE_EVC_STORE(&adapter->watchdog_events, 0);
	IXGBE_EVC_STORE(&adapter->admin_irq, 0);
	IXGBE_EVC_STORE(&adapter->link_sicount, 0);
	IXGBE_EVC_STORE(&adapter->mod_sicount, 0);
	IXGBE_EVC_STORE(&adapter->msf_sicount, 0);
	IXGBE_EVC_STORE(&adapter->phy_sicount, 0);

	/* Per-traffic-class counters (same guards as when attaching) */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			IXGBE_EVC_STORE(&stats->mpc[i], 0);
			if (hw->mac.type == ixgbe_mac_82598EB)
				IXGBE_EVC_STORE(&stats->rnbc[i], 0);
		}
		if (i < __arraycount(stats->pxontxc)) {
			IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
			IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
			IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
			IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
		}
	}

	/* Per-queue counters; rewind txr in case it was advanced above */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		IXGBE_EVC_STORE(&adapter->queues[i].irqs, 0);
		IXGBE_EVC_STORE(&adapter->queues[i].handleq, 0);
		IXGBE_EVC_STORE(&adapter->queues[i].req, 0);
		IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
		IXGBE_EVC_STORE(&txr->total_packets, 0);
		IXGBE_EVC_STORE(&txr->tso_tx, 0);
#ifndef IXGBE_LEGACY_TX
		IXGBE_EVC_STORE(&txr->pcq_drops, 0);
#endif
		/* Plain (non-evcnt) per-ring error tallies */
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			IXGBE_EVC_STORE(&stats->qprc[i], 0);
			IXGBE_EVC_STORE(&stats->qptc[i], 0);
			IXGBE_EVC_STORE(&stats->qbrc[i], 0);
			IXGBE_EVC_STORE(&stats->qbtc[i], 0);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				IXGBE_EVC_STORE(&stats->qprdc[i], 0);
		}

		IXGBE_EVC_STORE(&rxr->rx_packets, 0);
		IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
		IXGBE_EVC_STORE(&rxr->rx_copies, 0);
		IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
		IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
	}
	/* MAC-level counters */
	IXGBE_EVC_STORE(&stats->ipcs, 0);
	IXGBE_EVC_STORE(&stats->l4cs, 0);
	IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
	IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
	IXGBE_EVC_STORE(&stats->intzero, 0);
	IXGBE_EVC_STORE(&stats->legint, 0);
	IXGBE_EVC_STORE(&stats->crcerrs, 0);
	IXGBE_EVC_STORE(&stats->illerrc, 0);
	IXGBE_EVC_STORE(&stats->errbc, 0);
	IXGBE_EVC_STORE(&stats->mspdc, 0);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_EVC_STORE(&stats->mbsdc, 0);
	IXGBE_EVC_STORE(&stats->mpctotal, 0);
	IXGBE_EVC_STORE(&stats->mlfc, 0);
	IXGBE_EVC_STORE(&stats->mrfc, 0);
	IXGBE_EVC_STORE(&stats->rlec, 0);
	IXGBE_EVC_STORE(&stats->lxontxc, 0);
	IXGBE_EVC_STORE(&stats->lxonrxc, 0);
	IXGBE_EVC_STORE(&stats->lxofftxc, 0);
	IXGBE_EVC_STORE(&stats->lxoffrxc, 0);

	/* Packet Reception Stats */
	IXGBE_EVC_STORE(&stats->tor, 0);
	IXGBE_EVC_STORE(&stats->gorc, 0);
	IXGBE_EVC_STORE(&stats->tpr, 0);
	IXGBE_EVC_STORE(&stats->gprc, 0);
	IXGBE_EVC_STORE(&stats->mprc, 0);
	IXGBE_EVC_STORE(&stats->bprc, 0);
	IXGBE_EVC_STORE(&stats->prc64, 0);
	IXGBE_EVC_STORE(&stats->prc127, 0);
	IXGBE_EVC_STORE(&stats->prc255, 0);
	IXGBE_EVC_STORE(&stats->prc511, 0);
	IXGBE_EVC_STORE(&stats->prc1023, 0);
	IXGBE_EVC_STORE(&stats->prc1522, 0);
	IXGBE_EVC_STORE(&stats->ruc, 0);
	IXGBE_EVC_STORE(&stats->rfc, 0);
	IXGBE_EVC_STORE(&stats->roc, 0);
	IXGBE_EVC_STORE(&stats->rjc, 0);
	IXGBE_EVC_STORE(&stats->mngprc, 0);
	IXGBE_EVC_STORE(&stats->mngpdc, 0);
	IXGBE_EVC_STORE(&stats->xec, 0);

	/* Packet Transmission Stats */
	IXGBE_EVC_STORE(&stats->gotc, 0);
	IXGBE_EVC_STORE(&stats->tpt, 0);
	IXGBE_EVC_STORE(&stats->gptc, 0);
	IXGBE_EVC_STORE(&stats->bptc, 0);
	IXGBE_EVC_STORE(&stats->mptc, 0);
	IXGBE_EVC_STORE(&stats->mngptc, 0);
	IXGBE_EVC_STORE(&stats->ptc64, 0);
	IXGBE_EVC_STORE(&stats->ptc127, 0);
	IXGBE_EVC_STORE(&stats->ptc255, 0);
	IXGBE_EVC_STORE(&stats->ptc511, 0);
	IXGBE_EVC_STORE(&stats->ptc1023, 0);
	IXGBE_EVC_STORE(&stats->ptc1522, 0);
}
2197
2198 /************************************************************************
2199 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2200 *
2201 * Retrieves the TDH value from the hardware
2202 ************************************************************************/
2203 static int
2204 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2205 {
2206 struct sysctlnode node = *rnode;
2207 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2208 struct adapter *adapter;
2209 uint32_t val;
2210
2211 if (!txr)
2212 return (0);
2213
2214 adapter = txr->adapter;
2215 if (ixgbe_fw_recovery_mode_swflag(adapter))
2216 return (EPERM);
2217
2218 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2219 node.sysctl_data = &val;
2220 return sysctl_lookup(SYSCTLFN_CALL(&node));
2221 } /* ixgbe_sysctl_tdh_handler */
2222
2223 /************************************************************************
2224 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2225 *
2226 * Retrieves the TDT value from the hardware
2227 ************************************************************************/
2228 static int
2229 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2230 {
2231 struct sysctlnode node = *rnode;
2232 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2233 struct adapter *adapter;
2234 uint32_t val;
2235
2236 if (!txr)
2237 return (0);
2238
2239 adapter = txr->adapter;
2240 if (ixgbe_fw_recovery_mode_swflag(adapter))
2241 return (EPERM);
2242
2243 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2244 node.sysctl_data = &val;
2245 return sysctl_lookup(SYSCTLFN_CALL(&node));
2246 } /* ixgbe_sysctl_tdt_handler */
2247
2248 /************************************************************************
2249 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2250 * handler function
2251 *
2252 * Retrieves the next_to_check value
2253 ************************************************************************/
2254 static int
2255 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2256 {
2257 struct sysctlnode node = *rnode;
2258 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2259 struct adapter *adapter;
2260 uint32_t val;
2261
2262 if (!rxr)
2263 return (0);
2264
2265 adapter = rxr->adapter;
2266 if (ixgbe_fw_recovery_mode_swflag(adapter))
2267 return (EPERM);
2268
2269 val = rxr->next_to_check;
2270 node.sysctl_data = &val;
2271 return sysctl_lookup(SYSCTLFN_CALL(&node));
2272 } /* ixgbe_sysctl_next_to_check_handler */
2273
2274 /************************************************************************
2275 * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to check
2276 * handler function
2277 *
2278 * Retrieves the next_to_refresh value
2279 ************************************************************************/
2280 static int
2281 ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2282 {
2283 struct sysctlnode node = *rnode;
2284 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2285 struct adapter *adapter;
2286 uint32_t val;
2287
2288 if (!rxr)
2289 return (0);
2290
2291 adapter = rxr->adapter;
2292 if (ixgbe_fw_recovery_mode_swflag(adapter))
2293 return (EPERM);
2294
2295 val = rxr->next_to_refresh;
2296 node.sysctl_data = &val;
2297 return sysctl_lookup(SYSCTLFN_CALL(&node));
2298 } /* ixgbe_sysctl_next_to_refresh_handler */
2299
2300 /************************************************************************
2301 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2302 *
2303 * Retrieves the RDH value from the hardware
2304 ************************************************************************/
2305 static int
2306 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2307 {
2308 struct sysctlnode node = *rnode;
2309 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2310 struct adapter *adapter;
2311 uint32_t val;
2312
2313 if (!rxr)
2314 return (0);
2315
2316 adapter = rxr->adapter;
2317 if (ixgbe_fw_recovery_mode_swflag(adapter))
2318 return (EPERM);
2319
2320 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2321 node.sysctl_data = &val;
2322 return sysctl_lookup(SYSCTLFN_CALL(&node));
2323 } /* ixgbe_sysctl_rdh_handler */
2324
2325 /************************************************************************
2326 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2327 *
2328 * Retrieves the RDT value from the hardware
2329 ************************************************************************/
2330 static int
2331 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2332 {
2333 struct sysctlnode node = *rnode;
2334 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2335 struct adapter *adapter;
2336 uint32_t val;
2337
2338 if (!rxr)
2339 return (0);
2340
2341 adapter = rxr->adapter;
2342 if (ixgbe_fw_recovery_mode_swflag(adapter))
2343 return (EPERM);
2344
2345 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2346 node.sysctl_data = &val;
2347 return sysctl_lookup(SYSCTLFN_CALL(&node));
2348 } /* ixgbe_sysctl_rdt_handler */
2349
2350 static int
2351 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2352 {
2353 struct ifnet *ifp = &ec->ec_if;
2354 struct adapter *adapter = ifp->if_softc;
2355 int rv;
2356
2357 if (set)
2358 rv = ixgbe_register_vlan(adapter, vid);
2359 else
2360 rv = ixgbe_unregister_vlan(adapter, vid);
2361
2362 if (rv != 0)
2363 return rv;
2364
2365 /*
2366 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2367 * or 0 to 1.
2368 */
2369 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2370 ixgbe_setup_vlan_hw_tagging(adapter);
2371
2372 return rv;
2373 }
2374
2375 /************************************************************************
2376 * ixgbe_register_vlan
2377 *
2378 * Run via vlan config EVENT, it enables us to use the
2379 * HW Filter table since we can get the vlan id. This
2380 * just creates the entry in the soft version of the
2381 * VFTA, init will repopulate the real table.
2382 ************************************************************************/
2383 static int
2384 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2385 {
2386 u16 index, bit;
2387 int error;
2388
2389 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2390 return EINVAL;
2391
2392 IXGBE_CORE_LOCK(adapter);
2393 index = (vtag >> 5) & 0x7F;
2394 bit = vtag & 0x1F;
2395 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2396 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2397 true);
2398 IXGBE_CORE_UNLOCK(adapter);
2399 if (error != 0)
2400 error = EACCES;
2401
2402 return error;
2403 } /* ixgbe_register_vlan */
2404
2405 /************************************************************************
2406 * ixgbe_unregister_vlan
2407 *
2408 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2409 ************************************************************************/
2410 static int
2411 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2412 {
2413 u16 index, bit;
2414 int error;
2415
2416 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2417 return EINVAL;
2418
2419 IXGBE_CORE_LOCK(adapter);
2420 index = (vtag >> 5) & 0x7F;
2421 bit = vtag & 0x1F;
2422 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2423 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2424 true);
2425 IXGBE_CORE_UNLOCK(adapter);
2426 if (error != 0)
2427 error = EACCES;
2428
2429 return error;
2430 } /* ixgbe_unregister_vlan */
2431
2432 static void
2433 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2434 {
2435 struct ethercom *ec = &adapter->osdep.ec;
2436 struct ixgbe_hw *hw = &adapter->hw;
2437 struct rx_ring *rxr;
2438 u32 ctrl;
2439 int i;
2440 bool hwtagging;
2441
2442 /* Enable HW tagging only if any vlan is attached */
2443 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2444 && VLAN_ATTACHED(ec);
2445
2446 /* Setup the queues for vlans */
2447 for (i = 0; i < adapter->num_queues; i++) {
2448 rxr = &adapter->rx_rings[i];
2449 /*
2450 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2451 */
2452 if (hw->mac.type != ixgbe_mac_82598EB) {
2453 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2454 if (hwtagging)
2455 ctrl |= IXGBE_RXDCTL_VME;
2456 else
2457 ctrl &= ~IXGBE_RXDCTL_VME;
2458 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2459 }
2460 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2461 }
2462
2463 /* VLAN hw tagging for 82598 */
2464 if (hw->mac.type == ixgbe_mac_82598EB) {
2465 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2466 if (hwtagging)
2467 ctrl |= IXGBE_VLNCTRL_VME;
2468 else
2469 ctrl &= ~IXGBE_VLNCTRL_VME;
2470 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2471 }
2472 } /* ixgbe_setup_vlan_hw_tagging */
2473
2474 static void
2475 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2476 {
2477 struct ethercom *ec = &adapter->osdep.ec;
2478 struct ixgbe_hw *hw = &adapter->hw;
2479 int i;
2480 u32 ctrl;
2481 struct vlanid_list *vlanidp;
2482
2483 /*
2484 * This function is called from both if_init and ifflags_cb()
2485 * on NetBSD.
2486 */
2487
2488 /*
2489 * Part 1:
2490 * Setup VLAN HW tagging
2491 */
2492 ixgbe_setup_vlan_hw_tagging(adapter);
2493
2494 /*
2495 * Part 2:
2496 * Setup VLAN HW filter
2497 */
2498 /* Cleanup shadow_vfta */
2499 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2500 adapter->shadow_vfta[i] = 0;
2501 /* Generate shadow_vfta from ec_vids */
2502 ETHER_LOCK(ec);
2503 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2504 uint32_t idx;
2505
2506 idx = vlanidp->vid / 32;
2507 KASSERT(idx < IXGBE_VFTA_SIZE);
2508 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2509 }
2510 ETHER_UNLOCK(ec);
2511 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2512 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2513
2514 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2515 /* Enable the Filter Table if enabled */
2516 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2517 ctrl |= IXGBE_VLNCTRL_VFE;
2518 else
2519 ctrl &= ~IXGBE_VLNCTRL_VFE;
2520 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2521 } /* ixgbe_setup_vlan_hw_support */
2522
/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/*
	 * Some devices are behind an internal bridge; for those the
	 * device's own config space reports the internal link, so walk
	 * up the PCI tree instead.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Integrated MACs: nothing useful to print */
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn if the slot can't feed the NIC at full rate */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev,
		    "Unable to determine slot speed/width. The speed/width "
		    "reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2632
2633 /************************************************************************
2634 * ixgbe_enable_queue - Queue Interrupt Enabler
2635 ************************************************************************/
2636 static inline void
2637 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2638 {
2639 struct ixgbe_hw *hw = &adapter->hw;
2640 struct ix_queue *que = &adapter->queues[vector];
2641 u64 queue = 1ULL << vector;
2642 u32 mask;
2643
2644 mutex_enter(&que->dc_mtx);
2645 if (que->disabled_count > 0 && --que->disabled_count > 0)
2646 goto out;
2647
2648 if (hw->mac.type == ixgbe_mac_82598EB) {
2649 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2650 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2651 } else {
2652 mask = (queue & 0xFFFFFFFF);
2653 if (mask)
2654 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2655 mask = (queue >> 32);
2656 if (mask)
2657 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2658 }
2659 out:
2660 mutex_exit(&que->dc_mtx);
2661 } /* ixgbe_enable_queue */
2662
2663 /************************************************************************
2664 * ixgbe_disable_queue_internal
2665 ************************************************************************/
2666 static inline void
2667 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2668 {
2669 struct ixgbe_hw *hw = &adapter->hw;
2670 struct ix_queue *que = &adapter->queues[vector];
2671 u64 queue = 1ULL << vector;
2672 u32 mask;
2673
2674 mutex_enter(&que->dc_mtx);
2675
2676 if (que->disabled_count > 0) {
2677 if (nestok)
2678 que->disabled_count++;
2679 goto out;
2680 }
2681 que->disabled_count++;
2682
2683 if (hw->mac.type == ixgbe_mac_82598EB) {
2684 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2685 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2686 } else {
2687 mask = (queue & 0xFFFFFFFF);
2688 if (mask)
2689 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2690 mask = (queue >> 32);
2691 if (mask)
2692 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2693 }
2694 out:
2695 mutex_exit(&que->dc_mtx);
2696 } /* ixgbe_disable_queue_internal */
2697
/************************************************************************
 * ixgbe_disable_queue - mask a queue interrupt, nesting allowed
 *
 *   Convenience wrapper around ixgbe_disable_queue_internal() with
 *   nestok = true: repeated disables stack up in disabled_count, and
 *   the queue interrupt is unmasked again only after a matching number
 *   of ixgbe_enable_queue() calls.
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2707
2708 /************************************************************************
2709 * ixgbe_sched_handle_que - schedule deferred packet processing
2710 ************************************************************************/
2711 static inline void
2712 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2713 {
2714
2715 if (que->txrx_use_workqueue) {
2716 /*
2717 * adapter->que_wq is bound to each CPU instead of
2718 * each NIC queue to reduce workqueue kthread. As we
2719 * should consider about interrupt affinity in this
2720 * function, the workqueue kthread must be WQ_PERCPU.
2721 * If create WQ_PERCPU workqueue kthread for each NIC
2722 * queue, that number of created workqueue kthread is
2723 * (number of used NIC queue) * (number of CPUs) =
2724 * (number of CPUs) ^ 2 most often.
2725 *
2726 * The same NIC queue's interrupts are avoided by
2727 * masking the queue's interrupt. And different
2728 * NIC queue's interrupts use different struct work
2729 * (que->wq_cookie). So, "enqueued flag" to avoid
2730 * twice workqueue_enqueue() is not required .
2731 */
2732 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2733 } else
2734 softint_schedule(que->que_si);
2735 }
2736
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue MSI-X handler: masks the queue's interrupt, reaps
 *   completed TX descriptors, optionally recomputes the adaptive
 *   interrupt moderation (AIM) interval from traffic seen since the
 *   last interrupt, then defers RX/TX processing to softint or
 *   workqueue context.  Returns 1 when handled, 0 when spurious.
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue until the deferred handler re-enables it. */
	ixgbe_disable_queue(adapter, que->msix);
	IXGBE_EVC_ADD(&que->irqs, 1);

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = max of the average TX and RX bytes-per-packet */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL))
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state so the next interval starts from zero. */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	ixgbe_sched_handle_que(adapter, que);

	return 1;
} /* ixgbe_msix_que */
2826
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.  Refreshes the link state, then maps
 *   the (physical layer, link speed) pair onto an ifmedia active word
 *   and reports the current flow-control pause settings.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No carrier: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T/TX) physical layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper / active DA cables */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach optics */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach optics */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2974
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.  Translates the requested
 *   ifmedia subtype into an ixgbe_link_speed mask, programs the link,
 *   and records the result in adapter->advertise.
 *
 *   Returns 0 on success, EINVAL for an unusable media type, EPERM on
 *   backplane devices (media is fixed), ENODEV when the supported
 *   speeds cannot be determined for IFM_AUTO.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; changing it is not permitted. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise every speed the hardware reports it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds as a bitmask:
	 *   bit 0 = 100M, bit 1 = 1G, bit 2 = 10G,
	 *   bit 3 = 10M,  bit 4 = 2.5G, bit 5 = 5G.
	 * 0 means autonegotiate everything (IFM_AUTO).
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
3080
/************************************************************************
 * ixgbe_msix_admin - Link status change ISR (MSI-X)
 *
 *   Handler for the admin (non-queue) MSI-X vector: link state
 *   changes, SFP module events, overtemp, fan failure, mailbox, etc.
 *   Reads the cause, dispatches via ixgbe_intr_admin_common(), and
 *   re-enables everything except the causes whose deferred handlers
 *   are still pending (eims_disable).
 ************************************************************************/
static int
ixgbe_msix_admin(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 eims_orig;
	u32 eims_disable = 0;

	IXGBE_EVC_ADD(&adapter->admin_irq, 1);

	/* Snapshot the current mask so it can be restored below. */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);

	/*
	 * First get the cause.
	 *
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause.
	 * At least, reading EICR clears lower 16bits of EIMS on 82598.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear all OTHER interrupts with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some OTHER interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);

	return 1;
} /* ixgbe_msix_admin */
3120
/*
 * ixgbe_intr_admin_common - demultiplex admin interrupt causes
 *
 *   Shared between the MSI-X admin vector and (presumably) the legacy
 *   interrupt path.  For each cause bit set in eicr, schedule the
 *   matching softint and accumulate into *eims_disable the EIMS bits
 *   that must stay masked until that softint has run.
 */
static void
ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		softint_schedule(adapter->link_si);
		*eims_disable |= IXGBE_EIMS_LSC;
	}

	if (ixgbe_is_sfp(hw)) {
		u32 eicr_mask;

		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/*
		 * An interrupt might not arrive when a module is inserted.
		 * When an link status change interrupt occurred and the driver
		 * still regard SFP as unplugged, issue the module softint
		 * and then issue LSC interrupt.
		 */
		if ((eicr & eicr_mask)
		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
			&& (eicr & IXGBE_EICR_LSC))) {
			softint_schedule(adapter->mod_si);
			*eims_disable |= IXGBE_EIMS_LSC;
		}

		/* 82599 multispeed fiber: SDP1 signals a speed change. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			softint_schedule(adapter->msf_si);
			*eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* Causes below do not exist on 82598. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		/* Flow Director table-full reinit, at most one in flight. */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
				softint_schedule(adapter->fdir_si);
				/* Disable the interrupt */
				*eims_disable |= IXGBE_EIMS_FLOW_DIR;
			}
		}
#endif

		/* Rate-limited so a stuck ECC bit cannot flood the log. */
		if (eicr & IXGBE_EICR_ECC) {
			if (ratecheck(&adapter->lasterr_time,
			    &ixgbe_errlog_intrvl))
				device_printf(adapter->dev,
				    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				if (ratecheck(&adapter->lasterr_time,
				    &ixgbe_errlog_intrvl)) {
					device_printf(adapter->dev,
					    "CRITICAL: OVER TEMP!! "
					    "PHY IS SHUT DOWN!!\n");
					device_printf(adapter->dev,
					    "System shutdown required!\n");
				}
				break;
			default:
				/* Other MACs use the thermal-sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				if (ratecheck(&adapter->lasterr_time,
				    &ixgbe_errlog_intrvl)) {
					device_printf(adapter->dev,
					    "CRITICAL: OVER TEMP!! "
					    "PHY IS SHUT DOWN!!\n");
					device_printf(adapter->dev,
					    "System shutdown required!\n");
				}
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX)) {
			softint_schedule(adapter->mbx_si);
			*eims_disable |= IXGBE_EIMS_MAILBOX;
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		ixgbe_check_fan_failure(adapter, eicr, true);

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		softint_schedule(adapter->phy_si);
		*eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
	}
}
3236
3237 static void
3238 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3239 {
3240
3241 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3242 itr |= itr << 16;
3243 else
3244 itr |= IXGBE_EITR_CNT_WDIS;
3245
3246 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3247 }
3248
3249
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler exposing a queue's interrupt rate (interrupts/sec).
 *   Read: derives the rate from the EITR interval field.  Write:
 *   converts the requested rate back into an EITR interval, clamps it,
 *   and programs the register.  A rate of 0 removes the limit.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	/*
	 * Interval lives in bits [11:3]; the raw field counts 2us units,
	 * hence rate = 1e6 / (2 * field) = 500000 / field.
	 */
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;
		/* 4000000/rate == (500000/rate) << 3, already field-aligned */
		reg |= ((4000000 / rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3303
3304 const struct sysctlnode *
3305 ixgbe_sysctl_instance(struct adapter *adapter)
3306 {
3307 const char *dvname;
3308 struct sysctllog **log;
3309 int rc;
3310 const struct sysctlnode *rnode;
3311
3312 if (adapter->sysctltop != NULL)
3313 return adapter->sysctltop;
3314
3315 log = &adapter->sysctllog;
3316 dvname = device_xname(adapter->dev);
3317
3318 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3319 0, CTLTYPE_NODE, dvname,
3320 SYSCTL_DESCR("ixgbe information and settings"),
3321 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3322 goto err;
3323
3324 return rnode;
3325 err:
3326 device_printf(adapter->dev,
3327 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3328 return NULL;
3329 }
3330
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Register the per-device sysctl tree: generic tunables/counters
 *   first, then feature-conditional nodes (DMA coalescing on X550+,
 *   WoL, external PHY temperature on X552/X557-AT, 10/100 forced
 *   autonego on X550EM_a+fw PHY, EEE).  Creation failures are logged
 *   but non-fatal.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL)
	    != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
	    ixgbe_sysctl_rx_copy_len, 0,
	    (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
	    NULL, 0, &adapter->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
	    SYSCTL_DESCR("max number of RX packets to process"),
	    ixgbe_sysctl_rx_process_limit, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
	    SYSCTL_DESCR("max number of TX packets to process"),
	    ixgbe_sysctl_tx_process_limit, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR(
			    "External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3518
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR0 (the device register window) and make sure memory-space
 *   decoding is enabled in the PCI command register.  Returns 0 on
 *   success, ENXIO when the BAR has an unexpected type or cannot be
 *   mapped.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3568
3569 static void
3570 ixgbe_free_softint(struct adapter *adapter)
3571 {
3572 struct ix_queue *que = adapter->queues;
3573 struct tx_ring *txr = adapter->tx_rings;
3574 int i;
3575
3576 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3577 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3578 if (txr->txr_si != NULL)
3579 softint_disestablish(txr->txr_si);
3580 }
3581 if (que->que_si != NULL)
3582 softint_disestablish(que->que_si);
3583 }
3584 if (adapter->txr_wq != NULL)
3585 workqueue_destroy(adapter->txr_wq);
3586 if (adapter->txr_wq_enqueued != NULL)
3587 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3588 if (adapter->que_wq != NULL)
3589 workqueue_destroy(adapter->que_wq);
3590
3591 /* Drain the Link queue */
3592 if (adapter->link_si != NULL) {
3593 softint_disestablish(adapter->link_si);
3594 adapter->link_si = NULL;
3595 }
3596 if (adapter->mod_si != NULL) {
3597 softint_disestablish(adapter->mod_si);
3598 adapter->mod_si = NULL;
3599 }
3600 if (adapter->msf_si != NULL) {
3601 softint_disestablish(adapter->msf_si);
3602 adapter->msf_si = NULL;
3603 }
3604 if (adapter->phy_si != NULL) {
3605 softint_disestablish(adapter->phy_si);
3606 adapter->phy_si = NULL;
3607 }
3608 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3609 if (adapter->fdir_si != NULL) {
3610 softint_disestablish(adapter->fdir_si);
3611 adapter->fdir_si = NULL;
3612 }
3613 }
3614 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3615 if (adapter->mbx_si != NULL) {
3616 softint_disestablish(adapter->mbx_si);
3617 adapter->mbx_si = NULL;
3618 }
3619 }
3620 } /* ixgbe_free_softint */
3621
/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach bailed out before completing */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while VFs are bound to this PF */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls
	 * ixgbe_stop_locked(), so it's not required to call ixgbe_stop_locked()
	 * directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		/* caller forced the detach; drop the VLANs ourselves */
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Tear down softints/workqueues before freeing their backing queues */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for in-flight callouts to finish; they won't be rescheduled */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);

	/* Detach the per-adapter software event counters */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/*
	 * Per-traffic-class counters.  The mac.type conditionals mirror
	 * which counters the attach path set up — presumably rnbc only on
	 * 82598 and pxon2offc only on 82599 and newer; verify against attach.
	 */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters (irq/handleq/req plus Tx/Rx ring stats) */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_mbuf);
		evcnt_detach(&rxr->rx_discarded);
	}

	/* Global MAC statistics counters */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release ring memory, the MTA and the core lock */
	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3818
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU and/or WoL
 *
 * Stops the adapter and then either powers the PHY fully down (WoL
 * disabled) or programs the wake-up filter/control registers so the
 * NIC can wake the system (WoL enabled).  Must be called with the
 * core lock held.  Returns 0 or a PHY error from enter_lplu().
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the stop path from resetting the PHY underneath us */
		hw->phy.reset_disable = true;
		ixgbe_stop_locked(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop_locked(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
	}

	return error;
} /* ixgbe_setup_low_power_mode */
3877
/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 *
 * Compiled out on NetBSD: the equivalent low-power transition is done
 * through pmf(9) via ixgbe_suspend()/ixgbe_detach() instead.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Enter LPLU/WoL state under the core lock */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3897
3898 /************************************************************************
3899 * ixgbe_suspend
3900 *
3901 * From D0 to D3
3902 ************************************************************************/
3903 static bool
3904 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3905 {
3906 struct adapter *adapter = device_private(dev);
3907 int error = 0;
3908
3909 INIT_DEBUGOUT("ixgbe_suspend: begin");
3910
3911 IXGBE_CORE_LOCK(adapter);
3912
3913 error = ixgbe_setup_low_power_mode(adapter);
3914
3915 IXGBE_CORE_UNLOCK(adapter);
3916
3917 return (error);
3918 } /* ixgbe_suspend */
3919
3920 /************************************************************************
3921 * ixgbe_resume
3922 *
3923 * From D3 to D0
3924 ************************************************************************/
3925 static bool
3926 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3927 {
3928 struct adapter *adapter = device_private(dev);
3929 struct ifnet *ifp = adapter->ifp;
3930 struct ixgbe_hw *hw = &adapter->hw;
3931 u32 wus;
3932
3933 INIT_DEBUGOUT("ixgbe_resume: begin");
3934
3935 IXGBE_CORE_LOCK(adapter);
3936
3937 /* Read & clear WUS register */
3938 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3939 if (wus)
3940 device_printf(dev, "Woken up by (WUS): %#010x\n",
3941 IXGBE_READ_REG(hw, IXGBE_WUS));
3942 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3943 /* And clear WUFC until next low-power transition */
3944 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3945
3946 /*
3947 * Required after D3->D0 transition;
3948 * will re-advertise all previous advertised speeds
3949 */
3950 if (ifp->if_flags & IFF_UP)
3951 ixgbe_init_locked(adapter);
3952
3953 IXGBE_CORE_UNLOCK(adapter);
3954
3955 return true;
3956 } /* ixgbe_resume */
3957
/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 *
 * NOTE(review): this is an empty stub in the NetBSD port — if_hwassist is
 * a FreeBSD concept; NetBSD presumably conveys offload capability another
 * way.  Kept so ixgbe_init_locked() matches the FreeBSD call sequence.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
}
3970
/************************************************************************
 * ixgbe_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init
 * entry point in network interface structure. It is also
 * used by the driver as a hw/sw initialization routine to
 * get to a consistent state.
 *
 * Must be called with the core lock held.  Stops the adapter, then
 * brings it back up: MAC address, Tx/Rx rings, interrupt routing,
 * flow control, VLAN filtering and finally interrupts.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, error;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware before reprogramming it */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Use fixed buffer size, even for jumbo frames */
	adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	error = ixgbe_setup_receive_structures(adapter);
	if (error) {
		device_printf(dev,
		    "Could not setup receive structures (err = %d)\n", error);
		ixgbe_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to 10ms) for the ring-enable bit to latch */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic link/watchdog timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		error = hw->phy.ops.identify(hw);
		if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4230
4231 /************************************************************************
4232 * ixgbe_init
4233 ************************************************************************/
4234 static int
4235 ixgbe_init(struct ifnet *ifp)
4236 {
4237 struct adapter *adapter = ifp->if_softc;
4238
4239 IXGBE_CORE_LOCK(adapter);
4240 ixgbe_init_locked(adapter);
4241 IXGBE_CORE_UNLOCK(adapter);
4242
4243 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4244 } /* ixgbe_init */
4245
/************************************************************************
 * ixgbe_set_ivar
 *
 * Setup the correct IVAR register for a particular MSI-X interrupt
 * (yes this is all very magic and confusing :)
 * - entry is the register array entry
 * - vector is the MSI-X vector for this queue
 * - type is RX/TX/MISC
 *   (0 = RX, 1 = TX, -1 = other/link causes)
 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Hardware requires the "allocation valid" bit set in each field */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: one flat IVAR table, four 8-bit fields per
		 * register; Tx causes live 64 entries after Rx causes.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* Read-modify-write just this entry's byte lane */
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* Misc causes use their own register, two fields */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			/*
			 * Each IVAR register covers two queues:
			 * low half = even queue, high half = odd queue;
			 * Rx field at bit 0/16, Tx field at bit 8/24.
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
4298
4299 /************************************************************************
4300 * ixgbe_configure_ivars
4301 ************************************************************************/
4302 static void
4303 ixgbe_configure_ivars(struct adapter *adapter)
4304 {
4305 struct ix_queue *que = adapter->queues;
4306 u32 newitr;
4307
4308 if (ixgbe_max_interrupt_rate > 0)
4309 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4310 else {
4311 /*
4312 * Disable DMA coalescing if interrupt moderation is
4313 * disabled.
4314 */
4315 adapter->dmac = 0;
4316 newitr = 0;
4317 }
4318
4319 for (int i = 0; i < adapter->num_queues; i++, que++) {
4320 struct rx_ring *rxr = &adapter->rx_rings[i];
4321 struct tx_ring *txr = &adapter->tx_rings[i];
4322 /* First the RX queue entry */
4323 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4324 /* ... and the TX */
4325 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4326 /* Set an Initial EITR value */
4327 ixgbe_eitr_write(adapter, que->msix, newitr);
4328 /*
4329 * To eliminate influence of the previous state.
4330 * At this point, Tx/Rx interrupt handler
4331 * (ixgbe_msix_que()) cannot be called, so both
4332 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4333 */
4334 que->eitr_setting = 0;
4335 }
4336
4337 /* For the Link interrupt */
4338 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4339 } /* ixgbe_configure_ivars */
4340
4341 /************************************************************************
4342 * ixgbe_config_gpie
4343 ************************************************************************/
4344 static void
4345 ixgbe_config_gpie(struct adapter *adapter)
4346 {
4347 struct ixgbe_hw *hw = &adapter->hw;
4348 u32 gpie;
4349
4350 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4351
4352 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4353 /* Enable Enhanced MSI-X mode */
4354 gpie |= IXGBE_GPIE_MSIX_MODE
4355 | IXGBE_GPIE_EIAME
4356 | IXGBE_GPIE_PBA_SUPPORT
4357 | IXGBE_GPIE_OCD;
4358 }
4359
4360 /* Fan Failure Interrupt */
4361 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4362 gpie |= IXGBE_SDP1_GPIEN;
4363
4364 /* Thermal Sensor Interrupt */
4365 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4366 gpie |= IXGBE_SDP0_GPIEN_X540;
4367
4368 /* Link detection */
4369 switch (hw->mac.type) {
4370 case ixgbe_mac_82599EB:
4371 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4372 break;
4373 case ixgbe_mac_X550EM_x:
4374 case ixgbe_mac_X550EM_a:
4375 gpie |= IXGBE_SDP0_GPIEN_X540;
4376 break;
4377 default:
4378 break;
4379 }
4380
4381 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4382
4383 } /* ixgbe_config_gpie */
4384
4385 /************************************************************************
4386 * ixgbe_config_delay_values
4387 *
4388 * Requires adapter->max_frame_size to be set.
4389 ************************************************************************/
4390 static void
4391 ixgbe_config_delay_values(struct adapter *adapter)
4392 {
4393 struct ixgbe_hw *hw = &adapter->hw;
4394 u32 rxpb, frame, size, tmp;
4395
4396 frame = adapter->max_frame_size;
4397
4398 /* Calculate High Water */
4399 switch (hw->mac.type) {
4400 case ixgbe_mac_X540:
4401 case ixgbe_mac_X550:
4402 case ixgbe_mac_X550EM_x:
4403 case ixgbe_mac_X550EM_a:
4404 tmp = IXGBE_DV_X540(frame, frame);
4405 break;
4406 default:
4407 tmp = IXGBE_DV(frame, frame);
4408 break;
4409 }
4410 size = IXGBE_BT2KB(tmp);
4411 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4412 hw->fc.high_water[0] = rxpb - size;
4413
4414 /* Now calculate Low Water */
4415 switch (hw->mac.type) {
4416 case ixgbe_mac_X540:
4417 case ixgbe_mac_X550:
4418 case ixgbe_mac_X550EM_x:
4419 case ixgbe_mac_X550EM_a:
4420 tmp = IXGBE_LOW_DV_X540(frame);
4421 break;
4422 default:
4423 tmp = IXGBE_LOW_DV(frame);
4424 break;
4425 }
4426 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4427
4428 hw->fc.pause_time = IXGBE_FC_PAUSE;
4429 hw->fc.send_xon = TRUE;
4430 } /* ixgbe_config_delay_values */
4431
/************************************************************************
 * ixgbe_set_rxfilter - Multicast Update
 *
 * Called whenever multicast address list is updated.
 *
 * Rebuilds the MTA shadow from the ethercom multicast list, falls back
 * to ALLMULTI when the list overflows or contains a range, and programs
 * FCTRL (promisc/allmulti bits) accordingly.  Core lock must be held.
 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the shadow multicast table from scratch */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/* Walk the multicast list under the ethercom lock */
	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI when the table is full or when
		 * an address *range* (addrlo != addrhi) is requested,
		 * which the exact-match table cannot express.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Translate IFF_PROMISC/ALLMULTI into FCTRL UPE/MPE bits */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/* Drop the ethercom lock before the (slow) MTA update */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4492
4493 /************************************************************************
4494 * ixgbe_mc_array_itr
4495 *
4496 * An iterator function needed by the multicast shared code.
4497 * It feeds the shared code routine the addresses in the
4498 * array of ixgbe_set_rxfilter() one by one.
4499 ************************************************************************/
4500 static u8 *
4501 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4502 {
4503 struct ixgbe_mc_addr *mta;
4504
4505 mta = (struct ixgbe_mc_addr *)*update_ptr;
4506 *vmdq = mta->vmdq;
4507
4508 *update_ptr = (u8*)(mta + 1);
4509
4510 return (mta->addr);
4511 } /* ixgbe_mc_array_itr */
4512
/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 * Callout entry point; takes the core lock and runs the real work
 * in ixgbe_local_timer1() (link status, statistics, watchdog check).
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4528
/*
 * ixgbe_local_timer1 - Locked body of the periodic timer.
 *
 * Runs once a second (rescheduled at the bottom): probes pluggable
 * optics, refreshes link status and statistics, aggregates per-ring
 * Tx-setup error counters into the adapter-wide event counters, and
 * performs the hung-queue/watchdog check.  Core lock must be held.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum each ring's Tx DMA-setup error tallies ... */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	/* ... and publish the totals into the adapter-wide counters */
	IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, v0);
	IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, v1);
	IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, v2);
	IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, v3);
	IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, v4);
	IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, v5);
	IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, v6);
	IXGBE_EVC_STORE(&adapter->tso_err, v7);

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, count it, and reinitialize the adapter */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	IXGBE_EVC_ADD(&adapter->watchdog_events, 1);
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4633
4634 /************************************************************************
4635 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4636 ************************************************************************/
4637 static void
4638 ixgbe_recovery_mode_timer(void *arg)
4639 {
4640 struct adapter *adapter = arg;
4641 struct ixgbe_hw *hw = &adapter->hw;
4642
4643 IXGBE_CORE_LOCK(adapter);
4644 if (ixgbe_fw_recovery_mode(hw)) {
4645 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1) == 0) {
4646 /* Firmware error detected, entering recovery mode */
4647 device_printf(adapter->dev,
4648 "Firmware recovery mode detected. Limiting "
4649 "functionality. Refer to the Intel(R) Ethernet "
4650 "Adapters and Devices User Guide for details on "
4651 "firmware recovery mode.\n");
4652
4653 if (hw->adapter_stopped == FALSE)
4654 ixgbe_stop_locked(adapter);
4655 }
4656 } else
4657 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4658
4659 callout_reset(&adapter->recovery_mode_timer, hz,
4660 ixgbe_recovery_mode_timer, adapter);
4661 IXGBE_CORE_UNLOCK(adapter);
4662 } /* ixgbe_recovery_mode_timer */
4663
4664 /************************************************************************
4665 * ixgbe_sfp_probe
4666 *
4667 * Determine if a port had optics inserted.
4668 ************************************************************************/
4669 static bool
4670 ixgbe_sfp_probe(struct adapter *adapter)
4671 {
4672 struct ixgbe_hw *hw = &adapter->hw;
4673 device_t dev = adapter->dev;
4674 bool result = FALSE;
4675
4676 if ((hw->phy.type == ixgbe_phy_nl) &&
4677 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4678 s32 ret = hw->phy.ops.identify_sfp(hw);
4679 if (ret)
4680 goto out;
4681 ret = hw->phy.ops.reset(hw);
4682 adapter->sfp_probe = FALSE;
4683 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4684 device_printf(dev,"Unsupported SFP+ module detected!");
4685 device_printf(dev,
4686 "Reload driver with supported module.\n");
4687 goto out;
4688 } else
4689 device_printf(dev, "SFP+ module detected!\n");
4690 /* We now have supported optics */
4691 result = TRUE;
4692 }
4693 out:
4694
4695 return (result);
4696 } /* ixgbe_sfp_probe */
4697
4698 /************************************************************************
4699 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4700 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	/* Softint handler for SFP module insert/remove interrupts. */
	IXGBE_CORE_LOCK(adapter);
	IXGBE_EVC_ADD(&adapter->mod_sicount, 1);
	/*
	 * Crosstalk workaround: on affected parts the module interrupt
	 * can fire spuriously, so first check the cage-present pin
	 * (SDP2 on 82599, SDP0 on X550EM) and bail if no module is
	 * actually seated.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		/* 82598 only needs a PHY reset; newer MACs program SFP setup. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}
	/* Hand off to the MSF softint to (re)negotiate link for the module. */
	softint_schedule(adapter->msf_si);
out:
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mod */
4764
4765
4766 /************************************************************************
4767 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4768 ************************************************************************/
4769 static void
4770 ixgbe_handle_msf(void *context)
4771 {
4772 struct adapter *adapter = context;
4773 struct ixgbe_hw *hw = &adapter->hw;
4774 u32 autoneg;
4775 bool negotiate;
4776
4777 IXGBE_CORE_LOCK(adapter);
4778 IXGBE_EVC_ADD(&adapter->msf_sicount, 1);
4779 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4780 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4781
4782 autoneg = hw->phy.autoneg_advertised;
4783 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4784 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4785 else
4786 negotiate = 0;
4787 if (hw->mac.ops.setup_link)
4788 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4789
4790 /* Adjust media types shown in ifconfig */
4791 ifmedia_removeall(&adapter->media);
4792 ixgbe_add_media_types(adapter);
4793 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4794 IXGBE_CORE_UNLOCK(adapter);
4795 } /* ixgbe_handle_msf */
4796
4797 /************************************************************************
4798 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4799 ************************************************************************/
4800 static void
4801 ixgbe_handle_phy(void *context)
4802 {
4803 struct adapter *adapter = context;
4804 struct ixgbe_hw *hw = &adapter->hw;
4805 int error;
4806
4807 IXGBE_EVC_ADD(&adapter->phy_sicount, 1);
4808 error = hw->phy.ops.handle_lasi(hw);
4809 if (error == IXGBE_ERR_OVERTEMP)
4810 device_printf(adapter->dev,
4811 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4812 " PHY will downshift to lower power state!\n");
4813 else if (error)
4814 device_printf(adapter->dev,
4815 "Error handling LASI interrupt: %d\n", error);
4816 } /* ixgbe_handle_phy */
4817
/*
 * if_stop handler: take the core lock and perform the real stop.
 * The "disable" argument from the ifnet interface is unused here.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
4827
4828 /************************************************************************
4829 * ixgbe_stop_locked - Stop the hardware
4830 *
4831 * Disables all traffic on the adapter by issuing a
4832 * global reset on the MAC and deallocates TX/RX buffers.
4833 ************************************************************************/
static void
ixgbe_stop_locked(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
	/* Quiesce software first: mask interrupts, stop the 1s timer. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling ixgbe_stop_adapter() so
	 * the stop is carried out in full (presumably the common code
	 * short-circuits when the flag is already set —
	 * NOTE(review): confirm against the shared ixgbe code).
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop_locked */
4869
4870 /************************************************************************
4871 * ixgbe_update_link_status - Update OS on link state
4872 *
4873 * Note: Only updates the OS on the cached link state.
4874 * The real check of the hardware only happens with
4875 * a link interrupt.
4876 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	 * Propagate the cached link state (adapter->link_up/link_speed)
	 * to the network stack.  Caller must hold the core lock.
	 */
	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Act only on a transition to UP. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP      -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any pending TX/RX work before going quiet. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4963
4964 /************************************************************************
4965 * ixgbe_config_dmac - Configure DMA Coalescing
4966 ************************************************************************/
4967 static void
4968 ixgbe_config_dmac(struct adapter *adapter)
4969 {
4970 struct ixgbe_hw *hw = &adapter->hw;
4971 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4972
4973 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4974 return;
4975
4976 if (dcfg->watchdog_timer ^ adapter->dmac ||
4977 dcfg->link_speed ^ adapter->link_speed) {
4978 dcfg->watchdog_timer = adapter->dmac;
4979 dcfg->fcoe_en = false;
4980 dcfg->link_speed = adapter->link_speed;
4981 dcfg->num_tcs = 1;
4982
4983 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4984 dcfg->watchdog_timer, dcfg->link_speed);
4985
4986 hw->mac.ops.dmac_config(hw);
4987 }
4988 } /* ixgbe_config_dmac */
4989
4990 /************************************************************************
4991 * ixgbe_enable_intr
4992 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32 mask, fwsm;

	/*
	 * Unmask interrupts: first the non-queue causes via EIMS,
	 * then each queue vector individually.
	 */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific miscellaneous causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
		/*
		 * We use auto clear for RTX_QUEUE only. Don't use other
		 * interrupts (e.g. link interrupt). BTW, we don't use
		 * TCP_TIMER interrupt itself.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5072
5073 /************************************************************************
5074 * ixgbe_disable_intr_internal
5075 ************************************************************************/
5076 static void
5077 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5078 {
5079 struct ix_queue *que = adapter->queues;
5080
5081 /* disable interrupts other than queues */
5082 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5083
5084 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0)
5085 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5086
5087 for (int i = 0; i < adapter->num_queues; i++, que++)
5088 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5089
5090 IXGBE_WRITE_FLUSH(&adapter->hw);
5091
5092 } /* ixgbe_do_disable_intr_internal */
5093
5094 /************************************************************************
5095 * ixgbe_disable_intr
5096 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/*
	 * Mask all interrupts.  nestok=true: presumably allows the
	 * per-queue disable count to nest — see
	 * ixgbe_disable_queue_internal() (TODO confirm).
	 */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5103
5104 /************************************************************************
5105 * ixgbe_ensure_disabled_intr
5106 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/*
	 * Mask all interrupts with nestok=false — presumably a
	 * non-nesting variant used to guarantee the disabled state
	 * regardless of prior disable counts (TODO confirm in
	 * ixgbe_disable_queue_internal()).
	 */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5113
5114 /************************************************************************
5115 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5116 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	u32 eicr;
	u32 eims_orig;
	u32 eims_enable = 0;
	u32 eims_disable = 0;

	/* Save the current mask so it can be restored at the end. */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/*
	 * Silicon errata #26 on 82598. Disable all interrupts before reading
	 * EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Read and clear EICR */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Not our interrupt: restore the mask and decline it. */
	if (eicr == 0) {
		IXGBE_EVC_ADD(&adapter->stats.pf.intzero, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
		return 0;
	}
	IXGBE_EVC_ADD(&adapter->stats.pf.legint, 1);

	/* Queue (0) intr */
	if (((ifp->if_flags & IFF_RUNNING) != 0) &&
	    (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
		IXGBE_EVC_ADD(&que->irqs, 1);

		/*
		 * The same as ixgbe_msix_que() about
		 * "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);

		/*
		 * Defer the rest to the softint/workqueue handler and
		 * keep the queue bit masked until it has run.
		 */
		IXGBE_EVC_ADD(&que->req, 1);
		ixgbe_sched_handle_que(adapter, que);
		/* Disable queue 0 interrupt */
		eims_disable |= 1UL << 0;
	} else
		eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE;

	/* Handle link/admin causes; may add bits to eims_disable. */
	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS,
	    (eims_orig & ~eims_disable) | eims_enable);

	return 1;
} /* ixgbe_legacy_irq */
5181
5182 /************************************************************************
5183 * ixgbe_free_pciintr_resources
5184 ************************************************************************/
5185 static void
5186 ixgbe_free_pciintr_resources(struct adapter *adapter)
5187 {
5188 struct ix_queue *que = adapter->queues;
5189 int rid;
5190
5191 /*
5192 * Release all msix queue resources:
5193 */
5194 for (int i = 0; i < adapter->num_queues; i++, que++) {
5195 if (que->res != NULL) {
5196 pci_intr_disestablish(adapter->osdep.pc,
5197 adapter->osdep.ihs[i]);
5198 adapter->osdep.ihs[i] = NULL;
5199 }
5200 }
5201
5202 /* Clean the Legacy or Link interrupt last */
5203 if (adapter->vector) /* we are doing MSIX */
5204 rid = adapter->vector;
5205 else
5206 rid = 0;
5207
5208 if (adapter->osdep.ihs[rid] != NULL) {
5209 pci_intr_disestablish(adapter->osdep.pc,
5210 adapter->osdep.ihs[rid]);
5211 adapter->osdep.ihs[rid] = NULL;
5212 }
5213
5214 if (adapter->osdep.intrs != NULL) {
5215 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5216 adapter->osdep.nintrs);
5217 adapter->osdep.intrs = NULL;
5218 }
5219 } /* ixgbe_free_pciintr_resources */
5220
5221 /************************************************************************
5222 * ixgbe_free_pci_resources
5223 ************************************************************************/
5224 static void
5225 ixgbe_free_pci_resources(struct adapter *adapter)
5226 {
5227
5228 ixgbe_free_pciintr_resources(adapter);
5229
5230 if (adapter->osdep.mem_size != 0) {
5231 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5232 adapter->osdep.mem_bus_space_handle,
5233 adapter->osdep.mem_size);
5234 }
5235 } /* ixgbe_free_pci_resources */
5236
5237 /************************************************************************
5238 * ixgbe_sysctl_flowcntl
5239 *
5240 * SYSCTL wrapper around setting Flow Control
5241 ************************************************************************/
5242 static int
5243 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5244 {
5245 struct sysctlnode node = *rnode;
5246 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5247 int error, fc;
5248
5249 if (ixgbe_fw_recovery_mode_swflag(adapter))
5250 return (EPERM);
5251
5252 fc = adapter->hw.fc.current_mode;
5253 node.sysctl_data = &fc;
5254 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5255 if (error != 0 || newp == NULL)
5256 return error;
5257
5258 /* Don't bother if it's not changed */
5259 if (fc == adapter->hw.fc.current_mode)
5260 return (0);
5261
5262 return ixgbe_set_flowcntl(adapter, fc);
5263 } /* ixgbe_sysctl_flowcntl */
5264
5265 /************************************************************************
5266 * ixgbe_set_flowcntl - Set flow control
5267 *
5268 * Flow control values:
5269 * 0 - off
5270 * 1 - rx pause
5271 * 2 - tx pause
5272 * 3 - full
5273 ************************************************************************/
5274 static int
5275 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5276 {
5277 switch (fc) {
5278 case ixgbe_fc_rx_pause:
5279 case ixgbe_fc_tx_pause:
5280 case ixgbe_fc_full:
5281 adapter->hw.fc.requested_mode = fc;
5282 if (adapter->num_queues > 1)
5283 ixgbe_disable_rx_drop(adapter);
5284 break;
5285 case ixgbe_fc_none:
5286 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5287 if (adapter->num_queues > 1)
5288 ixgbe_enable_rx_drop(adapter);
5289 break;
5290 default:
5291 return (EINVAL);
5292 }
5293
5294 #if 0 /* XXX NetBSD */
5295 /* Don't autoneg if forcing a value */
5296 adapter->hw.fc.disable_fc_autoneg = TRUE;
5297 #endif
5298 ixgbe_fc_enable(&adapter->hw);
5299
5300 return (0);
5301 } /* ixgbe_set_flowcntl */
5302
5303 /************************************************************************
5304 * ixgbe_enable_rx_drop
5305 *
5306 * Enable the hardware to drop packets when the buffer is
5307 * full. This is useful with multiqueue, so that no single
5308 * queue being full stalls the entire RX engine. We only
5309 * enable this when Multiqueue is enabled AND Flow Control
5310 * is disabled.
5311 ************************************************************************/
5312 static void
5313 ixgbe_enable_rx_drop(struct adapter *adapter)
5314 {
5315 struct ixgbe_hw *hw = &adapter->hw;
5316 struct rx_ring *rxr;
5317 u32 srrctl;
5318
5319 for (int i = 0; i < adapter->num_queues; i++) {
5320 rxr = &adapter->rx_rings[i];
5321 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5322 srrctl |= IXGBE_SRRCTL_DROP_EN;
5323 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5324 }
5325
5326 /* enable drop for each vf */
5327 for (int i = 0; i < adapter->num_vfs; i++) {
5328 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5329 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5330 IXGBE_QDE_ENABLE));
5331 }
5332 } /* ixgbe_enable_rx_drop */
5333
5334 /************************************************************************
5335 * ixgbe_disable_rx_drop
5336 ************************************************************************/
5337 static void
5338 ixgbe_disable_rx_drop(struct adapter *adapter)
5339 {
5340 struct ixgbe_hw *hw = &adapter->hw;
5341 struct rx_ring *rxr;
5342 u32 srrctl;
5343
5344 for (int i = 0; i < adapter->num_queues; i++) {
5345 rxr = &adapter->rx_rings[i];
5346 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5347 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5348 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5349 }
5350
5351 /* disable drop for each vf */
5352 for (int i = 0; i < adapter->num_vfs; i++) {
5353 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5354 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5355 }
5356 } /* ixgbe_disable_rx_drop */
5357
5358 /************************************************************************
5359 * ixgbe_sysctl_advertise
5360 *
5361 * SYSCTL wrapper around setting advertised speed
5362 ************************************************************************/
5363 static int
5364 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5365 {
5366 struct sysctlnode node = *rnode;
5367 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5368 int error = 0, advertise;
5369
5370 if (ixgbe_fw_recovery_mode_swflag(adapter))
5371 return (EPERM);
5372
5373 advertise = adapter->advertise;
5374 node.sysctl_data = &advertise;
5375 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5376 if (error != 0 || newp == NULL)
5377 return error;
5378
5379 return ixgbe_set_advertise(adapter, advertise);
5380 } /* ixgbe_sysctl_advertise */
5381
5382 /************************************************************************
5383 * ixgbe_set_advertise - Control advertised link speed
5384 *
5385 * Flags:
5386 * 0x00 - Default (all capable link speed)
5387 * 0x1 - advertise 100 Mb
5388 * 0x2 - advertise 1G
5389 * 0x4 - advertise 10G
5390 * 0x8 - advertise 10 Mb (yes, Mb)
5391 * 0x10 - advertise 2.5G
5392 * 0x20 - advertise 5G
5393 ************************************************************************/
5394 static int
5395 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5396 {
5397 device_t dev;
5398 struct ixgbe_hw *hw;
5399 ixgbe_link_speed speed = 0;
5400 ixgbe_link_speed link_caps = 0;
5401 s32 err = IXGBE_NOT_IMPLEMENTED;
5402 bool negotiate = FALSE;
5403
5404 /* Checks to validate new value */
5405 if (adapter->advertise == advertise) /* no change */
5406 return (0);
5407
5408 dev = adapter->dev;
5409 hw = &adapter->hw;
5410
5411 /* No speed changes for backplane media */
5412 if (hw->phy.media_type == ixgbe_media_type_backplane)
5413 return (ENODEV);
5414
5415 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5416 (hw->phy.multispeed_fiber))) {
5417 device_printf(dev,
5418 "Advertised speed can only be set on copper or "
5419 "multispeed fiber media types.\n");
5420 return (EINVAL);
5421 }
5422
5423 if (advertise < 0x0 || advertise > 0x3f) {
5424 device_printf(dev, "Invalid advertised speed; "
5425 "valid modes are 0x0 through 0x3f\n");
5426 return (EINVAL);
5427 }
5428
5429 if (hw->mac.ops.get_link_capabilities) {
5430 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5431 &negotiate);
5432 if (err != IXGBE_SUCCESS) {
5433 device_printf(dev, "Unable to determine supported "
5434 "advertise speeds\n");
5435 return (ENODEV);
5436 }
5437 }
5438
5439 /* Set new value and report new advertised mode */
5440 if (advertise & 0x1) {
5441 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5442 device_printf(dev, "Interface does not support 100Mb "
5443 "advertised speed\n");
5444 return (EINVAL);
5445 }
5446 speed |= IXGBE_LINK_SPEED_100_FULL;
5447 }
5448 if (advertise & 0x2) {
5449 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5450 device_printf(dev, "Interface does not support 1Gb "
5451 "advertised speed\n");
5452 return (EINVAL);
5453 }
5454 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5455 }
5456 if (advertise & 0x4) {
5457 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5458 device_printf(dev, "Interface does not support 10Gb "
5459 "advertised speed\n");
5460 return (EINVAL);
5461 }
5462 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5463 }
5464 if (advertise & 0x8) {
5465 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5466 device_printf(dev, "Interface does not support 10Mb "
5467 "advertised speed\n");
5468 return (EINVAL);
5469 }
5470 speed |= IXGBE_LINK_SPEED_10_FULL;
5471 }
5472 if (advertise & 0x10) {
5473 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5474 device_printf(dev, "Interface does not support 2.5Gb "
5475 "advertised speed\n");
5476 return (EINVAL);
5477 }
5478 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5479 }
5480 if (advertise & 0x20) {
5481 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5482 device_printf(dev, "Interface does not support 5Gb "
5483 "advertised speed\n");
5484 return (EINVAL);
5485 }
5486 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5487 }
5488 if (advertise == 0)
5489 speed = link_caps; /* All capable link speed */
5490
5491 hw->mac.autotry_restart = TRUE;
5492 hw->mac.ops.setup_link(hw, speed, TRUE);
5493 adapter->advertise = advertise;
5494
5495 return (0);
5496 } /* ixgbe_set_advertise */
5497
5498 /************************************************************************
5499 * ixgbe_get_default_advertise - Get default advertised speed settings
5500 *
5501 * Formatted for sysctl usage.
5502 * Flags:
5503 * 0x1 - advertise 100 Mb
5504 * 0x2 - advertise 1G
5505 * 0x4 - advertise 10G
5506 * 0x8 - advertise 10 Mb (yes, Mb)
5507 * 0x10 - advertise 2.5G
5508 * 0x20 - advertise 5G
5509 ************************************************************************/
5510 static int
5511 ixgbe_get_default_advertise(struct adapter *adapter)
5512 {
5513 struct ixgbe_hw *hw = &adapter->hw;
5514 int speed;
5515 ixgbe_link_speed link_caps = 0;
5516 s32 err;
5517 bool negotiate = FALSE;
5518
5519 /*
5520 * Advertised speed means nothing unless it's copper or
5521 * multi-speed fiber
5522 */
5523 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5524 !(hw->phy.multispeed_fiber))
5525 return (0);
5526
5527 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5528 if (err != IXGBE_SUCCESS)
5529 return (0);
5530
5531 speed =
5532 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5533 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5534 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5535 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5536 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5537 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5538
5539 return speed;
5540 } /* ixgbe_get_default_advertise */
5541
5542 /************************************************************************
5543 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5544 *
5545 * Control values:
5546 * 0/1 - off / on (use default value of 1000)
5547 *
5548 * Legal timer values are:
5549 * 50,100,250,500,1000,2000,5000,10000
5550 *
5551 * Turning off interrupt moderation will also turn this off.
5552 ************************************************************************/
5553 static int
5554 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5555 {
5556 struct sysctlnode node = *rnode;
5557 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5558 struct ifnet *ifp = adapter->ifp;
5559 int error;
5560 int newval;
5561
5562 if (ixgbe_fw_recovery_mode_swflag(adapter))
5563 return (EPERM);
5564
5565 newval = adapter->dmac;
5566 node.sysctl_data = &newval;
5567 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5568 if ((error) || (newp == NULL))
5569 return (error);
5570
5571 switch (newval) {
5572 case 0:
5573 /* Disabled */
5574 adapter->dmac = 0;
5575 break;
5576 case 1:
5577 /* Enable and use default */
5578 adapter->dmac = 1000;
5579 break;
5580 case 50:
5581 case 100:
5582 case 250:
5583 case 500:
5584 case 1000:
5585 case 2000:
5586 case 5000:
5587 case 10000:
5588 /* Legal values - allow */
5589 adapter->dmac = newval;
5590 break;
5591 default:
5592 /* Do nothing, illegal value */
5593 return (EINVAL);
5594 }
5595
5596 /* Re-initialize hardware if it's already running */
5597 if (ifp->if_flags & IFF_RUNNING)
5598 ifp->if_init(ifp);
5599
5600 return (0);
5601 }
5602
5603 #ifdef IXGBE_DEBUG
5604 /************************************************************************
5605 * ixgbe_sysctl_power_state
5606 *
5607 * Sysctl to test power states
5608 * Values:
5609 * 0 - set device to D0
5610 * 3 - set device to D3
5611 * (none) - get current device power state
5612 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this "notyet" body is an unconverted FreeBSD
	 * implementation — it uses req->newp, pci_get_powerstate() and
	 * DEVICE_SUSPEND/DEVICE_RESUME, none of which match the NetBSD
	 * sysctl/autoconf interfaces.  It also never points
	 * node.sysctl_data at new_ps, so sysctl_lookup() would not update
	 * it.  Must be reworked before the #ifdef is removed.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	/* Refuse while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	/* No change requested: nothing to do. */
	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are accepted. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Not implemented on NetBSD: report success with no effect. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5648 #endif
5649
5650 /************************************************************************
5651 * ixgbe_sysctl_wol_enable
5652 *
5653 * Sysctl to enable/disable the WoL capability,
5654 * if supported by the adapter.
5655 *
5656 * Values:
5657 * 0 - disabled
5658 * 1 - enabled
5659 ************************************************************************/
5660 static int
5661 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5662 {
5663 struct sysctlnode node = *rnode;
5664 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5665 struct ixgbe_hw *hw = &adapter->hw;
5666 bool new_wol_enabled;
5667 int error = 0;
5668
5669 /*
5670 * It's not required to check recovery mode because this function never
5671 * touches hardware.
5672 */
5673 new_wol_enabled = hw->wol_enabled;
5674 node.sysctl_data = &new_wol_enabled;
5675 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5676 if ((error) || (newp == NULL))
5677 return (error);
5678 if (new_wol_enabled == hw->wol_enabled)
5679 return (0);
5680
5681 if (new_wol_enabled && !adapter->wol_support)
5682 return (ENODEV);
5683 else
5684 hw->wol_enabled = new_wol_enabled;
5685
5686 return (0);
5687 } /* ixgbe_sysctl_wol_enable */
5688
5689 /************************************************************************
5690 * ixgbe_sysctl_wufc - Wake Up Filter Control
5691 *
5692 * Sysctl to enable/disable the types of packets that the
5693 * adapter will wake up on upon receipt.
5694 * Flags:
5695 * 0x1 - Link Status Change
5696 * 0x2 - Magic Packet
5697 * 0x4 - Direct Exact
5698 * 0x8 - Directed Multicast
5699 * 0x10 - Broadcast
5700 * 0x20 - ARP/IPv4 Request Packet
5701 * 0x40 - Direct IPv4 Packet
5702 * 0x80 - Direct IPv6 Packet
5703 *
5704 * Settings not listed above will cause the sysctl to return an error.
5705 ************************************************************************/
5706 static int
5707 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5708 {
5709 struct sysctlnode node = *rnode;
5710 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5711 int error = 0;
5712 u32 new_wufc;
5713
5714 /*
5715 * It's not required to check recovery mode because this function never
5716 * touches hardware.
5717 */
5718 new_wufc = adapter->wufc;
5719 node.sysctl_data = &new_wufc;
5720 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5721 if ((error) || (newp == NULL))
5722 return (error);
5723 if (new_wufc == adapter->wufc)
5724 return (0);
5725
5726 if (new_wufc & 0xffffff00)
5727 return (EINVAL);
5728
5729 new_wufc &= 0xff;
5730 new_wufc |= (0xffffff & adapter->wufc);
5731 adapter->wufc = new_wufc;
5732
5733 return (0);
5734 } /* ixgbe_sysctl_wufc */
5735
5736 #ifdef IXGBE_DEBUG
5737 /************************************************************************
5738 * ixgbe_sysctl_print_rss_config
5739 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): unconverted FreeBSD code kept under "notyet" — the
	 * sbuf(9) API and the "req" argument to sbuf_new_for_sysctl() are
	 * FreeBSD interfaces and need replacement before enabling.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Refuse while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 128-entry table (RETA + ERETA). */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the extended table. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5796 #endif /* IXGBE_DEBUG */
5797
5798 /************************************************************************
5799 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5800 *
5801 * For X552/X557-AT devices using an external PHY
5802 ************************************************************************/
5803 static int
5804 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5805 {
5806 struct sysctlnode node = *rnode;
5807 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5808 struct ixgbe_hw *hw = &adapter->hw;
5809 int val;
5810 u16 reg;
5811 int error;
5812
5813 if (ixgbe_fw_recovery_mode_swflag(adapter))
5814 return (EPERM);
5815
5816 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5817 device_printf(adapter->dev,
5818 "Device has no supported external thermal sensor.\n");
5819 return (ENODEV);
5820 }
5821
5822 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5823 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5824 device_printf(adapter->dev,
5825 "Error reading from PHY's current temperature register\n");
5826 return (EAGAIN);
5827 }
5828
5829 node.sysctl_data = &val;
5830
5831 /* Shift temp for output */
5832 val = reg >> 8;
5833
5834 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5835 if ((error) || (newp == NULL))
5836 return (error);
5837
5838 return (0);
5839 } /* ixgbe_sysctl_phy_temp */
5840
5841 /************************************************************************
5842 * ixgbe_sysctl_phy_overtemp_occurred
5843 *
5844 * Reports (directly from the PHY) whether the current PHY
5845 * temperature is over the overtemp threshold.
5846 ************************************************************************/
5847 static int
5848 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5849 {
5850 struct sysctlnode node = *rnode;
5851 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5852 struct ixgbe_hw *hw = &adapter->hw;
5853 int val, error;
5854 u16 reg;
5855
5856 if (ixgbe_fw_recovery_mode_swflag(adapter))
5857 return (EPERM);
5858
5859 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5860 device_printf(adapter->dev,
5861 "Device has no supported external thermal sensor.\n");
5862 return (ENODEV);
5863 }
5864
5865 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5866 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5867 device_printf(adapter->dev,
5868 "Error reading from PHY's temperature status register\n");
5869 return (EAGAIN);
5870 }
5871
5872 node.sysctl_data = &val;
5873
5874 /* Get occurrence bit */
5875 val = !!(reg & 0x4000);
5876
5877 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5878 if ((error) || (newp == NULL))
5879 return (error);
5880
5881 return (0);
5882 } /* ixgbe_sysctl_phy_overtemp_occurred */
5883
5884 /************************************************************************
5885 * ixgbe_sysctl_eee_state
5886 *
5887 * Sysctl to set EEE power saving feature
5888 * Values:
5889 * 0 - disable EEE
5890 * 1 - enable EEE
5891 * (none) - get current device EEE state
5892 ************************************************************************/
5893 static int
5894 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5895 {
5896 struct sysctlnode node = *rnode;
5897 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5898 struct ifnet *ifp = adapter->ifp;
5899 device_t dev = adapter->dev;
5900 int curr_eee, new_eee, error = 0;
5901 s32 retval;
5902
5903 if (ixgbe_fw_recovery_mode_swflag(adapter))
5904 return (EPERM);
5905
5906 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5907 node.sysctl_data = &new_eee;
5908 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5909 if ((error) || (newp == NULL))
5910 return (error);
5911
5912 /* Nothing to do */
5913 if (new_eee == curr_eee)
5914 return (0);
5915
5916 /* Not supported */
5917 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5918 return (EINVAL);
5919
5920 /* Bounds checking */
5921 if ((new_eee < 0) || (new_eee > 1))
5922 return (EINVAL);
5923
5924 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
5925 if (retval) {
5926 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5927 return (EINVAL);
5928 }
5929
5930 /* Restart auto-neg */
5931 ifp->if_init(ifp);
5932
5933 device_printf(dev, "New EEE state: %d\n", new_eee);
5934
5935 /* Cache new value */
5936 if (new_eee)
5937 adapter->feat_en |= IXGBE_FEATURE_EEE;
5938 else
5939 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5940
5941 return (error);
5942 } /* ixgbe_sysctl_eee_state */
5943
/*
 * PRINTQS(adapter, regname): print the value of the per-queue register
 * IXGBE_<regname>(i) for every queue of "adapter" on a single line,
 * prefixed with the device name.  Helper for ixgbe_print_debug_info().
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5957
5958 /************************************************************************
5959 * ixgbe_print_debug_info
5960 *
5961 * Called only when em_display_debug_stats is enabled.
5962 * Provides a way to take a look at important statistics
5963 * maintained by the driver and hardware.
5964 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* RSS redirection table size depends on the MAC generation. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the RSS redirection table (entries >= 32 are in ERETA). */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Per-queue RX ring registers, one line per register. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* RQSMR registers cover four queues each. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter of how often each queue was disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; EIMS_EX exists only on post-82598 MACs. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
	device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
	device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
} /* ixgbe_print_debug_info */
6032
6033 /************************************************************************
6034 * ixgbe_sysctl_debug
6035 ************************************************************************/
6036 static int
6037 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6038 {
6039 struct sysctlnode node = *rnode;
6040 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6041 int error, result = 0;
6042
6043 if (ixgbe_fw_recovery_mode_swflag(adapter))
6044 return (EPERM);
6045
6046 node.sysctl_data = &result;
6047 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6048
6049 if (error || newp == NULL)
6050 return error;
6051
6052 if (result == 1)
6053 ixgbe_print_debug_info(adapter);
6054
6055 return 0;
6056 } /* ixgbe_sysctl_debug */
6057
6058 /************************************************************************
6059 * ixgbe_sysctl_rx_copy_len
6060 ************************************************************************/
6061 static int
6062 ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6063 {
6064 struct sysctlnode node = *rnode;
6065 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6066 int error;
6067 int result = adapter->rx_copy_len;
6068
6069 node.sysctl_data = &result;
6070 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6071
6072 if (error || newp == NULL)
6073 return error;
6074
6075 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6076 return EINVAL;
6077
6078 adapter->rx_copy_len = result;
6079
6080 return 0;
6081 } /* ixgbe_sysctl_rx_copy_len */
6082
6083 /************************************************************************
6084 * ixgbe_sysctl_tx_process_limit
6085 ************************************************************************/
6086 static int
6087 ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6088 {
6089 struct sysctlnode node = *rnode;
6090 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6091 int error;
6092 int result = adapter->tx_process_limit;
6093
6094 node.sysctl_data = &result;
6095 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6096
6097 if (error || newp == NULL)
6098 return error;
6099
6100 if ((result <= 0) || (result > adapter->num_tx_desc))
6101 return EINVAL;
6102
6103 adapter->tx_process_limit = result;
6104
6105 return 0;
6106 } /* ixgbe_sysctl_tx_process_limit */
6107
6108 /************************************************************************
6109 * ixgbe_sysctl_rx_process_limit
6110 ************************************************************************/
6111 static int
6112 ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6113 {
6114 struct sysctlnode node = *rnode;
6115 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6116 int error;
6117 int result = adapter->rx_process_limit;
6118
6119 node.sysctl_data = &result;
6120 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6121
6122 if (error || newp == NULL)
6123 return error;
6124
6125 if ((result <= 0) || (result > adapter->num_rx_desc))
6126 return EINVAL;
6127
6128 adapter->rx_process_limit = result;
6129
6130 return 0;
6131 } /* ixgbe_sysctl_rx_process_limit */
6132
6133 /************************************************************************
6134 * ixgbe_init_device_features
6135 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/*
	 * Build the capability mask (feat_cap) from the MAC type and
	 * device ID, then derive the enabled set (feat_en) from it,
	 * honoring the global ixgbe_enable_* knobs and dependencies.
	 */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	    | IXGBE_FEATURE_RSS
	    | IXGBE_FEATURE_MSI
	    | IXGBE_FEATURE_MSIX
	    | IXGBE_FEATURE_LEGACY_IRQ
	    | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass capability exists only on function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		/* 1G copper variants carry a thermal sensor and EEE. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass capability exists only on function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support "
			    "Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X: RSS and SR-IOV both require MSI-X. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6254
6255 /************************************************************************
6256 * ixgbe_probe - Device identification routine
6257 *
6258 * Determines if the driver should be loaded on
6259 * adapter based on its PCI vendor/device ID.
6260 *
6261 * return BUS_PROBE_DEFAULT on success, positive on failure
6262 ************************************************************************/
6263 static int
6264 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6265 {
6266 const struct pci_attach_args *pa = aux;
6267
6268 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6269 }
6270
6271 static const ixgbe_vendor_info_t *
6272 ixgbe_lookup(const struct pci_attach_args *pa)
6273 {
6274 const ixgbe_vendor_info_t *ent;
6275 pcireg_t subid;
6276
6277 INIT_DEBUGOUT("ixgbe_lookup: begin");
6278
6279 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6280 return NULL;
6281
6282 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6283
6284 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6285 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6286 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6287 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6288 (ent->subvendor_id == 0)) &&
6289 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6290 (ent->subdevice_id == 0))) {
6291 return ent;
6292 }
6293 }
6294 return NULL;
6295 }
6296
6297 static int
6298 ixgbe_ifflags_cb(struct ethercom *ec)
6299 {
6300 struct ifnet *ifp = &ec->ec_if;
6301 struct adapter *adapter = ifp->if_softc;
6302 u_short change;
6303 int rv = 0;
6304
6305 IXGBE_CORE_LOCK(adapter);
6306
6307 change = ifp->if_flags ^ adapter->if_flags;
6308 if (change != 0)
6309 adapter->if_flags = ifp->if_flags;
6310
6311 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6312 rv = ENETRESET;
6313 goto out;
6314 } else if ((change & IFF_PROMISC) != 0)
6315 ixgbe_set_rxfilter(adapter);
6316
6317 /* Check for ec_capenable. */
6318 change = ec->ec_capenable ^ adapter->ec_capenable;
6319 adapter->ec_capenable = ec->ec_capenable;
6320 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6321 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6322 rv = ENETRESET;
6323 goto out;
6324 }
6325
6326 /*
6327 * Special handling is not required for ETHERCAP_VLAN_MTU.
6328 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
6329 */
6330
6331 /* Set up VLAN support and filter */
6332 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6333 ixgbe_setup_vlan_hw_support(adapter);
6334
6335 out:
6336 IXGBE_CORE_UNLOCK(adapter);
6337
6338 return rv;
6339 }
6340
6341 /************************************************************************
6342 * ixgbe_ioctl - Ioctl entry point
6343 *
6344 * Called when the user wants to configure the interface.
6345 *
6346 * return 0 on success, positive on failure
6347 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* All layer-4 Rx checksum capabilities toggle as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* All ioctls are refused while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug logging for each command; the only command
	 * with a side effect here is SIOCZIFDATA, which also clears the
	 * hardware and software counters.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Clear both hardware and event counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual command handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and DOM (0xA2) addresses. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let ether_ioctl() do the common work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: apply the change if the interface is up. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6482
6483 /************************************************************************
6484 * ixgbe_check_fan_failure
6485 ************************************************************************/
6486 static void
6487 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6488 {
6489 u32 mask;
6490
6491 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6492 IXGBE_ESDP_SDP1;
6493
6494 if ((reg & mask) == 0)
6495 return;
6496
6497 /*
6498 * Use ratecheck() just in case interrupt occur frequently.
6499 * When EXPX9501AT's fan stopped, interrupt occurred only once,
6500 * an red LED on the board turned on and link never up until
6501 * power off.
6502 */
6503 if (ratecheck(&adapter->lasterr_time, &ixgbe_errlog_intrvl))
6504 device_printf(adapter->dev,
6505 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6506 } /* ixgbe_check_fan_failure */
6507
6508 /************************************************************************
6509 * ixgbe_handle_que
6510 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	IXGBE_EVC_ADD(&que->handleq, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reap completed TX descriptors under the TX lock. */
		IXGBE_TX_LOCK(txr);
		more = ixgbe_txeof(txr);
		/* Multiqueue TX path: drain this queue's input queue. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		/* RX processing happens outside the TX lock. */
		more |= ixgbe_rxeof(que);
	}

	if (more) {
		/* Work remains: reschedule instead of re-enabling the
		 * interrupt. */
		IXGBE_EVC_ADD(&que->req, 1);
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* MSIX: Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else {
		/* INTx or MSI */
		ixgbe_enable_queue(adapter, 0);
	}

	return;
} /* ixgbe_handle_que */
6550
6551 /************************************************************************
6552 * ixgbe_handle_que_work
6553 ************************************************************************/
6554 static void
6555 ixgbe_handle_que_work(struct work *wk, void *context)
6556 {
6557 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6558
6559 /*
6560 * "enqueued flag" is not required here.
6561 * See ixgbe_msix_que().
6562 */
6563 ixgbe_handle_que(que);
6564 }
6565
6566 /************************************************************************
6567 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6568 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char intrbuf[PCI_INTRSTR_LEN];
	char wqname[MAXCOMLEN];
	const char *intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/*
		 * Establish failed: release the vector and, if this was
		 * MSI, retry once with INTx (when the device allows it).
		 */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record which interrupt type we finally ended up with. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		/* Multiqueue TX needs its own softint and workqueue. */
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx",
		    device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any of the softints/workqueues could not be created. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
	    && ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6675
6676 /************************************************************************
6677 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6678 ************************************************************************/
6679 static int
6680 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6681 {
6682 device_t dev = adapter->dev;
6683 struct ix_queue *que = adapter->queues;
6684 struct tx_ring *txr = adapter->tx_rings;
6685 pci_chipset_tag_t pc;
6686 char intrbuf[PCI_INTRSTR_LEN];
6687 char intr_xname[32];
6688 char wqname[MAXCOMLEN];
6689 const char *intrstr = NULL;
6690 int error, vector = 0;
6691 int cpu_id = 0;
6692 kcpuset_t *affinity;
6693 #ifdef RSS
6694 unsigned int rss_buckets = 0;
6695 kcpuset_t cpu_mask;
6696 #endif
6697
6698 pc = adapter->osdep.pc;
6699 #ifdef RSS
6700 /*
6701 * If we're doing RSS, the number of queues needs to
6702 * match the number of RSS buckets that are configured.
6703 *
6704 * + If there's more queues than RSS buckets, we'll end
6705 * up with queues that get no traffic.
6706 *
6707 * + If there's more RSS buckets than queues, we'll end
6708 * up having multiple RSS buckets map to the same queue,
6709 * so there'll be some contention.
6710 */
6711 rss_buckets = rss_getnumbuckets();
6712 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6713 (adapter->num_queues != rss_buckets)) {
6714 device_printf(dev,
6715 "%s: number of queues (%d) != number of RSS buckets (%d)"
6716 "; performance will be impacted.\n",
6717 __func__, adapter->num_queues, rss_buckets);
6718 }
6719 #endif
6720
6721 adapter->osdep.nintrs = adapter->num_queues + 1;
6722 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6723 adapter->osdep.nintrs) != 0) {
6724 aprint_error_dev(dev,
6725 "failed to allocate MSI-X interrupt\n");
6726 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
6727 return (ENXIO);
6728 }
6729
6730 kcpuset_create(&affinity, false);
6731 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6732 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6733 device_xname(dev), i);
6734 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6735 sizeof(intrbuf));
6736 #ifdef IXGBE_MPSAFE
6737 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6738 true);
6739 #endif
6740 /* Set the handler function */
6741 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6742 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6743 intr_xname);
6744 if (que->res == NULL) {
6745 aprint_error_dev(dev,
6746 "Failed to register QUE handler\n");
6747 error = ENXIO;
6748 goto err_out;
6749 }
6750 que->msix = vector;
6751 adapter->active_queues |= 1ULL << que->msix;
6752
6753 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6754 #ifdef RSS
6755 /*
6756 * The queue ID is used as the RSS layer bucket ID.
6757 * We look up the queue ID -> RSS CPU ID and select
6758 * that.
6759 */
6760 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6761 CPU_SETOF(cpu_id, &cpu_mask);
6762 #endif
6763 } else {
6764 /*
6765 * Bind the MSI-X vector, and thus the
6766 * rings to the corresponding CPU.
6767 *
6768 * This just happens to match the default RSS
6769 * round-robin bucket -> queue -> CPU allocation.
6770 */
6771 if (adapter->num_queues > 1)
6772 cpu_id = i;
6773 }
6774 /* Round-robin affinity */
6775 kcpuset_zero(affinity);
6776 kcpuset_set(affinity, cpu_id % ncpu);
6777 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6778 NULL);
6779 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6780 intrstr);
6781 if (error == 0) {
6782 #if 1 /* def IXGBE_DEBUG */
6783 #ifdef RSS
6784 aprint_normal(", bound RSS bucket %d to CPU %d", i,
6785 cpu_id % ncpu);
6786 #else
6787 aprint_normal(", bound queue %d to cpu %d", i,
6788 cpu_id % ncpu);
6789 #endif
6790 #endif /* IXGBE_DEBUG */
6791 }
6792 aprint_normal("\n");
6793
6794 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6795 txr->txr_si = softint_establish(
6796 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6797 ixgbe_deferred_mq_start, txr);
6798 if (txr->txr_si == NULL) {
6799 aprint_error_dev(dev,
6800 "couldn't establish software interrupt\n");
6801 error = ENXIO;
6802 goto err_out;
6803 }
6804 }
6805 que->que_si
6806 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6807 ixgbe_handle_que, que);
6808 if (que->que_si == NULL) {
6809 aprint_error_dev(dev,
6810 "couldn't establish software interrupt\n");
6811 error = ENXIO;
6812 goto err_out;
6813 }
6814 }
6815 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6816 error = workqueue_create(&adapter->txr_wq, wqname,
6817 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6818 IXGBE_WORKQUEUE_FLAGS);
6819 if (error) {
6820 aprint_error_dev(dev,
6821 "couldn't create workqueue for deferred Tx\n");
6822 goto err_out;
6823 }
6824 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6825
6826 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6827 error = workqueue_create(&adapter->que_wq, wqname,
6828 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6829 IXGBE_WORKQUEUE_FLAGS);
6830 if (error) {
6831 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6832 goto err_out;
6833 }
6834
6835 /* and Link */
6836 cpu_id++;
6837 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6838 adapter->vector = vector;
6839 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6840 sizeof(intrbuf));
6841 #ifdef IXGBE_MPSAFE
6842 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6843 true);
6844 #endif
6845 /* Set the link handler function */
6846 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6847 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6848 intr_xname);
6849 if (adapter->osdep.ihs[vector] == NULL) {
6850 aprint_error_dev(dev, "Failed to register LINK handler\n");
6851 error = ENXIO;
6852 goto err_out;
6853 }
6854 /* Round-robin affinity */
6855 kcpuset_zero(affinity);
6856 kcpuset_set(affinity, cpu_id % ncpu);
6857 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6858 NULL);
6859
6860 aprint_normal_dev(dev,
6861 "for link, interrupting at %s", intrstr);
6862 if (error == 0)
6863 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6864 else
6865 aprint_normal("\n");
6866
6867 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6868 adapter->mbx_si =
6869 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6870 ixgbe_handle_mbx, adapter);
6871 if (adapter->mbx_si == NULL) {
6872 aprint_error_dev(dev,
6873 "could not establish software interrupts\n");
6874
6875 error = ENXIO;
6876 goto err_out;
6877 }
6878 }
6879
6880 kcpuset_destroy(affinity);
6881 aprint_normal_dev(dev,
6882 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6883
6884 return (0);
6885
6886 err_out:
6887 kcpuset_destroy(affinity);
6888 ixgbe_free_softint(adapter);
6889 ixgbe_free_pciintr_resources(adapter);
6890 return (error);
6891 } /* ixgbe_allocate_msix */
6892
6893 /************************************************************************
6894 * ixgbe_configure_interrupts
6895 *
6896 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6897 * This will also depend on user settings.
6898 ************************************************************************/
6899 static int
6900 ixgbe_configure_interrupts(struct adapter *adapter)
6901 {
6902 device_t dev = adapter->dev;
6903 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6904 int want, queues, msgs;
6905
6906 /* Default to 1 queue if MSI-X setup fails */
6907 adapter->num_queues = 1;
6908
6909 /* Override by tuneable */
6910 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6911 goto msi;
6912
6913 /*
6914 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6915 * interrupt slot.
6916 */
6917 if (ncpu == 1)
6918 goto msi;
6919
6920 /* First try MSI-X */
6921 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6922 msgs = MIN(msgs, IXG_MAX_NINTR);
6923 if (msgs < 2)
6924 goto msi;
6925
6926 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6927
6928 /* Figure out a reasonable auto config value */
6929 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6930
6931 #ifdef RSS
6932 /* If we're doing RSS, clamp at the number of RSS buckets */
6933 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6934 queues = uimin(queues, rss_getnumbuckets());
6935 #endif
6936 if (ixgbe_num_queues > queues) {
6937 aprint_error_dev(adapter->dev,
6938 "ixgbe_num_queues (%d) is too large, "
6939 "using reduced amount (%d).\n", ixgbe_num_queues, queues);
6940 ixgbe_num_queues = queues;
6941 }
6942
6943 if (ixgbe_num_queues != 0)
6944 queues = ixgbe_num_queues;
6945 else
6946 queues = uimin(queues,
6947 uimin(mac->max_tx_queues, mac->max_rx_queues));
6948
6949 /* reflect correct sysctl value */
6950 ixgbe_num_queues = queues;
6951
6952 /*
6953 * Want one vector (RX/TX pair) per queue
6954 * plus an additional for Link.
6955 */
6956 want = queues + 1;
6957 if (msgs >= want)
6958 msgs = want;
6959 else {
6960 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6961 "%d vectors but %d queues wanted!\n", msgs, want);
6962 goto msi;
6963 }
6964 adapter->num_queues = queues;
6965 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6966 return (0);
6967
6968 /*
6969 * MSI-X allocation failed or provided us with
6970 * less vectors than needed. Free MSI-X resources
6971 * and we'll try enabling MSI.
6972 */
6973 msi:
6974 /* Without MSI-X, some features are no longer supported */
6975 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6976 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6977 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6978 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6979
6980 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6981 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
6982 if (msgs > 1)
6983 msgs = 1;
6984 if (msgs != 0) {
6985 msgs = 1;
6986 adapter->feat_en |= IXGBE_FEATURE_MSI;
6987 return (0);
6988 }
6989
6990 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6991 aprint_error_dev(dev,
6992 "Device does not support legacy interrupts.\n");
6993 return 1;
6994 }
6995
6996 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6997
6998 return (0);
6999 } /* ixgbe_configure_interrupts */
7000
7001
7002 /************************************************************************
7003 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7004 *
7005 * Done outside of interrupt context since the driver might sleep
7006 ************************************************************************/
7007 static void
7008 ixgbe_handle_link(void *context)
7009 {
7010 struct adapter *adapter = context;
7011 struct ixgbe_hw *hw = &adapter->hw;
7012
7013 IXGBE_CORE_LOCK(adapter);
7014 IXGBE_EVC_ADD(&adapter->link_sicount, 1);
7015 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7016 ixgbe_update_link_status(adapter);
7017
7018 /* Re-enable link interrupts */
7019 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7020
7021 IXGBE_CORE_UNLOCK(adapter);
7022 } /* ixgbe_handle_link */
7023
#if 0	/* currently unused; kept for reference */
/************************************************************************
 * ixgbe_rearm_queues
 *
 * Trigger (rearm) the interrupt cause for the queues selected by the
 * 'queues' bitmask by writing the EICS register(s).
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single EICS register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Newer MACs split the 64-bit mask across two registers */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
7053