ixgbe.c revision 1.279 1 /* $NetBSD: ixgbe.c,v 1.279 2021/03/09 10:03:18 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #include "opt_ixgbe.h"
71 #endif
72
73 #include "ixgbe.h"
74 #include "ixgbe_phy.h"
75 #include "ixgbe_sriov.h"
76 #include "vlan.h"
77
78 #include <sys/cprng.h>
79 #include <dev/mii/mii.h>
80 #include <dev/mii/miivar.h>
81
82 /************************************************************************
83 * Driver version
84 ************************************************************************/
85 static const char ixgbe_driver_version[] = "4.0.1-k";
86 /* XXX NetBSD: + 3.3.10 */
87
88 /************************************************************************
89 * PCI Device ID Table
90 *
91 * Used by probe to select devices to load on
92 * Last field stores an index into ixgbe_strings
93 * Last entry must be all 0s
94 *
95 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
96 ************************************************************************/
97 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
98 {
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
147 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
148 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
149 /* required last entry */
150 {0, 0, 0, 0, 0}
151 };
152
153 /************************************************************************
154 * Table of branding strings
155 ************************************************************************/
156 static const char *ixgbe_strings[] = {
157 "Intel(R) PRO/10GbE PCI-Express Network Driver"
158 };
159
160 /************************************************************************
161 * Function prototypes
162 ************************************************************************/
163 static int ixgbe_probe(device_t, cfdata_t, void *);
164 static void ixgbe_quirks(struct adapter *);
165 static void ixgbe_attach(device_t, device_t, void *);
166 static int ixgbe_detach(device_t, int);
167 #if 0
168 static int ixgbe_shutdown(device_t);
169 #endif
170 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
171 static bool ixgbe_resume(device_t, const pmf_qual_t *);
172 static int ixgbe_ifflags_cb(struct ethercom *);
173 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
174 static int ixgbe_init(struct ifnet *);
175 static void ixgbe_init_locked(struct adapter *);
176 static void ixgbe_ifstop(struct ifnet *, int);
177 static void ixgbe_stop_locked(void *);
178 static void ixgbe_init_device_features(struct adapter *);
179 static int ixgbe_check_fan_failure(struct adapter *, u32, bool);
180 static void ixgbe_add_media_types(struct adapter *);
181 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
182 static int ixgbe_media_change(struct ifnet *);
183 static int ixgbe_allocate_pci_resources(struct adapter *,
184 const struct pci_attach_args *);
185 static void ixgbe_free_deferred_handlers(struct adapter *);
186 static void ixgbe_get_slot_info(struct adapter *);
187 static int ixgbe_allocate_msix(struct adapter *,
188 const struct pci_attach_args *);
189 static int ixgbe_allocate_legacy(struct adapter *,
190 const struct pci_attach_args *);
191 static int ixgbe_configure_interrupts(struct adapter *);
192 static void ixgbe_free_pciintr_resources(struct adapter *);
193 static void ixgbe_free_pci_resources(struct adapter *);
194 static void ixgbe_local_timer(void *);
195 static void ixgbe_handle_timer(struct work *, void *);
196 static void ixgbe_recovery_mode_timer(void *);
197 static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
198 static int ixgbe_setup_interface(device_t, struct adapter *);
199 static void ixgbe_config_gpie(struct adapter *);
200 static void ixgbe_config_dmac(struct adapter *);
201 static void ixgbe_config_delay_values(struct adapter *);
202 static void ixgbe_schedule_admin_tasklet(struct adapter *);
203 static void ixgbe_config_link(struct adapter *);
204 static void ixgbe_check_wol_support(struct adapter *);
205 static int ixgbe_setup_low_power_mode(struct adapter *);
206 #if 0
207 static void ixgbe_rearm_queues(struct adapter *, u64);
208 #endif
209
210 static void ixgbe_initialize_transmit_units(struct adapter *);
211 static void ixgbe_initialize_receive_units(struct adapter *);
212 static void ixgbe_enable_rx_drop(struct adapter *);
213 static void ixgbe_disable_rx_drop(struct adapter *);
214 static void ixgbe_initialize_rss_mapping(struct adapter *);
215
216 static void ixgbe_enable_intr(struct adapter *);
217 static void ixgbe_disable_intr(struct adapter *);
218 static void ixgbe_update_stats_counters(struct adapter *);
219 static void ixgbe_set_rxfilter(struct adapter *);
220 static void ixgbe_update_link_status(struct adapter *);
221 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
222 static void ixgbe_configure_ivars(struct adapter *);
223 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
224 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
225
226 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
227 static void ixgbe_setup_vlan_hw_support(struct adapter *);
228 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
229 static int ixgbe_register_vlan(struct adapter *, u16);
230 static int ixgbe_unregister_vlan(struct adapter *, u16);
231
232 static void ixgbe_add_device_sysctls(struct adapter *);
233 static void ixgbe_add_hw_stats(struct adapter *);
234 static void ixgbe_clear_evcnt(struct adapter *);
235 static int ixgbe_set_flowcntl(struct adapter *, int);
236 static int ixgbe_set_advertise(struct adapter *, int);
237 static int ixgbe_get_advertise(struct adapter *);
238
239 /* Sysctl handlers */
240 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
241 const char *, int *, int);
242 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
248 #ifdef IXGBE_DEBUG
249 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
251 #endif
252 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
254 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
255 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
256 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
257 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
258 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
259 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
260 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
261
262 /* Interrupt functions */
263 static int ixgbe_msix_que(void *);
264 static int ixgbe_msix_admin(void *);
265 static void ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
266 static int ixgbe_legacy_irq(void *);
267
268 /* Event handlers running on workqueue */
269 static void ixgbe_handle_que(void *);
270 static void ixgbe_handle_link(void *);
271 static void ixgbe_handle_msf(void *);
272 static void ixgbe_handle_mod(void *, bool);
273 static void ixgbe_handle_phy(void *);
274
275 /* Deferred workqueue handlers */
276 static void ixgbe_handle_admin(struct work *, void *);
277 static void ixgbe_handle_que_work(struct work *, void *);
278
279 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
280
281 /************************************************************************
282 * NetBSD Device Interface Entry Points
283 ************************************************************************/
284 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
285 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
286 DVF_DETACH_SHUTDOWN);
287
288 #if 0
289 devclass_t ix_devclass;
290 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
291
292 MODULE_DEPEND(ix, pci, 1, 1, 1);
293 MODULE_DEPEND(ix, ether, 1, 1, 1);
294 #ifdef DEV_NETMAP
295 MODULE_DEPEND(ix, netmap, 1, 1, 1);
296 #endif
297 #endif
298
299 /*
300 * TUNEABLE PARAMETERS:
301 */
302
303 /*
304 * AIM: Adaptive Interrupt Moderation
305 * which means that the interrupt rate
306 * is varied over time based on the
307 * traffic for that interrupt vector
308 */
309 static bool ixgbe_enable_aim = true;
310 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
311 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
312 "Enable adaptive interrupt moderation");
313
314 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
315 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
316 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
317
318 /* How many packets rxeof tries to clean at a time */
319 static int ixgbe_rx_process_limit = 256;
320 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
321 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
322
323 /* How many packets txeof tries to clean at a time */
324 static int ixgbe_tx_process_limit = 256;
325 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
326 &ixgbe_tx_process_limit, 0,
327 "Maximum number of sent packets to process at a time, -1 means unlimited");
328
329 /* Flow control setting, default to full */
330 static int ixgbe_flow_control = ixgbe_fc_full;
331 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
332 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
333
334 /* Which packet processing uses workqueue or softint */
335 static bool ixgbe_txrx_workqueue = false;
336
337 /*
338 * Smart speed setting, default to on
339 * this only works as a compile option
340 * right now as its during attach, set
341 * this to 'ixgbe_smart_speed_off' to
342 * disable.
343 */
344 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
345
346 /*
347 * MSI-X should be the default for best performance,
348 * but this allows it to be forced off for testing.
349 */
350 static int ixgbe_enable_msix = 1;
351 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
352 "Enable MSI-X interrupts");
353
354 /*
355 * Number of Queues, can be set to 0,
356 * it then autoconfigures based on the
357 * number of cpus with a max of 8. This
358 * can be overridden manually here.
359 */
360 static int ixgbe_num_queues = 0;
361 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
362 "Number of queues to configure, 0 indicates autoconfigure");
363
364 /*
365 * Number of TX descriptors per ring,
366 * setting higher than RX as this seems
367 * the better performing choice.
368 */
369 static int ixgbe_txd = PERFORM_TXD;
370 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
371 "Number of transmit descriptors per queue");
372
373 /* Number of RX descriptors per ring */
374 static int ixgbe_rxd = PERFORM_RXD;
375 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
376 "Number of receive descriptors per queue");
377
378 /*
379 * Defining this on will allow the use
380 * of unsupported SFP+ modules, note that
381 * doing so you are on your own :)
382 */
383 static int allow_unsupported_sfp = false;
384 #define TUNABLE_INT(__x, __y)
385 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
386
387 /*
388 * Not sure if Flow Director is fully baked,
389 * so we'll default to turning it off.
390 */
391 static int ixgbe_enable_fdir = 0;
392 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
393 "Enable Flow Director");
394
395 /* Legacy Transmit (single queue) */
396 static int ixgbe_enable_legacy_tx = 0;
397 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
398 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
399
400 /* Receive-Side Scaling */
401 static int ixgbe_enable_rss = 1;
402 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
403 "Enable Receive-Side Scaling (RSS)");
404
405 #if 0
406 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
407 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
408 #endif
409
410 #ifdef NET_MPSAFE
411 #define IXGBE_MPSAFE 1
412 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
413 #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
414 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
415 #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
416 #else
417 #define IXGBE_CALLOUT_FLAGS 0
418 #define IXGBE_SOFTINT_FLAGS 0
419 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
420 #define IXGBE_TASKLET_WQ_FLAGS 0
421 #endif
422 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
423
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the RSS key
 *   registers (RSSRK) and the hash-field selection (MRQC) so that
 *   received packets are distributed across the configured RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 places the queue index in bits [7:4] of each entry */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family extends the table to 512 entries (RETA + ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the active queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then flush one 32-bit
		 * register every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 go to RETA, 128-511 to ERETA */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash-type bitmap into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
540
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Bring-up order matters here: RX is disabled first, then broadcast
 *   acceptance, jumbo/CRC-strip options, the per-ring descriptor
 *   base/length/SRRCTL registers, split-header types, RSS mapping and
 *   finally the RXCSUM offload bits are programmed.
 ************************************************************************/
/* Round-up amount so rx_mbuf_sz maps to a whole SRRCTL BSIZEPKT unit */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Convert the mbuf buffer size into SRRCTL BSIZEPKT units */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware ring index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Header-split types; register exists only on 82599+ */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
670
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Programs each TX ring's descriptor base/length, statistics
 *   mapping, head/tail pointers and head-writeback setting, then
 *   (on 82599 and later) enables TX DMA and configures MTQC with the
 *   descriptor arbiter temporarily disabled, as the datasheet requires.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware ring index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable TX DMA (82599 and later only) */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
764
765 static void
766 ixgbe_quirks(struct adapter *adapter)
767 {
768 device_t dev = adapter->dev;
769 struct ixgbe_hw *hw = &adapter->hw;
770 const char *vendor, *product;
771
772 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
773 /*
774 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
775 * MA10-ST0.
776 */
777 vendor = pmf_get_platform("system-vendor");
778 product = pmf_get_platform("system-product");
779
780 if ((vendor == NULL) || (product == NULL))
781 return;
782
783 if ((strcmp(vendor, "GIGABYTE") == 0) &&
784 (strcmp(product, "MA10-ST0") == 0)) {
785 aprint_verbose_dev(dev,
786 "Enable SFP+ MOD_ABS inverse quirk\n");
787 adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
788 }
789 }
790 }
791
792 /************************************************************************
793 * ixgbe_attach - Device initialization routine
794 *
795 * Called when the driver is being loaded.
796 * Identifies the type of hardware, allocates all resources
797 * and initializes the hardware.
798 *
799 * return 0 on success, positive on failure
800 ************************************************************************/
801 static void
802 ixgbe_attach(device_t parent, device_t dev, void *aux)
803 {
804 struct adapter *adapter;
805 struct ixgbe_hw *hw;
806 int error = -1;
807 u32 ctrl_ext;
808 u16 high, low, nvmreg;
809 pcireg_t id, subid;
810 const ixgbe_vendor_info_t *ent;
811 struct pci_attach_args *pa = aux;
812 bool unsupported_sfp = false;
813 const char *str;
814 char wqname[MAXCOMLEN];
815 char buf[256];
816
817 INIT_DEBUGOUT("ixgbe_attach: begin");
818
819 /* Allocate, clear, and link in our adapter structure */
820 adapter = device_private(dev);
821 adapter->hw.back = adapter;
822 adapter->dev = dev;
823 hw = &adapter->hw;
824 adapter->osdep.pc = pa->pa_pc;
825 adapter->osdep.tag = pa->pa_tag;
826 if (pci_dma64_available(pa))
827 adapter->osdep.dmat = pa->pa_dmat64;
828 else
829 adapter->osdep.dmat = pa->pa_dmat;
830 adapter->osdep.attached = false;
831 adapter->osdep.detaching = false;
832
833 ent = ixgbe_lookup(pa);
834
835 KASSERT(ent != NULL);
836
837 aprint_normal(": %s, Version - %s\n",
838 ixgbe_strings[ent->index], ixgbe_driver_version);
839
840 /* Core Lock Init */
841 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
842
843 /* Set up the timer callout and workqueue */
844 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
845 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
846 error = workqueue_create(&adapter->timer_wq, wqname,
847 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
848 IXGBE_TASKLET_WQ_FLAGS);
849 if (error) {
850 aprint_error_dev(dev,
851 "could not create timer workqueue (%d)\n", error);
852 goto err_out;
853 }
854
855 /* Determine hardware revision */
856 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
857 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
858
859 hw->vendor_id = PCI_VENDOR(id);
860 hw->device_id = PCI_PRODUCT(id);
861 hw->revision_id =
862 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
863 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
864 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
865
866 /* Set quirk flags */
867 ixgbe_quirks(adapter);
868
869 /*
870 * Make sure BUSMASTER is set
871 */
872 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
873
874 /* Do base PCI setup - map BAR0 */
875 if (ixgbe_allocate_pci_resources(adapter, pa)) {
876 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
877 error = ENXIO;
878 goto err_out;
879 }
880
881 /* let hardware know driver is loaded */
882 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
883 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
884 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
885
886 /*
887 * Initialize the shared code
888 */
889 if (ixgbe_init_shared_code(hw) != 0) {
890 aprint_error_dev(dev, "Unable to initialize the shared code\n");
891 error = ENXIO;
892 goto err_out;
893 }
894
895 switch (hw->mac.type) {
896 case ixgbe_mac_82598EB:
897 str = "82598EB";
898 break;
899 case ixgbe_mac_82599EB:
900 str = "82599EB";
901 break;
902 case ixgbe_mac_X540:
903 str = "X540";
904 break;
905 case ixgbe_mac_X550:
906 str = "X550";
907 break;
908 case ixgbe_mac_X550EM_x:
909 str = "X550EM X";
910 break;
911 case ixgbe_mac_X550EM_a:
912 str = "X550EM A";
913 break;
914 default:
915 str = "Unknown";
916 break;
917 }
918 aprint_normal_dev(dev, "device %s\n", str);
919
920 if (hw->mbx.ops.init_params)
921 hw->mbx.ops.init_params(hw);
922
923 hw->allow_unsupported_sfp = allow_unsupported_sfp;
924
925 /* Pick up the 82599 settings */
926 if (hw->mac.type != ixgbe_mac_82598EB) {
927 hw->phy.smart_speed = ixgbe_smart_speed;
928 adapter->num_segs = IXGBE_82599_SCATTER;
929 } else
930 adapter->num_segs = IXGBE_82598_SCATTER;
931
932 /* Ensure SW/FW semaphore is free */
933 ixgbe_init_swfw_semaphore(hw);
934
935 hw->mac.ops.set_lan_id(hw);
936 ixgbe_init_device_features(adapter);
937
938 if (ixgbe_configure_interrupts(adapter)) {
939 error = ENXIO;
940 goto err_out;
941 }
942
943 /* Allocate multicast array memory. */
944 adapter->mta = malloc(sizeof(*adapter->mta) *
945 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
946
947 /* Enable WoL (if supported) */
948 ixgbe_check_wol_support(adapter);
949
950 /* Register for VLAN events */
951 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
952
953 /* Verify adapter fan is still functional (if applicable) */
954 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
955 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
956 ixgbe_check_fan_failure(adapter, esdp, FALSE);
957 }
958
959 /* Set an initial default flow control value */
960 hw->fc.requested_mode = ixgbe_flow_control;
961
962 /* Sysctls for limiting the amount of work done in the taskqueues */
963 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
964 "max number of rx packets to process",
965 &adapter->rx_process_limit, ixgbe_rx_process_limit);
966
967 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
968 "max number of tx packets to process",
969 &adapter->tx_process_limit, ixgbe_tx_process_limit);
970
971 /* Do descriptor calc and sanity checks */
972 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
973 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
974 aprint_error_dev(dev, "TXD config issue, using default!\n");
975 adapter->num_tx_desc = DEFAULT_TXD;
976 } else
977 adapter->num_tx_desc = ixgbe_txd;
978
979 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
980 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
981 aprint_error_dev(dev, "RXD config issue, using default!\n");
982 adapter->num_rx_desc = DEFAULT_RXD;
983 } else
984 adapter->num_rx_desc = ixgbe_rxd;
985
986 adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;
987
988 /* Allocate our TX/RX Queues */
989 if (ixgbe_allocate_queues(adapter)) {
990 error = ENOMEM;
991 goto err_out;
992 }
993
994 hw->phy.reset_if_overtemp = TRUE;
995 error = ixgbe_reset_hw(hw);
996 hw->phy.reset_if_overtemp = FALSE;
997 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
998 error = IXGBE_SUCCESS;
999 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1000 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
1001 unsupported_sfp = true;
1002 error = IXGBE_SUCCESS;
1003 } else if (error) {
1004 aprint_error_dev(dev, "Hardware initialization failed\n");
1005 error = EIO;
1006 goto err_late;
1007 }
1008
1009 /* Make sure we have a good EEPROM before we read from it */
1010 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
1011 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1012 error = EIO;
1013 goto err_late;
1014 }
1015
1016 aprint_normal("%s:", device_xname(dev));
1017 /* NVM Image Version */
1018 high = low = 0;
1019 switch (hw->mac.type) {
1020 case ixgbe_mac_X540:
1021 case ixgbe_mac_X550EM_a:
1022 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1023 if (nvmreg == 0xffff)
1024 break;
1025 high = (nvmreg >> 12) & 0x0f;
1026 low = (nvmreg >> 4) & 0xff;
1027 id = nvmreg & 0x0f;
1028 aprint_normal(" NVM Image Version %u.", high);
1029 if (hw->mac.type == ixgbe_mac_X540)
1030 str = "%x";
1031 else
1032 str = "%02x";
1033 aprint_normal(str, low);
1034 aprint_normal(" ID 0x%x,", id);
1035 break;
1036 case ixgbe_mac_X550EM_x:
1037 case ixgbe_mac_X550:
1038 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1039 if (nvmreg == 0xffff)
1040 break;
1041 high = (nvmreg >> 12) & 0x0f;
1042 low = nvmreg & 0xff;
1043 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1044 break;
1045 default:
1046 break;
1047 }
1048 hw->eeprom.nvm_image_ver_high = high;
1049 hw->eeprom.nvm_image_ver_low = low;
1050
1051 /* PHY firmware revision */
1052 switch (hw->mac.type) {
1053 case ixgbe_mac_X540:
1054 case ixgbe_mac_X550:
1055 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1056 if (nvmreg == 0xffff)
1057 break;
1058 high = (nvmreg >> 12) & 0x0f;
1059 low = (nvmreg >> 4) & 0xff;
1060 id = nvmreg & 0x000f;
1061 aprint_normal(" PHY FW Revision %u.", high);
1062 if (hw->mac.type == ixgbe_mac_X540)
1063 str = "%x";
1064 else
1065 str = "%02x";
1066 aprint_normal(str, low);
1067 aprint_normal(" ID 0x%x,", id);
1068 break;
1069 default:
1070 break;
1071 }
1072
1073 /* NVM Map version & OEM NVM Image version */
1074 switch (hw->mac.type) {
1075 case ixgbe_mac_X550:
1076 case ixgbe_mac_X550EM_x:
1077 case ixgbe_mac_X550EM_a:
1078 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1079 if (nvmreg != 0xffff) {
1080 high = (nvmreg >> 12) & 0x0f;
1081 low = nvmreg & 0x00ff;
1082 aprint_normal(" NVM Map version %u.%02x,", high, low);
1083 }
1084 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1085 if (nvmreg != 0xffff) {
1086 high = (nvmreg >> 12) & 0x0f;
1087 low = nvmreg & 0x00ff;
1088 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1089 low);
1090 }
1091 break;
1092 default:
1093 break;
1094 }
1095
1096 /* Print the ETrackID */
1097 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1098 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1099 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1100
1101 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1102 error = ixgbe_allocate_msix(adapter, pa);
1103 if (error) {
1104 /* Free allocated queue structures first */
1105 ixgbe_free_queues(adapter);
1106
1107 /* Fallback to legacy interrupt */
1108 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1109 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1110 adapter->feat_en |= IXGBE_FEATURE_MSI;
1111 adapter->num_queues = 1;
1112
1113 /* Allocate our TX/RX Queues again */
1114 if (ixgbe_allocate_queues(adapter)) {
1115 error = ENOMEM;
1116 goto err_out;
1117 }
1118 }
1119 }
1120 /* Recovery mode */
1121 switch (adapter->hw.mac.type) {
1122 case ixgbe_mac_X550:
1123 case ixgbe_mac_X550EM_x:
1124 case ixgbe_mac_X550EM_a:
1125 /* >= 2.00 */
1126 if (hw->eeprom.nvm_image_ver_high >= 2) {
1127 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1128 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1129 }
1130 break;
1131 default:
1132 break;
1133 }
1134
1135 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1136 error = ixgbe_allocate_legacy(adapter, pa);
1137 if (error)
1138 goto err_late;
1139
1140 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1141 mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1142 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1143 error = workqueue_create(&adapter->admin_wq, wqname,
1144 ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
1145 IXGBE_TASKLET_WQ_FLAGS);
1146 if (error) {
1147 aprint_error_dev(dev,
1148 "could not create admin workqueue (%d)\n", error);
1149 goto err_out;
1150 }
1151
1152 error = ixgbe_start_hw(hw);
1153 switch (error) {
1154 case IXGBE_ERR_EEPROM_VERSION:
1155 aprint_error_dev(dev, "This device is a pre-production adapter/"
1156 "LOM. Please be aware there may be issues associated "
1157 "with your hardware.\nIf you are experiencing problems "
1158 "please contact your Intel or hardware representative "
1159 "who provided you with this hardware.\n");
1160 break;
1161 default:
1162 break;
1163 }
1164
1165 /* Setup OS specific network interface */
1166 if (ixgbe_setup_interface(dev, adapter) != 0)
1167 goto err_late;
1168
1169 /*
1170 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1171 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1172 */
1173 if (hw->phy.media_type == ixgbe_media_type_copper) {
1174 uint16_t id1, id2;
1175 int oui, model, rev;
1176 const char *descr;
1177
1178 id1 = hw->phy.id >> 16;
1179 id2 = hw->phy.id & 0xffff;
1180 oui = MII_OUI(id1, id2);
1181 model = MII_MODEL(id2);
1182 rev = MII_REV(id2);
1183 if ((descr = mii_get_descr(oui, model)) != NULL)
1184 aprint_normal_dev(dev,
1185 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1186 descr, oui, model, rev);
1187 else
1188 aprint_normal_dev(dev,
1189 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1190 oui, model, rev);
1191 }
1192
1193 /* Enable EEE power saving */
1194 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1195 hw->mac.ops.setup_eee(hw,
1196 adapter->feat_en & IXGBE_FEATURE_EEE);
1197
1198 /* Enable power to the phy. */
1199 if (!unsupported_sfp) {
1200 /* Enable the optics for 82599 SFP+ fiber */
1201 ixgbe_enable_tx_laser(hw);
1202
1203 /*
1204 * XXX Currently, ixgbe_set_phy_power() supports only copper
1205 * PHY, so it's not required to test with !unsupported_sfp.
1206 */
1207 ixgbe_set_phy_power(hw, TRUE);
1208 }
1209
1210 /* Initialize statistics */
1211 ixgbe_update_stats_counters(adapter);
1212
1213 /* Check PCIE slot type/speed/width */
1214 ixgbe_get_slot_info(adapter);
1215
1216 /*
1217 * Do time init and sysctl init here, but
1218 * only on the first port of a bypass adapter.
1219 */
1220 ixgbe_bypass_init(adapter);
1221
1222 /* Set an initial dmac value */
1223 adapter->dmac = 0;
1224 /* Set initial advertised speeds (if applicable) */
1225 adapter->advertise = ixgbe_get_advertise(adapter);
1226
1227 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1228 ixgbe_define_iov_schemas(dev, &error);
1229
1230 /* Add sysctls */
1231 ixgbe_add_device_sysctls(adapter);
1232 ixgbe_add_hw_stats(adapter);
1233
1234 /* For Netmap */
1235 adapter->init_locked = ixgbe_init_locked;
1236 adapter->stop_locked = ixgbe_stop_locked;
1237
1238 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1239 ixgbe_netmap_attach(adapter);
1240
1241 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1242 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1243 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1244 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1245
1246 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1247 pmf_class_network_register(dev, adapter->ifp);
1248 else
1249 aprint_error_dev(dev, "couldn't establish power handler\n");
1250
1251 /* Init recovery mode timer and state variable */
1252 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1253 adapter->recovery_mode = 0;
1254
1255 /* Set up the timer callout */
1256 callout_init(&adapter->recovery_mode_timer,
1257 IXGBE_CALLOUT_FLAGS);
1258 snprintf(wqname, sizeof(wqname), "%s-recovery",
1259 device_xname(dev));
1260 error = workqueue_create(&adapter->recovery_mode_timer_wq,
1261 wqname, ixgbe_handle_recovery_mode_timer, adapter,
1262 IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1263 if (error) {
1264 aprint_error_dev(dev, "could not create "
1265 "recovery_mode_timer workqueue (%d)\n", error);
1266 goto err_out;
1267 }
1268
1269 /* Start the task */
1270 callout_reset(&adapter->recovery_mode_timer, hz,
1271 ixgbe_recovery_mode_timer, adapter);
1272 }
1273
1274 INIT_DEBUGOUT("ixgbe_attach: end");
1275 adapter->osdep.attached = true;
1276
1277 return;
1278
1279 err_late:
1280 ixgbe_free_queues(adapter);
1281 err_out:
1282 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1283 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1285 ixgbe_free_deferred_handlers(adapter);
1286 ixgbe_free_pci_resources(adapter);
1287 if (adapter->mta != NULL)
1288 free(adapter->mta, M_DEVBUF);
1289 mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
1290 IXGBE_CORE_LOCK_DESTROY(adapter);
1291
1292 return;
1293 } /* ixgbe_attach */
1294
1295 /************************************************************************
1296 * ixgbe_check_wol_support
1297 *
1298 * Checks whether the adapter's ports are capable of
1299 * Wake On LAN by reading the adapter's NVM.
1300 *
1301 * Sets each port's hw->wol_enabled value depending
1302 * on the value read here.
1303 ************************************************************************/
1304 static void
1305 ixgbe_check_wol_support(struct adapter *adapter)
1306 {
1307 struct ixgbe_hw *hw = &adapter->hw;
1308 u16 dev_caps = 0;
1309
1310 /* Find out WoL support for port */
1311 adapter->wol_support = hw->wol_enabled = 0;
1312 ixgbe_get_device_caps(hw, &dev_caps);
1313 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1314 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1315 hw->bus.func == 0))
1316 adapter->wol_support = hw->wol_enabled = 1;
1317
1318 /* Save initial wake up filter configuration */
1319 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1320
1321 return;
1322 } /* ixgbe_check_wol_support */
1323
1324 /************************************************************************
1325 * ixgbe_setup_interface
1326 *
1327 * Setup networking device structure and register an interface.
1328 ************************************************************************/
1329 static int
1330 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1331 {
1332 struct ethercom *ec = &adapter->osdep.ec;
1333 struct ifnet *ifp;
1334 int rv;
1335
1336 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1337
1338 ifp = adapter->ifp = &ec->ec_if;
1339 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1340 ifp->if_baudrate = IF_Gbps(10);
1341 ifp->if_init = ixgbe_init;
1342 ifp->if_stop = ixgbe_ifstop;
1343 ifp->if_softc = adapter;
1344 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1345 #ifdef IXGBE_MPSAFE
1346 ifp->if_extflags = IFEF_MPSAFE;
1347 #endif
1348 ifp->if_ioctl = ixgbe_ioctl;
1349 #if __FreeBSD_version >= 1100045
1350 /* TSO parameters */
1351 ifp->if_hw_tsomax = 65518;
1352 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1353 ifp->if_hw_tsomaxsegsize = 2048;
1354 #endif
1355 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1356 #if 0
1357 ixgbe_start_locked = ixgbe_legacy_start_locked;
1358 #endif
1359 } else {
1360 ifp->if_transmit = ixgbe_mq_start;
1361 #if 0
1362 ixgbe_start_locked = ixgbe_mq_start_locked;
1363 #endif
1364 }
1365 ifp->if_start = ixgbe_legacy_start;
1366 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1367 IFQ_SET_READY(&ifp->if_snd);
1368
1369 rv = if_initialize(ifp);
1370 if (rv != 0) {
1371 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1372 return rv;
1373 }
1374 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1375 ether_ifattach(ifp, adapter->hw.mac.addr);
1376 aprint_normal_dev(dev, "Ethernet address %s\n",
1377 ether_sprintf(adapter->hw.mac.addr));
1378 /*
1379 * We use per TX queue softint, so if_deferred_start_init() isn't
1380 * used.
1381 */
1382 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1383
1384 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1385
1386 /*
1387 * Tell the upper layer(s) we support long frames.
1388 */
1389 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1390
1391 /* Set capability flags */
1392 ifp->if_capabilities |= IFCAP_RXCSUM
1393 | IFCAP_TXCSUM
1394 | IFCAP_TSOv4
1395 | IFCAP_TSOv6;
1396 ifp->if_capenable = 0;
1397
1398 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1399 | ETHERCAP_VLAN_HWCSUM
1400 | ETHERCAP_JUMBO_MTU
1401 | ETHERCAP_VLAN_MTU;
1402
1403 /* Enable the above capabilities by default */
1404 ec->ec_capenable = ec->ec_capabilities;
1405
1406 /*
1407 * Don't turn this on by default, if vlans are
1408 * created on another pseudo device (eg. lagg)
1409 * then vlan events are not passed thru, breaking
1410 * operation, but with HW FILTER off it works. If
1411 * using vlans directly on the ixgbe driver you can
1412 * enable this and get full hardware tag filtering.
1413 */
1414 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1415
1416 /*
1417 * Specify the media types supported by this adapter and register
1418 * callbacks to update media and link information
1419 */
1420 ec->ec_ifmedia = &adapter->media;
1421 ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
1422 ixgbe_media_status, &adapter->core_mtx);
1423
1424 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1425 ixgbe_add_media_types(adapter);
1426
1427 /* Set autoselect media by default */
1428 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1429
1430 if_register(ifp);
1431
1432 return (0);
1433 } /* ixgbe_setup_interface */
1434
1435 /************************************************************************
1436 * ixgbe_add_media_types
1437 ************************************************************************/
1438 static void
1439 ixgbe_add_media_types(struct adapter *adapter)
1440 {
1441 struct ixgbe_hw *hw = &adapter->hw;
1442 u64 layer;
1443
1444 layer = adapter->phy_layer;
1445
1446 #define ADD(mm, dd) \
1447 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1448
1449 ADD(IFM_NONE, 0);
1450
1451 /* Media types with matching NetBSD media defines */
1452 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1453 ADD(IFM_10G_T | IFM_FDX, 0);
1454 }
1455 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1456 ADD(IFM_1000_T | IFM_FDX, 0);
1457 }
1458 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1459 ADD(IFM_100_TX | IFM_FDX, 0);
1460 }
1461 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1462 ADD(IFM_10_T | IFM_FDX, 0);
1463 }
1464
1465 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1466 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1467 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1468 }
1469
1470 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1471 ADD(IFM_10G_LR | IFM_FDX, 0);
1472 if (hw->phy.multispeed_fiber) {
1473 ADD(IFM_1000_LX | IFM_FDX, 0);
1474 }
1475 }
1476 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1477 ADD(IFM_10G_SR | IFM_FDX, 0);
1478 if (hw->phy.multispeed_fiber) {
1479 ADD(IFM_1000_SX | IFM_FDX, 0);
1480 }
1481 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1482 ADD(IFM_1000_SX | IFM_FDX, 0);
1483 }
1484 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1485 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1486 }
1487
1488 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1489 ADD(IFM_10G_KR | IFM_FDX, 0);
1490 }
1491 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1492 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1493 }
1494 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1495 ADD(IFM_1000_KX | IFM_FDX, 0);
1496 }
1497 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1498 ADD(IFM_2500_KX | IFM_FDX, 0);
1499 }
1500 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1501 ADD(IFM_2500_T | IFM_FDX, 0);
1502 }
1503 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1504 ADD(IFM_5000_T | IFM_FDX, 0);
1505 }
1506 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1507 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1508 /* XXX no ifmedia_set? */
1509
1510 ADD(IFM_AUTO, 0);
1511
1512 #undef ADD
1513 } /* ixgbe_add_media_types */
1514
1515 /************************************************************************
1516 * ixgbe_is_sfp
1517 ************************************************************************/
1518 static inline bool
1519 ixgbe_is_sfp(struct ixgbe_hw *hw)
1520 {
1521 switch (hw->mac.type) {
1522 case ixgbe_mac_82598EB:
1523 if (hw->phy.type == ixgbe_phy_nl)
1524 return (TRUE);
1525 return (FALSE);
1526 case ixgbe_mac_82599EB:
1527 case ixgbe_mac_X550EM_x:
1528 case ixgbe_mac_X550EM_a:
1529 switch (hw->mac.ops.get_media_type(hw)) {
1530 case ixgbe_media_type_fiber:
1531 case ixgbe_media_type_fiber_qsfp:
1532 return (TRUE);
1533 default:
1534 return (FALSE);
1535 }
1536 default:
1537 return (FALSE);
1538 }
1539 } /* ixgbe_is_sfp */
1540
1541 static void
1542 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1543 {
1544
1545 KASSERT(mutex_owned(&adapter->admin_mtx));
1546
1547 if (__predict_true(adapter->osdep.detaching == false)) {
1548 if (adapter->admin_pending == 0)
1549 workqueue_enqueue(adapter->admin_wq,
1550 &adapter->admin_wc, NULL);
1551 adapter->admin_pending = 1;
1552 }
1553 }
1554
1555 /************************************************************************
1556 * ixgbe_config_link
1557 ************************************************************************/
1558 static void
1559 ixgbe_config_link(struct adapter *adapter)
1560 {
1561 struct ixgbe_hw *hw = &adapter->hw;
1562 u32 autoneg, err = 0;
1563 u32 task_requests = 0;
1564 bool sfp, negotiate = false;
1565
1566 sfp = ixgbe_is_sfp(hw);
1567
1568 if (sfp) {
1569 if (hw->phy.multispeed_fiber) {
1570 ixgbe_enable_tx_laser(hw);
1571 task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1572 }
1573 task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1574
1575 mutex_enter(&adapter->admin_mtx);
1576 adapter->task_requests |= task_requests;
1577 ixgbe_schedule_admin_tasklet(adapter);
1578 mutex_exit(&adapter->admin_mtx);
1579 } else {
1580 struct ifmedia *ifm = &adapter->media;
1581
1582 if (hw->mac.ops.check_link)
1583 err = ixgbe_check_link(hw, &adapter->link_speed,
1584 &adapter->link_up, FALSE);
1585 if (err)
1586 return;
1587
1588 /*
1589 * Check if it's the first call. If it's the first call,
1590 * get value for auto negotiation.
1591 */
1592 autoneg = hw->phy.autoneg_advertised;
1593 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1594 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1595 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1596 &negotiate);
1597 if (err)
1598 return;
1599 if (hw->mac.ops.setup_link)
1600 err = hw->mac.ops.setup_link(hw, autoneg,
1601 adapter->link_up);
1602 }
1603
1604 } /* ixgbe_config_link */
1605
1606 /************************************************************************
1607 * ixgbe_update_stats_counters - Update board statistics counters.
1608 ************************************************************************/
1609 static void
1610 ixgbe_update_stats_counters(struct adapter *adapter)
1611 {
1612 struct ifnet *ifp = adapter->ifp;
1613 struct ixgbe_hw *hw = &adapter->hw;
1614 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1615 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1616 u64 total_missed_rx = 0;
1617 uint64_t crcerrs, rlec;
1618 unsigned int queue_counters;
1619 int i;
1620
1621 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1622 stats->crcerrs.ev_count += crcerrs;
1623 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1624 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1625 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1626 if (hw->mac.type >= ixgbe_mac_X550)
1627 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1628
1629 /* 16 registers exist */
1630 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1631 for (i = 0; i < queue_counters; i++) {
1632 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1633 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1634 if (hw->mac.type >= ixgbe_mac_82599EB) {
1635 stats->qprdc[i].ev_count
1636 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1637 }
1638 }
1639
1640 /* 8 registers exist */
1641 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1642 uint32_t mp;
1643
1644 /* MPC */
1645 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1646 /* global total per queue */
1647 stats->mpc[i].ev_count += mp;
1648 /* running comprehensive total for stats display */
1649 total_missed_rx += mp;
1650
1651 if (hw->mac.type == ixgbe_mac_82598EB)
1652 stats->rnbc[i].ev_count
1653 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1654
1655 stats->pxontxc[i].ev_count
1656 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1657 stats->pxofftxc[i].ev_count
1658 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1659 if (hw->mac.type >= ixgbe_mac_82599EB) {
1660 stats->pxonrxc[i].ev_count
1661 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1662 stats->pxoffrxc[i].ev_count
1663 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1664 stats->pxon2offc[i].ev_count
1665 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1666 } else {
1667 stats->pxonrxc[i].ev_count
1668 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1669 stats->pxoffrxc[i].ev_count
1670 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1671 }
1672 }
1673 stats->mpctotal.ev_count += total_missed_rx;
1674
1675 /* Document says M[LR]FC are valid when link is up and 10Gbps */
1676 if ((adapter->link_active == LINK_STATE_UP)
1677 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1678 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1679 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1680 }
1681 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1682 stats->rlec.ev_count += rlec;
1683
1684 /* Hardware workaround, gprc counts missed packets */
1685 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1686
1687 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1688 stats->lxontxc.ev_count += lxon;
1689 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1690 stats->lxofftxc.ev_count += lxoff;
1691 total = lxon + lxoff;
1692
1693 if (hw->mac.type != ixgbe_mac_82598EB) {
1694 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1695 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1696 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1697 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1698 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1699 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1700 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1701 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1702 } else {
1703 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1704 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1705 /* 82598 only has a counter in the high register */
1706 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1707 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1708 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1709 }
1710
1711 /*
1712 * Workaround: mprc hardware is incorrectly counting
1713 * broadcasts, so for now we subtract those.
1714 */
1715 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1716 stats->bprc.ev_count += bprc;
1717 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1718 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1719
1720 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1721 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1722 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1723 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1724 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1725 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1726
1727 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1728 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1729 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1730
1731 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1732 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1733 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1734 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1735 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1736 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1737 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1738 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1739 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1740 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1741 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1742 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1743 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1744 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1745 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1746 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1747 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1748 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1749 /* Only read FCOE on 82599 */
1750 if (hw->mac.type != ixgbe_mac_82598EB) {
1751 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1752 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1753 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1754 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1755 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1756 }
1757
1758 /*
1759 * Fill out the OS statistics structure. Only RX errors are required
1760 * here because all TX counters are incremented in the TX path and
1761 * normal RX counters are prepared in ether_input().
1762 */
1763 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1764 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1765 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1766 IF_STAT_PUTREF(ifp);
1767 } /* ixgbe_update_stats_counters */
1768
1769 /************************************************************************
1770 * ixgbe_add_hw_stats
1771 *
1772 * Add sysctl variables, one per statistic, to the system.
1773 ************************************************************************/
1774 static void
1775 ixgbe_add_hw_stats(struct adapter *adapter)
1776 {
1777 device_t dev = adapter->dev;
1778 const struct sysctlnode *rnode, *cnode;
1779 struct sysctllog **log = &adapter->sysctllog;
1780 struct tx_ring *txr = adapter->tx_rings;
1781 struct rx_ring *rxr = adapter->rx_rings;
1782 struct ixgbe_hw *hw = &adapter->hw;
1783 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1784 const char *xname = device_xname(dev);
1785 int i;
1786
1787 /* Driver Statistics */
1788 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1789 NULL, xname, "Driver tx dma soft fail EFBIG");
1790 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1791 NULL, xname, "m_defrag() failed");
1792 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1793 NULL, xname, "Driver tx dma hard fail EFBIG");
1794 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1795 NULL, xname, "Driver tx dma hard fail EINVAL");
1796 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1797 NULL, xname, "Driver tx dma hard fail other");
1798 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1799 NULL, xname, "Driver tx dma soft fail EAGAIN");
1800 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1801 NULL, xname, "Driver tx dma soft fail ENOMEM");
1802 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1803 NULL, xname, "Watchdog timeouts");
1804 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1805 NULL, xname, "TSO errors");
1806 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1807 NULL, xname, "Admin MSI-X IRQ Handled");
1808 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1809 NULL, xname, "Link event");
1810 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1811 NULL, xname, "SFP+ module event");
1812 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1813 NULL, xname, "Multispeed event");
1814 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1815 NULL, xname, "External PHY event");
1816
1817 /* Max number of traffic class is 8 */
1818 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1819 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1820 snprintf(adapter->tcs[i].evnamebuf,
1821 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1822 xname, i);
1823 if (i < __arraycount(stats->mpc)) {
1824 evcnt_attach_dynamic(&stats->mpc[i],
1825 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1826 "RX Missed Packet Count");
1827 if (hw->mac.type == ixgbe_mac_82598EB)
1828 evcnt_attach_dynamic(&stats->rnbc[i],
1829 EVCNT_TYPE_MISC, NULL,
1830 adapter->tcs[i].evnamebuf,
1831 "Receive No Buffers");
1832 }
1833 if (i < __arraycount(stats->pxontxc)) {
1834 evcnt_attach_dynamic(&stats->pxontxc[i],
1835 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1836 "pxontxc");
1837 evcnt_attach_dynamic(&stats->pxonrxc[i],
1838 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1839 "pxonrxc");
1840 evcnt_attach_dynamic(&stats->pxofftxc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1842 "pxofftxc");
1843 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1845 "pxoffrxc");
1846 if (hw->mac.type >= ixgbe_mac_82599EB)
1847 evcnt_attach_dynamic(&stats->pxon2offc[i],
1848 EVCNT_TYPE_MISC, NULL,
1849 adapter->tcs[i].evnamebuf,
1850 "pxon2offc");
1851 }
1852 }
1853
1854 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1855 #ifdef LRO
1856 struct lro_ctrl *lro = &rxr->lro;
1857 #endif /* LRO */
1858
1859 snprintf(adapter->queues[i].evnamebuf,
1860 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1861 xname, i);
1862 snprintf(adapter->queues[i].namebuf,
1863 sizeof(adapter->queues[i].namebuf), "q%d", i);
1864
1865 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1866 aprint_error_dev(dev, "could not create sysctl root\n");
1867 break;
1868 }
1869
1870 if (sysctl_createv(log, 0, &rnode, &rnode,
1871 0, CTLTYPE_NODE,
1872 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1873 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1874 break;
1875
1876 if (sysctl_createv(log, 0, &rnode, &cnode,
1877 CTLFLAG_READWRITE, CTLTYPE_INT,
1878 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1879 ixgbe_sysctl_interrupt_rate_handler, 0,
1880 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1881 break;
1882
1883 if (sysctl_createv(log, 0, &rnode, &cnode,
1884 CTLFLAG_READONLY, CTLTYPE_INT,
1885 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1886 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1887 0, CTL_CREATE, CTL_EOL) != 0)
1888 break;
1889
1890 if (sysctl_createv(log, 0, &rnode, &cnode,
1891 CTLFLAG_READONLY, CTLTYPE_INT,
1892 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1893 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1894 0, CTL_CREATE, CTL_EOL) != 0)
1895 break;
1896
1897 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1898 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1899 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1900 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1901 "Handled queue in softint");
1902 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1903 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1904 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1905 NULL, adapter->queues[i].evnamebuf, "TSO");
1906 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1907 NULL, adapter->queues[i].evnamebuf,
1908 "TX Queue No Descriptor Available");
1909 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1910 NULL, adapter->queues[i].evnamebuf,
1911 "Queue Packets Transmitted");
1912 #ifndef IXGBE_LEGACY_TX
1913 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1914 NULL, adapter->queues[i].evnamebuf,
1915 "Packets dropped in pcq");
1916 #endif
1917
1918 if (sysctl_createv(log, 0, &rnode, &cnode,
1919 CTLFLAG_READONLY,
1920 CTLTYPE_INT,
1921 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1922 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1923 CTL_CREATE, CTL_EOL) != 0)
1924 break;
1925
1926 if (sysctl_createv(log, 0, &rnode, &cnode,
1927 CTLFLAG_READONLY,
1928 CTLTYPE_INT,
1929 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1930 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1931 CTL_CREATE, CTL_EOL) != 0)
1932 break;
1933
1934 if (sysctl_createv(log, 0, &rnode, &cnode,
1935 CTLFLAG_READONLY,
1936 CTLTYPE_INT,
1937 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1938 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1939 CTL_CREATE, CTL_EOL) != 0)
1940 break;
1941
1942 if (i < __arraycount(stats->qprc)) {
1943 evcnt_attach_dynamic(&stats->qprc[i],
1944 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1945 "qprc");
1946 evcnt_attach_dynamic(&stats->qptc[i],
1947 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1948 "qptc");
1949 evcnt_attach_dynamic(&stats->qbrc[i],
1950 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1951 "qbrc");
1952 evcnt_attach_dynamic(&stats->qbtc[i],
1953 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1954 "qbtc");
1955 if (hw->mac.type >= ixgbe_mac_82599EB)
1956 evcnt_attach_dynamic(&stats->qprdc[i],
1957 EVCNT_TYPE_MISC, NULL,
1958 adapter->queues[i].evnamebuf, "qprdc");
1959 }
1960
1961 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1962 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1963 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1964 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1965 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1966 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1967 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1968 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1969 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1970 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1971 #ifdef LRO
1972 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1973 CTLFLAG_RD, &lro->lro_queued, 0,
1974 "LRO Queued");
1975 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1976 CTLFLAG_RD, &lro->lro_flushed, 0,
1977 "LRO Flushed");
1978 #endif /* LRO */
1979 }
1980
1981 /* MAC stats get their own sub node */
1982
1983 snprintf(stats->namebuf,
1984 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1985
1986 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "rx csum offload - IP");
1988 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "rx csum offload - L4");
1990 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "rx csum offload - IP bad");
1992 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "rx csum offload - L4 bad");
1994 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Interrupt conditions zero");
1996 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Legacy interrupts");
1998
1999 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "CRC Errors");
2001 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "Illegal Byte Errors");
2003 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "Byte Errors");
2005 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "MAC Short Packets Discarded");
2007 if (hw->mac.type >= ixgbe_mac_X550)
2008 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Bad SFD");
2010 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Total Packets Missed");
2012 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "MAC Local Faults");
2014 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "MAC Remote Faults");
2016 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Receive Length Errors");
2018 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Link XON Transmitted");
2020 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Link XON Received");
2022 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Link XOFF Transmitted");
2024 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Link XOFF Received");
2026
2027 /* Packet Reception Stats */
2028 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Total Octets Received");
2030 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Good Octets Received");
2032 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Total Packets Received");
2034 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Good Packets Received");
2036 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Multicast Packets Received");
2038 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "Broadcast Packets Received");
2040 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "64 byte frames received ");
2042 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "65-127 byte frames received");
2044 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "128-255 byte frames received");
2046 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "256-511 byte frames received");
2048 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "512-1023 byte frames received");
2050 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "1023-1522 byte frames received");
2052 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Receive Undersized");
2054 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Fragmented Packets Received ");
2056 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Oversized Packets Received");
2058 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Received Jabber");
2060 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Management Packets Received");
2062 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Management Packets Dropped");
2064 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2065 stats->namebuf, "Checksum Errors");
2066
2067 /* Packet Transmission Stats */
2068 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "Good Octets Transmitted");
2070 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "Total Packets Transmitted");
2072 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "Good Packets Transmitted");
2074 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "Broadcast Packets Transmitted");
2076 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "Multicast Packets Transmitted");
2078 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2079 stats->namebuf, "Management Packets Transmitted");
2080 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2081 stats->namebuf, "64 byte frames transmitted ");
2082 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2083 stats->namebuf, "65-127 byte frames transmitted");
2084 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2085 stats->namebuf, "128-255 byte frames transmitted");
2086 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2087 stats->namebuf, "256-511 byte frames transmitted");
2088 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2089 stats->namebuf, "512-1023 byte frames transmitted");
2090 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2091 stats->namebuf, "1024-1522 byte frames transmitted");
2092 } /* ixgbe_add_hw_stats */
2093
/*
 * ixgbe_clear_evcnt - zero every event counter attached by
 * ixgbe_add_hw_stats() plus the per-ring soft error tallies.
 * The set of counters touched here must stay in sync with that function.
 */
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver (software) statistics */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->admin_irqev.ev_count = 0;
	adapter->link_workev.ev_count = 0;
	adapter->mod_workev.ev_count = 0;
	adapter->msf_workev.ev_count = 0;
	adapter->phy_workev.ev_count = 0;

	/* Per-traffic-class counters; guards mirror ixgbe_add_hw_stats() */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			stats->mpc[i].ev_count = 0;
			/* rnbc[] is attached for 82598 only */
			if (hw->mac.type == ixgbe_mac_82598EB)
				stats->rnbc[i].ev_count = 0;
		}
		if (i < __arraycount(stats->pxontxc)) {
			stats->pxontxc[i].ev_count = 0;
			stats->pxonrxc[i].ev_count = 0;
			stats->pxofftxc[i].ev_count = 0;
			stats->pxoffrxc[i].ev_count = 0;
			/* pxon2offc[] is attached for 82599 and newer */
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->pxon2offc[i].ev_count = 0;
		}
	}

	/* Per-queue counters (txr was not advanced above; reset anyway) */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
		txr->tso_tx.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		/* Plain integers (not evcnts) kept per ring */
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	/* MAC statistics */
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	/* mbsdc is attached for X550 and newer */
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}
2225
2226 /************************************************************************
2227 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2228 *
2229 * Retrieves the TDH value from the hardware
2230 ************************************************************************/
2231 static int
2232 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2233 {
2234 struct sysctlnode node = *rnode;
2235 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2236 struct adapter *adapter;
2237 uint32_t val;
2238
2239 if (!txr)
2240 return (0);
2241
2242 adapter = txr->adapter;
2243 if (ixgbe_fw_recovery_mode_swflag(adapter))
2244 return (EPERM);
2245
2246 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2247 node.sysctl_data = &val;
2248 return sysctl_lookup(SYSCTLFN_CALL(&node));
2249 } /* ixgbe_sysctl_tdh_handler */
2250
2251 /************************************************************************
2252 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2253 *
2254 * Retrieves the TDT value from the hardware
2255 ************************************************************************/
2256 static int
2257 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2258 {
2259 struct sysctlnode node = *rnode;
2260 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2261 struct adapter *adapter;
2262 uint32_t val;
2263
2264 if (!txr)
2265 return (0);
2266
2267 adapter = txr->adapter;
2268 if (ixgbe_fw_recovery_mode_swflag(adapter))
2269 return (EPERM);
2270
2271 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2272 node.sysctl_data = &val;
2273 return sysctl_lookup(SYSCTLFN_CALL(&node));
2274 } /* ixgbe_sysctl_tdt_handler */
2275
2276 /************************************************************************
2277 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2278 * handler function
2279 *
2280 * Retrieves the next_to_check value
2281 ************************************************************************/
2282 static int
2283 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2284 {
2285 struct sysctlnode node = *rnode;
2286 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2287 struct adapter *adapter;
2288 uint32_t val;
2289
2290 if (!rxr)
2291 return (0);
2292
2293 adapter = rxr->adapter;
2294 if (ixgbe_fw_recovery_mode_swflag(adapter))
2295 return (EPERM);
2296
2297 val = rxr->next_to_check;
2298 node.sysctl_data = &val;
2299 return sysctl_lookup(SYSCTLFN_CALL(&node));
2300 } /* ixgbe_sysctl_next_to_check_handler */
2301
2302 /************************************************************************
2303 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2304 *
2305 * Retrieves the RDH value from the hardware
2306 ************************************************************************/
2307 static int
2308 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2309 {
2310 struct sysctlnode node = *rnode;
2311 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2312 struct adapter *adapter;
2313 uint32_t val;
2314
2315 if (!rxr)
2316 return (0);
2317
2318 adapter = rxr->adapter;
2319 if (ixgbe_fw_recovery_mode_swflag(adapter))
2320 return (EPERM);
2321
2322 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2323 node.sysctl_data = &val;
2324 return sysctl_lookup(SYSCTLFN_CALL(&node));
2325 } /* ixgbe_sysctl_rdh_handler */
2326
2327 /************************************************************************
2328 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2329 *
2330 * Retrieves the RDT value from the hardware
2331 ************************************************************************/
2332 static int
2333 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2334 {
2335 struct sysctlnode node = *rnode;
2336 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2337 struct adapter *adapter;
2338 uint32_t val;
2339
2340 if (!rxr)
2341 return (0);
2342
2343 adapter = rxr->adapter;
2344 if (ixgbe_fw_recovery_mode_swflag(adapter))
2345 return (EPERM);
2346
2347 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2348 node.sysctl_data = &val;
2349 return sysctl_lookup(SYSCTLFN_CALL(&node));
2350 } /* ixgbe_sysctl_rdt_handler */
2351
2352 static int
2353 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2354 {
2355 struct ifnet *ifp = &ec->ec_if;
2356 struct adapter *adapter = ifp->if_softc;
2357 int rv;
2358
2359 if (set)
2360 rv = ixgbe_register_vlan(adapter, vid);
2361 else
2362 rv = ixgbe_unregister_vlan(adapter, vid);
2363
2364 if (rv != 0)
2365 return rv;
2366
2367 /*
2368 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2369 * or 0 to 1.
2370 */
2371 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2372 ixgbe_setup_vlan_hw_tagging(adapter);
2373
2374 return rv;
2375 }
2376
2377 /************************************************************************
2378 * ixgbe_register_vlan
2379 *
2380 * Run via vlan config EVENT, it enables us to use the
2381 * HW Filter table since we can get the vlan id. This
2382 * just creates the entry in the soft version of the
2383 * VFTA, init will repopulate the real table.
2384 ************************************************************************/
2385 static int
2386 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2387 {
2388 u16 index, bit;
2389 int error;
2390
2391 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2392 return EINVAL;
2393
2394 IXGBE_CORE_LOCK(adapter);
2395 index = (vtag >> 5) & 0x7F;
2396 bit = vtag & 0x1F;
2397 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2398 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2399 true);
2400 IXGBE_CORE_UNLOCK(adapter);
2401 if (error != 0)
2402 error = EACCES;
2403
2404 return error;
2405 } /* ixgbe_register_vlan */
2406
2407 /************************************************************************
2408 * ixgbe_unregister_vlan
2409 *
2410 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2411 ************************************************************************/
2412 static int
2413 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2414 {
2415 u16 index, bit;
2416 int error;
2417
2418 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2419 return EINVAL;
2420
2421 IXGBE_CORE_LOCK(adapter);
2422 index = (vtag >> 5) & 0x7F;
2423 bit = vtag & 0x1F;
2424 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2425 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2426 true);
2427 IXGBE_CORE_UNLOCK(adapter);
2428 if (error != 0)
2429 error = EACCES;
2430
2431 return error;
2432 } /* ixgbe_unregister_vlan */
2433
2434 static void
2435 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2436 {
2437 struct ethercom *ec = &adapter->osdep.ec;
2438 struct ixgbe_hw *hw = &adapter->hw;
2439 struct rx_ring *rxr;
2440 u32 ctrl;
2441 int i;
2442 bool hwtagging;
2443
2444 /* Enable HW tagging only if any vlan is attached */
2445 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2446 && VLAN_ATTACHED(ec);
2447
2448 /* Setup the queues for vlans */
2449 for (i = 0; i < adapter->num_queues; i++) {
2450 rxr = &adapter->rx_rings[i];
2451 /*
2452 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2453 */
2454 if (hw->mac.type != ixgbe_mac_82598EB) {
2455 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2456 if (hwtagging)
2457 ctrl |= IXGBE_RXDCTL_VME;
2458 else
2459 ctrl &= ~IXGBE_RXDCTL_VME;
2460 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2461 }
2462 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2463 }
2464
2465 /* VLAN hw tagging for 82598 */
2466 if (hw->mac.type == ixgbe_mac_82598EB) {
2467 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2468 if (hwtagging)
2469 ctrl |= IXGBE_VLNCTRL_VME;
2470 else
2471 ctrl &= ~IXGBE_VLNCTRL_VME;
2472 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2473 }
2474 } /* ixgbe_setup_vlan_hw_tagging */
2475
2476 static void
2477 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2478 {
2479 struct ethercom *ec = &adapter->osdep.ec;
2480 struct ixgbe_hw *hw = &adapter->hw;
2481 int i;
2482 u32 ctrl;
2483 struct vlanid_list *vlanidp;
2484
2485 /*
2486 * This function is called from both if_init and ifflags_cb()
2487 * on NetBSD.
2488 */
2489
2490 /*
2491 * Part 1:
2492 * Setup VLAN HW tagging
2493 */
2494 ixgbe_setup_vlan_hw_tagging(adapter);
2495
2496 /*
2497 * Part 2:
2498 * Setup VLAN HW filter
2499 */
2500 /* Cleanup shadow_vfta */
2501 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2502 adapter->shadow_vfta[i] = 0;
2503 /* Generate shadow_vfta from ec_vids */
2504 ETHER_LOCK(ec);
2505 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2506 uint32_t idx;
2507
2508 idx = vlanidp->vid / 32;
2509 KASSERT(idx < IXGBE_VFTA_SIZE);
2510 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2511 }
2512 ETHER_UNLOCK(ec);
2513 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2514 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2515
2516 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2517 /* Enable the Filter Table if enabled */
2518 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2519 ctrl |= IXGBE_VLNCTRL_VFE;
2520 else
2521 ctrl &= ~IXGBE_VLNCTRL_VFE;
2522 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2523 } /* ixgbe_setup_vlan_hw_support */
2524
2525 /************************************************************************
2526 * ixgbe_get_slot_info
2527 *
2528 * Get the width and transaction speed of
2529 * the slot this adapter is plugged into.
2530 ************************************************************************/
2531 static void
2532 ixgbe_get_slot_info(struct adapter *adapter)
2533 {
2534 device_t dev = adapter->dev;
2535 struct ixgbe_hw *hw = &adapter->hw;
2536 u32 offset;
2537 u16 link;
2538 int bus_info_valid = TRUE;
2539
2540 /* Some devices are behind an internal bridge */
2541 switch (hw->device_id) {
2542 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2543 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2544 goto get_parent_info;
2545 default:
2546 break;
2547 }
2548
2549 ixgbe_get_bus_info(hw);
2550
2551 /*
2552 * Some devices don't use PCI-E, but there is no need
2553 * to display "Unknown" for bus speed and width.
2554 */
2555 switch (hw->mac.type) {
2556 case ixgbe_mac_X550EM_x:
2557 case ixgbe_mac_X550EM_a:
2558 return;
2559 default:
2560 goto display;
2561 }
2562
2563 get_parent_info:
2564 /*
2565 * For the Quad port adapter we need to parse back
2566 * up the PCI tree to find the speed of the expansion
2567 * slot into which this adapter is plugged. A bit more work.
2568 */
2569 dev = device_parent(device_parent(dev));
2570 #if 0
2571 #ifdef IXGBE_DEBUG
2572 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2573 pci_get_slot(dev), pci_get_function(dev));
2574 #endif
2575 dev = device_parent(device_parent(dev));
2576 #ifdef IXGBE_DEBUG
2577 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2578 pci_get_slot(dev), pci_get_function(dev));
2579 #endif
2580 #endif
2581 /* Now get the PCI Express Capabilities offset */
2582 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2583 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2584 /*
2585 * Hmm...can't get PCI-Express capabilities.
2586 * Falling back to default method.
2587 */
2588 bus_info_valid = FALSE;
2589 ixgbe_get_bus_info(hw);
2590 goto display;
2591 }
2592 /* ...and read the Link Status Register */
2593 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2594 offset + PCIE_LCSR) >> 16;
2595 ixgbe_set_pci_config_data_generic(hw, link);
2596
2597 display:
2598 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2599 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2600 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2601 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2602 "Unknown"),
2603 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2604 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2605 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2606 "Unknown"));
2607
2608 if (bus_info_valid) {
2609 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2610 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2611 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2612 device_printf(dev, "PCI-Express bandwidth available"
2613 " for this card\n is not sufficient for"
2614 " optimal performance.\n");
2615 device_printf(dev, "For optimal performance a x8 "
2616 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2617 }
2618 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2619 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2620 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2621 device_printf(dev, "PCI-Express bandwidth available"
2622 " for this card\n is not sufficient for"
2623 " optimal performance.\n");
2624 device_printf(dev, "For optimal performance a x8 "
2625 "PCIE Gen3 slot is required.\n");
2626 }
2627 } else
2628 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2629
2630 return;
2631 } /* ixgbe_get_slot_info */
2632
2633 /************************************************************************
2634 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2635 ************************************************************************/
2636 static inline void
2637 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2638 {
2639 struct ixgbe_hw *hw = &adapter->hw;
2640 struct ix_queue *que = &adapter->queues[vector];
2641 u64 queue = 1ULL << vector;
2642 u32 mask;
2643
2644 mutex_enter(&que->dc_mtx);
2645 if (que->disabled_count > 0 && --que->disabled_count > 0)
2646 goto out;
2647
2648 if (hw->mac.type == ixgbe_mac_82598EB) {
2649 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2650 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2651 } else {
2652 mask = (queue & 0xFFFFFFFF);
2653 if (mask)
2654 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2655 mask = (queue >> 32);
2656 if (mask)
2657 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2658 }
2659 out:
2660 mutex_exit(&que->dc_mtx);
2661 } /* ixgbe_enable_queue */
2662
/************************************************************************
 * ixgbe_disable_queue_internal
 *
 *   Mask the MSI-X interrupt for queue "vector". When "nestok" is true,
 *   disabling an already-disabled queue stacks by bumping
 *   que->disabled_count; when false, an already-disabled queue is left
 *   untouched (no extra nesting recorded).
 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = 1ULL << vector;	/* this queue's bit in EIMC/EIMC_EX */
	u32 mask;

	mutex_enter(&que->dc_mtx);

	/* Already masked in hardware: only account for nesting. */
	if (que->disabled_count > 0) {
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: all queue bits live in the single EIMC register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs: queue bits split across two EIMC_EX registers */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2697
/************************************************************************
 * ixgbe_disable_queue
 *
 *   Nestable disable: each call increments the queue's disable count,
 *   to be balanced by a matching ixgbe_enable_queue().
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2707
2708 /************************************************************************
2709 * ixgbe_sched_handle_que - schedule deferred packet processing
2710 ************************************************************************/
2711 static inline void
2712 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2713 {
2714
2715 if (que->txrx_use_workqueue) {
2716 /*
2717 * adapter->que_wq is bound to each CPU instead of
2718 * each NIC queue to reduce workqueue kthread. As we
2719 * should consider about interrupt affinity in this
2720 * function, the workqueue kthread must be WQ_PERCPU.
2721 * If create WQ_PERCPU workqueue kthread for each NIC
2722 * queue, that number of created workqueue kthread is
2723 * (number of used NIC queue) * (number of CPUs) =
2724 * (number of CPUs) ^ 2 most often.
2725 *
2726 * The same NIC queue's interrupts are avoided by
2727 * masking the queue's interrupt. And different
2728 * NIC queue's interrupts use different struct work
2729 * (que->wq_cookie). So, "enqueued flag" to avoid
2730 * twice workqueue_enqueue() is not required .
2731 */
2732 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2733 } else {
2734 softint_schedule(que->que_si);
2735 }
2736 }
2737
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Masks the queue interrupt, reaps Tx completions, optionally updates
 *   the adaptive interrupt moderation (AIM) setting, then either
 *   schedules deferred processing or re-enables the interrupt.
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue until deferred processing re-enables it */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes/packet per direction; take the larger of the two */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2839
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 *
 *   Refreshes the link state, then maps the detected physical layer
 *   and negotiated link speed onto an IFM_* media word, and appends
 *   the current flow-control pause flags.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and stop here */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T/TX) media */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Fiber media types */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	return;
} /* ixgbe_media_status */
2983
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 *
 *   Translates the requested IFM_* subtype into an ixgbe_link_speed
 *   mask, programs the MAC, and records the resulting advertise bits.
 *   Returns 0 on success, or EINVAL/EPERM/ENODEV on failure.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane links don't allow user-selected media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware reports it can do */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Re-derive the sysctl-visible advertise bitmap from the speed
	 * mask (bit 0 = 100M, 1 = 1G, 2 = 10G, 3 = 10M, 4 = 2.5G, 5 = 5G);
	 * 0 means "autonegotiate everything".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
3085
/************************************************************************
 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
 *
 *   Reads and clears the admin ("other") interrupt causes, dispatches
 *   them via ixgbe_intr_admin_common(), and re-enables all admin
 *   interrupts except those that the dispatcher asked to keep masked
 *   until deferred handling completes.
 ************************************************************************/
static int
ixgbe_msix_admin(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 eims_orig;
	u32 eims_disable = 0;

	++adapter->admin_irqev.ev_count;

	/* Snapshot the currently enabled mask so we can restore it below */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);

	/*
	 * First get the cause.
	 *
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause.
	 * At least, reading EICR clears lower 16bits of EIMS on 82598.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear all OTHER interrupts with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some OTHER interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);

	return 1;
} /* ixgbe_msix_admin */
3125
/************************************************************************
 * ixgbe_intr_admin_common
 *
 *   Decode the admin interrupt causes in "eicr". For each recognized
 *   cause, request the matching deferred task (IXGBE_REQUEST_TASK_*)
 *   and/or record in *eims_disable which EIMS bits must stay masked
 *   until that task has run (or to stop log spam on fatal conditions).
 ************************************************************************/
static void
ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr_mask;
	u32 task_requests = 0;
	s32 retval;

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		task_requests |= IXGBE_REQUEST_TASK_LSC;
		*eims_disable |= IXGBE_EIMS_LSC;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/*
		 * An interrupt might not arrive when a module is inserted.
		 * When an link status change interrupt occurred and the driver
		 * still regard SFP as unplugged, issue the module softint
		 * and then issue LSC interrupt.
		 */
		if ((eicr & eicr_mask)
		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
			&& (eicr & IXGBE_EICR_LSC))) {
			task_requests |= IXGBE_REQUEST_TASK_MOD;
			*eims_disable |= IXGBE_EIMS_LSC;
		}

		/* 82599 reports multi-speed fiber changes on SDP1 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			task_requests |= IXGBE_REQUEST_TASK_MSF;
			*eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* Causes below don't exist on the (older) 82598 MAC */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director reinit request */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* Only schedule one reinit at a time */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
				task_requests |= IXGBE_REQUEST_TASK_FDIR;
				/* Disable the interrupt */
				*eims_disable |= IXGBE_EIMS_FLOW_DIR;
			}
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			/* Disable interrupt to prevent log spam */
			*eims_disable |= IXGBE_EICR_ECC;
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				/* Disable interrupt to prevent log spam */
				*eims_disable |= IXGBE_EICR_GPI_SDP0_X550EM_a;

				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the dedicated TS bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				/* Disable interrupt to prevent log spam */
				*eims_disable |= IXGBE_EIMS_TS;

				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX)) {
			task_requests |= IXGBE_REQUEST_TASK_MBX;
			*eims_disable |= IXGBE_EIMS_MAILBOX;
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		retval = ixgbe_check_fan_failure(adapter, eicr, true);
		if (retval == IXGBE_ERR_FAN_FAILURE) {
			/* Disable interrupt to prevent log spam */
			*eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		task_requests |= IXGBE_REQUEST_TASK_PHY;
		*eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
	}

	/* Hand all collected work to the admin tasklet in one shot */
	if (task_requests != 0) {
		mutex_enter(&adapter->admin_mtx);
		adapter->task_requests |= task_requests;
		ixgbe_schedule_admin_tasklet(adapter);
		mutex_exit(&adapter->admin_mtx);
	}

}
3246
3247 static void
3248 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3249 {
3250
3251 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3252 itr |= itr << 16;
3253 else
3254 itr |= IXGBE_EITR_CNT_WDIS;
3255
3256 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3257 }
3258
3259
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler for the per-queue max interrupt rate. Reads back
 *   the current EITR interval and reports it as interrupts/second;
 *   on write, converts the requested rate into an EITR interval and
 *   programs the register. Returns 0, or EPERM in firmware recovery
 *   mode, or EINVAL for rates that violate the RSC minimum interval.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Extract the interval field (bits 3..11) of EITR */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;	/* 0 = no throttling configured */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		/* Clamp to the slowest supported rate */
		if (rate < 1000)
			rate = 1000;
		reg |= ((4000000 / rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3313
3314 const struct sysctlnode *
3315 ixgbe_sysctl_instance(struct adapter *adapter)
3316 {
3317 const char *dvname;
3318 struct sysctllog **log;
3319 int rc;
3320 const struct sysctlnode *rnode;
3321
3322 if (adapter->sysctltop != NULL)
3323 return adapter->sysctltop;
3324
3325 log = &adapter->sysctllog;
3326 dvname = device_xname(adapter->dev);
3327
3328 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3329 0, CTLTYPE_NODE, dvname,
3330 SYSCTL_DESCR("ixgbe information and settings"),
3331 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3332 goto err;
3333
3334 return rnode;
3335 err:
3336 device_printf(adapter->dev,
3337 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3338 return NULL;
3339 }
3340
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Create the per-device sysctl tree: generic knobs for every MAC,
 *   plus conditional nodes for X550-class DMA coalescing, Wake on LAN,
 *   the X552/X557-AT external PHY, firmware-PHY autonego and EEE.
 *   Creation failures are reported but non-fatal.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
	    SYSCTL_DESCR("Number of jumbo buffers per queue"),
	    NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM toggle from the loadable-module/global default */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for firmware-controlled PHYs on X550EM_a */
	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3504
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR0 (the device register window) and make sure PCI memory
 *   decoding is enabled. Returns 0 on success, ENXIO if the BAR is of
 *   an unexpected type or cannot be mapped.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3554
3555 static void
3556 ixgbe_free_deferred_handlers(struct adapter *adapter)
3557 {
3558 struct ix_queue *que = adapter->queues;
3559 struct tx_ring *txr = adapter->tx_rings;
3560 int i;
3561
3562 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3563 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3564 if (txr->txr_si != NULL)
3565 softint_disestablish(txr->txr_si);
3566 }
3567 if (que->que_si != NULL)
3568 softint_disestablish(que->que_si);
3569 }
3570 if (adapter->txr_wq != NULL)
3571 workqueue_destroy(adapter->txr_wq);
3572 if (adapter->txr_wq_enqueued != NULL)
3573 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3574 if (adapter->que_wq != NULL)
3575 workqueue_destroy(adapter->que_wq);
3576
3577 if (adapter->admin_wq != NULL) {
3578 workqueue_destroy(adapter->admin_wq);
3579 adapter->admin_wq = NULL;
3580 }
3581 if (adapter->timer_wq != NULL) {
3582 workqueue_destroy(adapter->timer_wq);
3583 adapter->timer_wq = NULL;
3584 }
3585 if (adapter->recovery_mode_timer_wq != NULL) {
3586 /*
3587 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3588 * the recovery_mode_timer workqueue, so call it here.
3589 */
3590 workqueue_wait(adapter->recovery_mode_timer_wq,
3591 &adapter->recovery_mode_timer_wc);
3592 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3593 workqueue_destroy(adapter->recovery_mode_timer_wq);
3594 adapter->recovery_mode_timer_wq = NULL;
3595 }
3596 } /* ixgbe_free_deferred_handlers */
3597
3598 /************************************************************************
3599 * ixgbe_detach - Device removal routine
3600 *
3601 * Called when the driver is being removed.
3602 * Stops the adapter and deallocates all the resources
3603 * that were allocated for driver operation.
3604 *
3605 * return 0 on success, positive on failure
3606 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while SR-IOV virtual functions are active. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	/*
	 * Mark the device as detaching; deferred work (e.g. the
	 * recovery-mode callout) checks this flag before rescheduling.
	 */
	adapter->osdep.detaching = true;
	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls
	 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
	 * directly.
	 */
	ixgbe_setup_low_power_mode(adapter);

	/* Wait for any in-flight callout handlers to finish. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	/* Drain queued admin/timer work and clear the pending flags. */
	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
	atomic_store_relaxed(&adapter->admin_pending, 0);
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Tear down the softints and workqueues set up at attach time. */
	ixgbe_free_deferred_handlers(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered at attach time. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irqev);
	evcnt_detach(&adapter->link_workev);
	evcnt_detach(&adapter->mod_workev);
	evcnt_detach(&adapter->msf_workev);
	evcnt_detach(&adapter->phy_workev);

	/* Per-traffic-class counters; some only exist on certain MACs. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue software and hardware counters. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Release ring/queue memory and the multicast shadow table. */
	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3801
3802 /************************************************************************
3803 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3804 *
3805 * Prepare the adapter/port for LPLU and/or WoL
3806 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ifnet *ifp = adapter->ifp;
	s32 error = 0;

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/*
		 * PHY resets are disabled around the Low Power Link Up
		 * transition and re-enabled afterwards.
		 */
		hw->phy.reset_disable = true;
		ixgbe_ifstop(ifp, 1);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_ifstop(ifp, 1);
	}

	IXGBE_CORE_LOCK(adapter);

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	IXGBE_CORE_UNLOCK(adapter);

	/* Returns 0 on success, or the LPLU entry error code. */
	return error;
} /* ixgbe_setup_low_power_mode */
3864
3865 /************************************************************************
3866 * ixgbe_shutdown - Shutdown entry point
3867 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Put the adapter into its lowest power state before power-off. */
	error = ixgbe_setup_low_power_mode(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3882
3883 /************************************************************************
3884 * ixgbe_suspend
3885 *
3886 * From D0 to D3
3887 ************************************************************************/
3888 static bool
3889 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3890 {
3891 struct adapter *adapter = device_private(dev);
3892 int error = 0;
3893
3894 INIT_DEBUGOUT("ixgbe_suspend: begin");
3895
3896 error = ixgbe_setup_low_power_mode(adapter);
3897
3898 return (error);
3899 } /* ixgbe_suspend */
3900
3901 /************************************************************************
3902 * ixgbe_resume
3903 *
3904 * From D3 to D0
3905 ************************************************************************/
3906 static bool
3907 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3908 {
3909 struct adapter *adapter = device_private(dev);
3910 struct ifnet *ifp = adapter->ifp;
3911 struct ixgbe_hw *hw = &adapter->hw;
3912 u32 wus;
3913
3914 INIT_DEBUGOUT("ixgbe_resume: begin");
3915
3916 IXGBE_CORE_LOCK(adapter);
3917
3918 /* Read & clear WUS register */
3919 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3920 if (wus)
3921 device_printf(dev, "Woken up by (WUS): %#010x\n",
3922 IXGBE_READ_REG(hw, IXGBE_WUS));
3923 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3924 /* And clear WUFC until next low-power transition */
3925 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3926
3927 /*
3928 * Required after D3->D0 transition;
3929 * will re-advertise all previous advertised speeds
3930 */
3931 if (ifp->if_flags & IFF_UP)
3932 ixgbe_init_locked(adapter);
3933
3934 IXGBE_CORE_UNLOCK(adapter);
3935
3936 return true;
3937 } /* ixgbe_resume */
3938
3939 /*
3940 * Set the various hardware offload abilities.
3941 *
3942 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3943 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3944 * mbuf offload flags the driver will understand.
3945 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally a stub: NetBSD has no direct equivalent of
	 * FreeBSD's if_hwassist field to program here.
	 * NOTE(review): offload enabling is presumably handled through
	 * if_capenable elsewhere in this port -- confirm before removing.
	 */
}
3951
3952 /************************************************************************
3953 * ixgbe_init_locked - Init entry point
3954 *
3955 * Used in two ways: It is used by the stack as an init
3956 * entry point in network interface structure. It is also
3957 * used by the driver as a hw/sw initialization routine to
3958 * get to a consistent state.
3959 *
 * No return value: on failure this routine logs the error with
 * device_printf() and stops the interface via ixgbe_stop_locked().
3961 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	/* Caller must hold the core lock (see ixgbe_init()). */
	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the periodic callouts before reinit. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_stop(&adapter->recovery_mode_timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to ~10ms for the RX enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic timers stopped above. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4224
4225 /************************************************************************
4226 * ixgbe_init
4227 ************************************************************************/
4228 static int
4229 ixgbe_init(struct ifnet *ifp)
4230 {
4231 struct adapter *adapter = ifp->if_softc;
4232
4233 IXGBE_CORE_LOCK(adapter);
4234 ixgbe_init_locked(adapter);
4235 IXGBE_CORE_UNLOCK(adapter);
4236
4237 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4238 } /* ixgbe_init */
4239
4240 /************************************************************************
4241 * ixgbe_set_ivar
4242 *
4243 * Setup the correct IVAR register for a particular MSI-X interrupt
4244 * (yes this is all very magic and confusing :)
4245 * - entry is the register array entry
4246 * - vector is the MSI-X vector for this queue
4247 * - type is RX/TX/MISC
4248 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the entry valid so the hardware honors the mapping. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: flat IVAR table; RX entries first, TX entries
		 * offset by 64.  Each 32-bit IVAR register holds four
		 * 8-bit entries, selected by (entry & 0x3).
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* Read-modify-write: replace only this entry's byte. */
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			/*
			 * Each IVAR register covers two queues; within a
			 * queue's 16-bit half, the low byte is RX (type 0)
			 * and the next byte is TX (type 1).
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
4292
4293 /************************************************************************
4294 * ixgbe_configure_ivars
4295 ************************************************************************/
4296 static void
4297 ixgbe_configure_ivars(struct adapter *adapter)
4298 {
4299 struct ix_queue *que = adapter->queues;
4300 u32 newitr;
4301
4302 if (ixgbe_max_interrupt_rate > 0)
4303 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4304 else {
4305 /*
4306 * Disable DMA coalescing if interrupt moderation is
4307 * disabled.
4308 */
4309 adapter->dmac = 0;
4310 newitr = 0;
4311 }
4312
4313 for (int i = 0; i < adapter->num_queues; i++, que++) {
4314 struct rx_ring *rxr = &adapter->rx_rings[i];
4315 struct tx_ring *txr = &adapter->tx_rings[i];
4316 /* First the RX queue entry */
4317 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4318 /* ... and the TX */
4319 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4320 /* Set an Initial EITR value */
4321 ixgbe_eitr_write(adapter, que->msix, newitr);
4322 /*
4323 * To eliminate influence of the previous state.
4324 * At this point, Tx/Rx interrupt handler
4325 * (ixgbe_msix_que()) cannot be called, so both
4326 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4327 */
4328 que->eitr_setting = 0;
4329 }
4330
4331 /* For the Link interrupt */
4332 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4333 } /* ixgbe_configure_ivars */
4334
4335 /************************************************************************
4336 * ixgbe_config_gpie
4337 ************************************************************************/
4338 static void
4339 ixgbe_config_gpie(struct adapter *adapter)
4340 {
4341 struct ixgbe_hw *hw = &adapter->hw;
4342 u32 gpie;
4343
4344 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4345
4346 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4347 /* Enable Enhanced MSI-X mode */
4348 gpie |= IXGBE_GPIE_MSIX_MODE
4349 | IXGBE_GPIE_EIAME
4350 | IXGBE_GPIE_PBA_SUPPORT
4351 | IXGBE_GPIE_OCD;
4352 }
4353
4354 /* Fan Failure Interrupt */
4355 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4356 gpie |= IXGBE_SDP1_GPIEN;
4357
4358 /* Thermal Sensor Interrupt */
4359 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4360 gpie |= IXGBE_SDP0_GPIEN_X540;
4361
4362 /* Link detection */
4363 switch (hw->mac.type) {
4364 case ixgbe_mac_82599EB:
4365 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4366 break;
4367 case ixgbe_mac_X550EM_x:
4368 case ixgbe_mac_X550EM_a:
4369 gpie |= IXGBE_SDP0_GPIEN_X540;
4370 break;
4371 default:
4372 break;
4373 }
4374
4375 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4376
4377 } /* ixgbe_config_gpie */
4378
4379 /************************************************************************
4380 * ixgbe_config_delay_values
4381 *
4382 * Requires adapter->max_frame_size to be set.
4383 ************************************************************************/
4384 static void
4385 ixgbe_config_delay_values(struct adapter *adapter)
4386 {
4387 struct ixgbe_hw *hw = &adapter->hw;
4388 u32 rxpb, frame, size, tmp;
4389
4390 frame = adapter->max_frame_size;
4391
4392 /* Calculate High Water */
4393 switch (hw->mac.type) {
4394 case ixgbe_mac_X540:
4395 case ixgbe_mac_X550:
4396 case ixgbe_mac_X550EM_x:
4397 case ixgbe_mac_X550EM_a:
4398 tmp = IXGBE_DV_X540(frame, frame);
4399 break;
4400 default:
4401 tmp = IXGBE_DV(frame, frame);
4402 break;
4403 }
4404 size = IXGBE_BT2KB(tmp);
4405 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4406 hw->fc.high_water[0] = rxpb - size;
4407
4408 /* Now calculate Low Water */
4409 switch (hw->mac.type) {
4410 case ixgbe_mac_X540:
4411 case ixgbe_mac_X550:
4412 case ixgbe_mac_X550EM_x:
4413 case ixgbe_mac_X550EM_a:
4414 tmp = IXGBE_LOW_DV_X540(frame);
4415 break;
4416 default:
4417 tmp = IXGBE_LOW_DV(frame);
4418 break;
4419 }
4420 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4421
4422 hw->fc.pause_time = IXGBE_FC_PAUSE;
4423 hw->fc.send_xon = TRUE;
4424 } /* ixgbe_config_delay_values */
4425
4426 /************************************************************************
4427 * ixgbe_set_rxfilter - Multicast Update
4428 *
4429 * Called whenever multicast address list is updated.
4430 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Snapshot the multicast list into the mta[] shadow array. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI when the table would overflow or
		 * a range of addresses (addrlo != addrhi) is requested.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Translate IFF_PROMISC / ALLMULTI state into FCTRL bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/*
		 * ETHER_LOCK can be dropped here because the list was
		 * already copied into mta[] above.
		 */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4486
4487 /************************************************************************
4488 * ixgbe_mc_array_itr
4489 *
4490 * An iterator function needed by the multicast shared code.
4491 * It feeds the shared code routine the addresses in the
4492 * array of ixgbe_set_rxfilter() one by one.
4493 ************************************************************************/
4494 static u8 *
4495 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4496 {
4497 struct ixgbe_mc_addr *mta;
4498
4499 mta = (struct ixgbe_mc_addr *)*update_ptr;
4500 *vmdq = mta->vmdq;
4501
4502 *update_ptr = (u8*)(mta + 1);
4503
4504 return (mta->addr);
4505 } /* ixgbe_mc_array_itr */
4506
4507 /************************************************************************
4508 * ixgbe_local_timer - Timer routine
4509 *
4510 * Checks for link status, updates statistics,
4511 * and runs the watchdog check.
4512 ************************************************************************/
4513 static void
4514 ixgbe_local_timer(void *arg)
4515 {
4516 struct adapter *adapter = arg;
4517
4518 if (adapter->schedule_wqs_ok) {
4519 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4520 workqueue_enqueue(adapter->timer_wq,
4521 &adapter->timer_wc, NULL);
4522 }
4523 }
4524
static void
ixgbe_handle_timer(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	IXGBE_CORE_LOCK(adapter);

	/* Check for pluggable optics */
	if (ixgbe_is_sfp(hw)) {
		bool sched_mod_task = false;

		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
			 * any GPIO(SDP). So just schedule TASK_MOD.
			 */
			sched_mod_task = true;
		} else {
			bool was_full, is_full;

			was_full =
			    hw->phy.sfp_type != ixgbe_sfp_type_not_present;
			is_full = ixgbe_sfp_cage_full(hw);

			/* Do probe if cage state changed */
			if (was_full ^ is_full)
				sched_mod_task = true;
		}
		if (sched_mod_task) {
			/* task_requests is modified under admin_mtx. */
			mutex_enter(&adapter->admin_mtx);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
			ixgbe_schedule_admin_tasklet(adapter);
			mutex_exit(&adapter->admin_mtx);
		}
	}

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum the per-queue TX DMA setup error counters into the totals. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

	/* Allow the next callout tick to enqueue us again, then rearm. */
	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* Every queue is hung: log it, count the event and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_timer */
4656
4657 /************************************************************************
4658 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4659 ************************************************************************/
4660 static void
4661 ixgbe_recovery_mode_timer(void *arg)
4662 {
4663 struct adapter *adapter = arg;
4664
4665 if (__predict_true(adapter->osdep.detaching == false)) {
4666 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4667 0, 1) == 0) {
4668 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4669 &adapter->recovery_mode_timer_wc, NULL);
4670 }
4671 }
4672 }
4673
4674 static void
4675 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4676 {
4677 struct adapter *adapter = context;
4678 struct ixgbe_hw *hw = &adapter->hw;
4679
4680 IXGBE_CORE_LOCK(adapter);
4681 if (ixgbe_fw_recovery_mode(hw)) {
4682 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4683 /* Firmware error detected, entering recovery mode */
4684 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4685
4686 if (hw->adapter_stopped == FALSE)
4687 ixgbe_stop_locked(adapter);
4688 }
4689 } else
4690 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4691
4692 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4693 callout_reset(&adapter->recovery_mode_timer, hz,
4694 ixgbe_recovery_mode_timer, adapter);
4695 IXGBE_CORE_UNLOCK(adapter);
4696 } /* ixgbe_handle_recovery_mode_timer */
4697
4698 /************************************************************************
4699 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4700 * bool int_en: true if it's called when the interrupt is enabled.
4701 ************************************************************************/
static void
ixgbe_handle_mod(void *context, bool int_en)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	enum ixgbe_sfp_type last_sfp_type;
	u32 err;
	bool last_unsupported_sfp_recovery;

	/* Caller must hold the core lock; we drop/reacquire it below. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Snapshot the previous SFP state to detect module changes later. */
	last_sfp_type = hw->phy.sfp_type;
	last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
	++adapter->mod_workev.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk-errata chips (everything except 82598) can raise
		 * spurious module events; ignore the event if the SFP+ cage
		 * is actually empty.
		 */
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw))
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		/* Print only on a fresh failure, not while still recovering. */
		if (last_unsupported_sfp_recovery == false)
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
	    (hw->phy.sfp_type != last_sfp_type)) {
		/* A module is inserted and changed. */

		/* 82598 needs a PHY reset; newer MACs program the SFP. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}

out:
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Adjust media types shown in ifconfig */
	/*
	 * The core lock must be dropped here because the ifmedia functions
	 * take it themselves (ifmedia_lock() is CORE_LOCK in this driver).
	 */
	IXGBE_CORE_UNLOCK(adapter);
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_LOCK(adapter);

	/*
	 * Don't shedule MSF event if the chip is 82598. 82598 doesn't support
	 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
	 * flap because the function calls setup_link().
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		mutex_enter(&adapter->admin_mtx);
		if (int_en)
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
		else
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
		mutex_exit(&adapter->admin_mtx);
	}

	/*
	 * Don't call ixgbe_schedule_admin_tasklet() because we are on
	 * the workqueue now.
	 */
} /* ixgbe_handle_mod */
4788
4789
4790 /************************************************************************
4791 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4792 ************************************************************************/
4793 static void
4794 ixgbe_handle_msf(void *context)
4795 {
4796 struct adapter *adapter = context;
4797 struct ixgbe_hw *hw = &adapter->hw;
4798 u32 autoneg;
4799 bool negotiate;
4800
4801 KASSERT(mutex_owned(&adapter->core_mtx));
4802
4803 ++adapter->msf_workev.ev_count;
4804
4805 autoneg = hw->phy.autoneg_advertised;
4806 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4807 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4808 if (hw->mac.ops.setup_link)
4809 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4810 } /* ixgbe_handle_msf */
4811
4812 /************************************************************************
4813 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4814 ************************************************************************/
4815 static void
4816 ixgbe_handle_phy(void *context)
4817 {
4818 struct adapter *adapter = context;
4819 struct ixgbe_hw *hw = &adapter->hw;
4820 int error;
4821
4822 KASSERT(mutex_owned(&adapter->core_mtx));
4823
4824 ++adapter->phy_workev.ev_count;
4825 error = hw->phy.ops.handle_lasi(hw);
4826 if (error == IXGBE_ERR_OVERTEMP)
4827 device_printf(adapter->dev,
4828 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4829 " PHY will downshift to lower power state!\n");
4830 else if (error)
4831 device_printf(adapter->dev,
4832 "Error handling LASI interrupt: %d\n", error);
4833 } /* ixgbe_handle_phy */
4834
/*
 * ixgbe_handle_admin - Workqueue handler servicing deferred admin events
 * (link, SFP module, MSF, external PHY, FDIR) requested by the interrupt
 * paths, then re-enabling the EIMS bits for the sources it serviced.
 */
static void
ixgbe_handle_admin(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 task_requests;
	u32 eims_enable = 0;

	/* Atomically snapshot and clear the pending request bits. */
	mutex_enter(&adapter->admin_mtx);
	adapter->admin_pending = 0;
	task_requests = adapter->task_requests;
	adapter->task_requests = 0;
	mutex_exit(&adapter->admin_mtx);

	/*
	 * Hold the IFNET_LOCK across this entire call. This will
	 * prevent additional changes to adapter->phy_layer
	 * and serialize calls to this tasklet. We cannot hold the
	 * CORE_LOCK while calling into the ifmedia functions as
	 * they call ifmedia_lock() and the lock is CORE_LOCK.
	 */
	IFNET_LOCK(ifp);
	IXGBE_CORE_LOCK(adapter);
	if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
		ixgbe_handle_link(adapter);
		eims_enable |= IXGBE_EIMS_LSC;
	}
	if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) {
		/* "WOI" variant: service module event without re-enabling
		 * the corresponding interrupt afterwards. */
		ixgbe_handle_mod(adapter, false);
	}
	if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
		ixgbe_handle_mod(adapter, true);
		/* Re-enable the SDP interrupt used for module detection. */
		if (hw->mac.type >= ixgbe_mac_X540)
			eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
		else
			eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
	}
	if ((task_requests
	    & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
		ixgbe_handle_msf(adapter);
		/* Only the non-WOI MSF request on 82599 re-enables SDP1. */
		if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
		    (hw->mac.type == ixgbe_mac_82599EB))
			eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
	}
	if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
		ixgbe_handle_phy(adapter);
		eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
	}
	if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
		ixgbe_reinit_fdir(adapter);
		eims_enable |= IXGBE_EIMS_FLOW_DIR;
	}
#if 0 /* notyet */
	if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
		ixgbe_handle_mbx(adapter);
		eims_enable |= IXGBE_EIMS_MAILBOX;
	}
#endif
	/* Unmask the interrupt sources whose events were serviced above. */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);

	IXGBE_CORE_UNLOCK(adapter);
	IFNET_UNLOCK(ifp);
} /* ixgbe_handle_admin */
4899
/*
 * ixgbe_ifstop - ifnet stop callback: stop the hardware under the core
 * lock, then wait for any in-flight timer work to finish.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain the timer workqueue before clearing its pending flag. */
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);
}
4912
4913 /************************************************************************
4914 * ixgbe_stop_locked - Stop the hardware
4915 *
4916 * Disables all traffic on the adapter by issuing a
4917 * global reset on the MAC and deallocates TX/RX buffers.
4918 ************************************************************************/
static void
ixgbe_stop_locked(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
	/* Mask interrupts and cancel the periodic timer first. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before ixgbe_stop_adapter() so the stop
	 * sequence runs even if a previous call already flagged the
	 * adapter as stopped.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop_locked */
4957
4958 /************************************************************************
4959 * ixgbe_update_link_status - Update OS on link state
4960 *
4961 * Note: Only updates the OS on the cached link state.
4962 * The real check of the hardware only happens with
4963 * a link interrupt.
4964 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a transition into the UP state. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* Notify the network stack of the new state. */
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued work now that the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
5051
5052 /************************************************************************
5053 * ixgbe_config_dmac - Configure DMA Coalescing
5054 ************************************************************************/
5055 static void
5056 ixgbe_config_dmac(struct adapter *adapter)
5057 {
5058 struct ixgbe_hw *hw = &adapter->hw;
5059 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5060
5061 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5062 return;
5063
5064 if (dcfg->watchdog_timer ^ adapter->dmac ||
5065 dcfg->link_speed ^ adapter->link_speed) {
5066 dcfg->watchdog_timer = adapter->dmac;
5067 dcfg->fcoe_en = false;
5068 dcfg->link_speed = adapter->link_speed;
5069 dcfg->num_tcs = 1;
5070
5071 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5072 dcfg->watchdog_timer, dcfg->link_speed);
5073
5074 hw->mac.ops.dmac_config(hw);
5075 }
5076 } /* ixgbe_config_dmac */
5077
5078 /************************************************************************
5079 * ixgbe_enable_intr
5080 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from all causes except the per-queue RX/TX bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific interrupt causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	/* Unmask the assembled non-queue interrupt causes. */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		/*
		 * It's not required to set TCP_TIMER because we don't use
		 * it.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5159
5160 /************************************************************************
5161 * ixgbe_disable_intr_internal
5162 ************************************************************************/
5163 static void
5164 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5165 {
5166 struct ix_queue *que = adapter->queues;
5167
5168 /* disable interrupts other than queues */
5169 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5170
5171 if (adapter->msix_mem)
5172 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5173
5174 for (int i = 0; i < adapter->num_queues; i++, que++)
5175 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5176
5177 IXGBE_WRITE_FLUSH(&adapter->hw);
5178
5179 } /* ixgbe_do_disable_intr_internal */
5180
5181 /************************************************************************
5182 * ixgbe_disable_intr
5183 ************************************************************************/
/* Disable all interrupts, allowing nested queue disables (nestok=true). */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5190
5191 /************************************************************************
5192 * ixgbe_ensure_disabled_intr
5193 ************************************************************************/
/* Disable all interrupts, passing nestok=false to the internal helper. */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5200
5201 /************************************************************************
5202 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5203 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr = adapter->tx_rings;
	u32 eicr;
	u32 eims_orig;
	u32 eims_enable = 0;
	u32 eims_disable = 0;

	/* Save the current mask so it can be restored on exit. */
	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/*
	 * Silicon errata #26 on 82598. Disable all interrupts before reading
	 * EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Read and clear EICR */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	if (eicr == 0) {
		/* Not ours: restore the saved mask and bail. */
		adapter->stats.pf.intzero.ev_count++;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
		return 0;
	}

	/* Queue (0) intr */
	if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
		++que->irqs.ev_count;

		/*
		 * The same as ixgbe_msix_que() about
		 * "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);

		/* Defer RX processing; keep queue 0 masked meanwhile. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
		/* Disable queue 0 interrupt */
		eims_disable |= 1UL << 0;

	} else
		eims_enable |= IXGBE_EIMC_RTX_QUEUE;

	/* Handle admin causes (link, SFP, ...) shared with MSI-X path. */
	ixgbe_intr_admin_common(adapter, eicr, &eims_disable);

	/* Re-enable some interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS,
	    (eims_orig & ~eims_disable) | eims_enable);

	return 1;
} /* ixgbe_legacy_irq */
5267
5268 /************************************************************************
5269 * ixgbe_free_pciintr_resources
5270 ************************************************************************/
5271 static void
5272 ixgbe_free_pciintr_resources(struct adapter *adapter)
5273 {
5274 struct ix_queue *que = adapter->queues;
5275 int rid;
5276
5277 /*
5278 * Release all msix queue resources:
5279 */
5280 for (int i = 0; i < adapter->num_queues; i++, que++) {
5281 if (que->res != NULL) {
5282 pci_intr_disestablish(adapter->osdep.pc,
5283 adapter->osdep.ihs[i]);
5284 adapter->osdep.ihs[i] = NULL;
5285 }
5286 }
5287
5288 /* Clean the Legacy or Link interrupt last */
5289 if (adapter->vector) /* we are doing MSIX */
5290 rid = adapter->vector;
5291 else
5292 rid = 0;
5293
5294 if (adapter->osdep.ihs[rid] != NULL) {
5295 pci_intr_disestablish(adapter->osdep.pc,
5296 adapter->osdep.ihs[rid]);
5297 adapter->osdep.ihs[rid] = NULL;
5298 }
5299
5300 if (adapter->osdep.intrs != NULL) {
5301 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5302 adapter->osdep.nintrs);
5303 adapter->osdep.intrs = NULL;
5304 }
5305 } /* ixgbe_free_pciintr_resources */
5306
5307 /************************************************************************
5308 * ixgbe_free_pci_resources
5309 ************************************************************************/
5310 static void
5311 ixgbe_free_pci_resources(struct adapter *adapter)
5312 {
5313
5314 ixgbe_free_pciintr_resources(adapter);
5315
5316 if (adapter->osdep.mem_size != 0) {
5317 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5318 adapter->osdep.mem_bus_space_handle,
5319 adapter->osdep.mem_size);
5320 }
5321
5322 } /* ixgbe_free_pci_resources */
5323
5324 /************************************************************************
5325 * ixgbe_set_sysctl_value
5326 ************************************************************************/
5327 static void
5328 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5329 const char *description, int *limit, int value)
5330 {
5331 device_t dev = adapter->dev;
5332 struct sysctllog **log;
5333 const struct sysctlnode *rnode, *cnode;
5334
5335 /*
5336 * It's not required to check recovery mode because this function never
5337 * touches hardware.
5338 */
5339
5340 log = &adapter->sysctllog;
5341 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5342 aprint_error_dev(dev, "could not create sysctl root\n");
5343 return;
5344 }
5345 if (sysctl_createv(log, 0, &rnode, &cnode,
5346 CTLFLAG_READWRITE, CTLTYPE_INT,
5347 name, SYSCTL_DESCR(description),
5348 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5349 aprint_error_dev(dev, "could not create sysctl\n");
5350 *limit = value;
5351 } /* ixgbe_set_sysctl_value */
5352
5353 /************************************************************************
5354 * ixgbe_sysctl_flowcntl
5355 *
5356 * SYSCTL wrapper around setting Flow Control
5357 ************************************************************************/
5358 static int
5359 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5360 {
5361 struct sysctlnode node = *rnode;
5362 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5363 int error, fc;
5364
5365 if (ixgbe_fw_recovery_mode_swflag(adapter))
5366 return (EPERM);
5367
5368 fc = adapter->hw.fc.current_mode;
5369 node.sysctl_data = &fc;
5370 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5371 if (error != 0 || newp == NULL)
5372 return error;
5373
5374 /* Don't bother if it's not changed */
5375 if (fc == adapter->hw.fc.current_mode)
5376 return (0);
5377
5378 return ixgbe_set_flowcntl(adapter, fc);
5379 } /* ixgbe_sysctl_flowcntl */
5380
5381 /************************************************************************
5382 * ixgbe_set_flowcntl - Set flow control
5383 *
5384 * Flow control values:
5385 * 0 - off
5386 * 1 - rx pause
5387 * 2 - tx pause
5388 * 3 - full
5389 ************************************************************************/
5390 static int
5391 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5392 {
5393 switch (fc) {
5394 case ixgbe_fc_rx_pause:
5395 case ixgbe_fc_tx_pause:
5396 case ixgbe_fc_full:
5397 adapter->hw.fc.requested_mode = fc;
5398 if (adapter->num_queues > 1)
5399 ixgbe_disable_rx_drop(adapter);
5400 break;
5401 case ixgbe_fc_none:
5402 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5403 if (adapter->num_queues > 1)
5404 ixgbe_enable_rx_drop(adapter);
5405 break;
5406 default:
5407 return (EINVAL);
5408 }
5409
5410 #if 0 /* XXX NetBSD */
5411 /* Don't autoneg if forcing a value */
5412 adapter->hw.fc.disable_fc_autoneg = TRUE;
5413 #endif
5414 ixgbe_fc_enable(&adapter->hw);
5415
5416 return (0);
5417 } /* ixgbe_set_flowcntl */
5418
5419 /************************************************************************
5420 * ixgbe_enable_rx_drop
5421 *
5422 * Enable the hardware to drop packets when the buffer is
5423 * full. This is useful with multiqueue, so that no single
5424 * queue being full stalls the entire RX engine. We only
5425 * enable this when Multiqueue is enabled AND Flow Control
5426 * is disabled.
5427 ************************************************************************/
5428 static void
5429 ixgbe_enable_rx_drop(struct adapter *adapter)
5430 {
5431 struct ixgbe_hw *hw = &adapter->hw;
5432 struct rx_ring *rxr;
5433 u32 srrctl;
5434
5435 for (int i = 0; i < adapter->num_queues; i++) {
5436 rxr = &adapter->rx_rings[i];
5437 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5438 srrctl |= IXGBE_SRRCTL_DROP_EN;
5439 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5440 }
5441
5442 /* enable drop for each vf */
5443 for (int i = 0; i < adapter->num_vfs; i++) {
5444 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5445 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5446 IXGBE_QDE_ENABLE));
5447 }
5448 } /* ixgbe_enable_rx_drop */
5449
5450 /************************************************************************
5451 * ixgbe_disable_rx_drop
5452 ************************************************************************/
5453 static void
5454 ixgbe_disable_rx_drop(struct adapter *adapter)
5455 {
5456 struct ixgbe_hw *hw = &adapter->hw;
5457 struct rx_ring *rxr;
5458 u32 srrctl;
5459
5460 for (int i = 0; i < adapter->num_queues; i++) {
5461 rxr = &adapter->rx_rings[i];
5462 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5463 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5464 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5465 }
5466
5467 /* disable drop for each vf */
5468 for (int i = 0; i < adapter->num_vfs; i++) {
5469 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5470 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5471 }
5472 } /* ixgbe_disable_rx_drop */
5473
5474 /************************************************************************
5475 * ixgbe_sysctl_advertise
5476 *
5477 * SYSCTL wrapper around setting advertised speed
5478 ************************************************************************/
5479 static int
5480 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5481 {
5482 struct sysctlnode node = *rnode;
5483 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5484 int error = 0, advertise;
5485
5486 if (ixgbe_fw_recovery_mode_swflag(adapter))
5487 return (EPERM);
5488
5489 advertise = adapter->advertise;
5490 node.sysctl_data = &advertise;
5491 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5492 if (error != 0 || newp == NULL)
5493 return error;
5494
5495 return ixgbe_set_advertise(adapter, advertise);
5496 } /* ixgbe_sysctl_advertise */
5497
5498 /************************************************************************
5499 * ixgbe_set_advertise - Control advertised link speed
5500 *
5501 * Flags:
5502 * 0x00 - Default (all capable link speed)
5503 * 0x01 - advertise 100 Mb
5504 * 0x02 - advertise 1G
5505 * 0x04 - advertise 10G
5506 * 0x08 - advertise 10 Mb
5507 * 0x10 - advertise 2.5G
5508 * 0x20 - advertise 5G
5509 ************************************************************************/
5510 static int
5511 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5512 {
5513 device_t dev;
5514 struct ixgbe_hw *hw;
5515 ixgbe_link_speed speed = 0;
5516 ixgbe_link_speed link_caps = 0;
5517 s32 err = IXGBE_NOT_IMPLEMENTED;
5518 bool negotiate = FALSE;
5519
5520 /* Checks to validate new value */
5521 if (adapter->advertise == advertise) /* no change */
5522 return (0);
5523
5524 dev = adapter->dev;
5525 hw = &adapter->hw;
5526
5527 /* No speed changes for backplane media */
5528 if (hw->phy.media_type == ixgbe_media_type_backplane)
5529 return (ENODEV);
5530
5531 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5532 (hw->phy.multispeed_fiber))) {
5533 device_printf(dev,
5534 "Advertised speed can only be set on copper or "
5535 "multispeed fiber media types.\n");
5536 return (EINVAL);
5537 }
5538
5539 if (advertise < 0x0 || advertise > 0x3f) {
5540 device_printf(dev,
5541 "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5542 return (EINVAL);
5543 }
5544
5545 if (hw->mac.ops.get_link_capabilities) {
5546 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5547 &negotiate);
5548 if (err != IXGBE_SUCCESS) {
5549 device_printf(dev, "Unable to determine supported advertise speeds\n");
5550 return (ENODEV);
5551 }
5552 }
5553
5554 /* Set new value and report new advertised mode */
5555 if (advertise & 0x1) {
5556 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5557 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5558 return (EINVAL);
5559 }
5560 speed |= IXGBE_LINK_SPEED_100_FULL;
5561 }
5562 if (advertise & 0x2) {
5563 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5564 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5565 return (EINVAL);
5566 }
5567 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5568 }
5569 if (advertise & 0x4) {
5570 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5571 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5572 return (EINVAL);
5573 }
5574 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5575 }
5576 if (advertise & 0x8) {
5577 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5578 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5579 return (EINVAL);
5580 }
5581 speed |= IXGBE_LINK_SPEED_10_FULL;
5582 }
5583 if (advertise & 0x10) {
5584 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5585 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5586 return (EINVAL);
5587 }
5588 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5589 }
5590 if (advertise & 0x20) {
5591 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5592 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5593 return (EINVAL);
5594 }
5595 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5596 }
5597 if (advertise == 0)
5598 speed = link_caps; /* All capable link speed */
5599
5600 hw->mac.autotry_restart = TRUE;
5601 hw->mac.ops.setup_link(hw, speed, TRUE);
5602 adapter->advertise = advertise;
5603
5604 return (0);
5605 } /* ixgbe_set_advertise */
5606
5607 /************************************************************************
5608 * ixgbe_get_advertise - Get current advertised speed settings
5609 *
5610 * Formatted for sysctl usage.
5611 * Flags:
5612 * 0x01 - advertise 100 Mb
5613 * 0x02 - advertise 1G
5614 * 0x04 - advertise 10G
5615 * 0x08 - advertise 10 Mb (yes, Mb)
5616 * 0x10 - advertise 2.5G
5617 * 0x20 - advertise 5G
5618 ************************************************************************/
5619 static int
5620 ixgbe_get_advertise(struct adapter *adapter)
5621 {
5622 struct ixgbe_hw *hw = &adapter->hw;
5623 int speed;
5624 ixgbe_link_speed link_caps = 0;
5625 s32 err;
5626 bool negotiate = FALSE;
5627
5628 /*
5629 * Advertised speed means nothing unless it's copper or
5630 * multi-speed fiber
5631 */
5632 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5633 !(hw->phy.multispeed_fiber))
5634 return (0);
5635
5636 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5637 if (err != IXGBE_SUCCESS)
5638 return (0);
5639
5640 speed =
5641 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5642 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5643 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5644 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5645 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5646 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5647
5648 return speed;
5649 } /* ixgbe_get_advertise */
5650
5651 /************************************************************************
5652 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5653 *
5654 * Control values:
5655 * 0/1 - off / on (use default value of 1000)
5656 *
5657 * Legal timer values are:
5658 * 50,100,250,500,1000,2000,5000,10000
5659 *
5660 * Turning off interrupt moderation will also turn this off.
5661 ************************************************************************/
5662 static int
5663 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5664 {
5665 struct sysctlnode node = *rnode;
5666 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5667 struct ifnet *ifp = adapter->ifp;
5668 int error;
5669 int newval;
5670
5671 if (ixgbe_fw_recovery_mode_swflag(adapter))
5672 return (EPERM);
5673
5674 newval = adapter->dmac;
5675 node.sysctl_data = &newval;
5676 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5677 if ((error) || (newp == NULL))
5678 return (error);
5679
5680 switch (newval) {
5681 case 0:
5682 /* Disabled */
5683 adapter->dmac = 0;
5684 break;
5685 case 1:
5686 /* Enable and use default */
5687 adapter->dmac = 1000;
5688 break;
5689 case 50:
5690 case 100:
5691 case 250:
5692 case 500:
5693 case 1000:
5694 case 2000:
5695 case 5000:
5696 case 10000:
5697 /* Legal values - allow */
5698 adapter->dmac = newval;
5699 break;
5700 default:
5701 /* Do nothing, illegal value */
5702 return (EINVAL);
5703 }
5704
5705 /* Re-initialize hardware if it's already running */
5706 if (ifp->if_flags & IFF_RUNNING)
5707 ifp->if_init(ifp);
5708
5709 return (0);
5710 }
5711
5712 #ifdef IXGBE_DEBUG
5713 /************************************************************************
5714 * ixgbe_sysctl_power_state
5715 *
5716 * Sysctl to test power states
5717 * Values:
5718 * 0 - set device to D0
5719 * 3 - set device to D3
5720 * (none) - get current device power state
5721 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this branch is compiled out and is an unported
	 * FreeBSD leftover -- it references req->newp, pci_get_powerstate()
	 * and DEVICE_SUSPEND/DEVICE_RESUME, none of which exist in this
	 * form on NetBSD.  It must be ported before 'notyet' is defined.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	/* Nothing to do when the requested state matches the current one */
	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state testing is not implemented on NetBSD; report success. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5757 #endif
5758
5759 /************************************************************************
5760 * ixgbe_sysctl_wol_enable
5761 *
5762 * Sysctl to enable/disable the WoL capability,
5763 * if supported by the adapter.
5764 *
5765 * Values:
5766 * 0 - disabled
5767 * 1 - enabled
5768 ************************************************************************/
5769 static int
5770 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5771 {
5772 struct sysctlnode node = *rnode;
5773 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5774 struct ixgbe_hw *hw = &adapter->hw;
5775 bool new_wol_enabled;
5776 int error = 0;
5777
5778 /*
5779 * It's not required to check recovery mode because this function never
5780 * touches hardware.
5781 */
5782 new_wol_enabled = hw->wol_enabled;
5783 node.sysctl_data = &new_wol_enabled;
5784 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5785 if ((error) || (newp == NULL))
5786 return (error);
5787 if (new_wol_enabled == hw->wol_enabled)
5788 return (0);
5789
5790 if (new_wol_enabled && !adapter->wol_support)
5791 return (ENODEV);
5792 else
5793 hw->wol_enabled = new_wol_enabled;
5794
5795 return (0);
5796 } /* ixgbe_sysctl_wol_enable */
5797
5798 /************************************************************************
5799 * ixgbe_sysctl_wufc - Wake Up Filter Control
5800 *
5801 * Sysctl to enable/disable the types of packets that the
5802 * adapter will wake up on upon receipt.
5803 * Flags:
5804 * 0x1 - Link Status Change
5805 * 0x2 - Magic Packet
5806 * 0x4 - Direct Exact
5807 * 0x8 - Directed Multicast
5808 * 0x10 - Broadcast
5809 * 0x20 - ARP/IPv4 Request Packet
5810 * 0x40 - Direct IPv4 Packet
5811 * 0x80 - Direct IPv6 Packet
5812 *
5813 * Settings not listed above will cause the sysctl to return an error.
5814 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	/* Unchanged value: nothing to do */
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the flag bits documented above (0x01-0x80) may be written */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	new_wufc &= 0xff;
	/*
	 * NOTE(review): the 0xffffff mask below also covers bits 0-7, so the
	 * previous value's low filter bits are OR'd back in and a set filter
	 * bit can never be cleared through this sysctl.  Verify whether the
	 * intended mask was 0xffffff00 (preserve only the non-user bits).
	 */
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5844
5845 #ifdef IXGBE_DEBUG
5846 /************************************************************************
5847 * ixgbe_sysctl_print_rss_config
5848 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): compiled out.  This body uses the FreeBSD sbuf(9)
	 * API and the FreeBSD sysctl 'req' argument, neither of which is
	 * available on NetBSD; it must be ported before enabling.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has 128 RETA entries (RETA + ERETA) */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the extended ERETA array */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5905 #endif /* IXGBE_DEBUG */
5906
5907 /************************************************************************
5908 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5909 *
5910 * For X552/X557-AT devices using an external PHY
5911 ************************************************************************/
5912 static int
5913 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5914 {
5915 struct sysctlnode node = *rnode;
5916 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5917 struct ixgbe_hw *hw = &adapter->hw;
5918 int val;
5919 u16 reg;
5920 int error;
5921
5922 if (ixgbe_fw_recovery_mode_swflag(adapter))
5923 return (EPERM);
5924
5925 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5926 device_printf(adapter->dev,
5927 "Device has no supported external thermal sensor.\n");
5928 return (ENODEV);
5929 }
5930
5931 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5932 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5933 device_printf(adapter->dev,
5934 "Error reading from PHY's current temperature register\n");
5935 return (EAGAIN);
5936 }
5937
5938 node.sysctl_data = &val;
5939
5940 /* Shift temp for output */
5941 val = reg >> 8;
5942
5943 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5944 if ((error) || (newp == NULL))
5945 return (error);
5946
5947 return (0);
5948 } /* ixgbe_sysctl_phy_temp */
5949
5950 /************************************************************************
5951 * ixgbe_sysctl_phy_overtemp_occurred
5952 *
5953 * Reports (directly from the PHY) whether the current PHY
5954 * temperature is over the overtemp threshold.
5955 ************************************************************************/
5956 static int
5957 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5958 {
5959 struct sysctlnode node = *rnode;
5960 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5961 struct ixgbe_hw *hw = &adapter->hw;
5962 int val, error;
5963 u16 reg;
5964
5965 if (ixgbe_fw_recovery_mode_swflag(adapter))
5966 return (EPERM);
5967
5968 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5969 device_printf(adapter->dev,
5970 "Device has no supported external thermal sensor.\n");
5971 return (ENODEV);
5972 }
5973
5974 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5975 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5976 device_printf(adapter->dev,
5977 "Error reading from PHY's temperature status register\n");
5978 return (EAGAIN);
5979 }
5980
5981 node.sysctl_data = &val;
5982
5983 /* Get occurrence bit */
5984 val = !!(reg & 0x4000);
5985
5986 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5987 if ((error) || (newp == NULL))
5988 return (error);
5989
5990 return (0);
5991 } /* ixgbe_sysctl_phy_overtemp_occurred */
5992
5993 /************************************************************************
5994 * ixgbe_sysctl_eee_state
5995 *
5996 * Sysctl to set EEE power saving feature
5997 * Values:
5998 * 0 - disable EEE
5999 * 1 - enable EEE
6000 * (none) - get current device EEE state
6001 ************************************************************************/
6002 static int
6003 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6004 {
6005 struct sysctlnode node = *rnode;
6006 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6007 struct ifnet *ifp = adapter->ifp;
6008 device_t dev = adapter->dev;
6009 int curr_eee, new_eee, error = 0;
6010 s32 retval;
6011
6012 if (ixgbe_fw_recovery_mode_swflag(adapter))
6013 return (EPERM);
6014
6015 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6016 node.sysctl_data = &new_eee;
6017 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6018 if ((error) || (newp == NULL))
6019 return (error);
6020
6021 /* Nothing to do */
6022 if (new_eee == curr_eee)
6023 return (0);
6024
6025 /* Not supported */
6026 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6027 return (EINVAL);
6028
6029 /* Bounds checking */
6030 if ((new_eee < 0) || (new_eee > 1))
6031 return (EINVAL);
6032
6033 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6034 if (retval) {
6035 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6036 return (EINVAL);
6037 }
6038
6039 /* Restart auto-neg */
6040 ifp->if_init(ifp);
6041
6042 device_printf(dev, "New EEE state: %d\n", new_eee);
6043
6044 /* Cache new value */
6045 if (new_eee)
6046 adapter->feat_en |= IXGBE_FEATURE_EEE;
6047 else
6048 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6049
6050 return (error);
6051 } /* ixgbe_sysctl_eee_state */
6052
/*
 * PRINTQS(adapter, regname): debug helper that prints one line containing
 * the value of per-queue register IXGBE_<regname>(i) for every queue of
 * the adapter (tab before the first value, single space between the rest).
 * Used only by ixgbe_print_debug_info().
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
6066
6067 /************************************************************************
6068 * ixgbe_print_debug_info
6069 *
6070 * Called only when em_display_debug_stats is enabled.
6071 * Provides a way to take a look at important statistics
6072 * maintained by the driver and hardware.
6073 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* RETA table size depends on the MAC generation */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the RSS redirection table (entries >= 32 live in ERETA) */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Column header (queue indices), then one row per RX register */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* RQSMR registers each cover four queues */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter: how often each queue's interrupt was disabled */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask registers (EIMS_EX only exists on 82599+) */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
	device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
	device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
} /* ixgbe_print_debug_info */
6141
6142 /************************************************************************
6143 * ixgbe_sysctl_debug
6144 ************************************************************************/
6145 static int
6146 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6147 {
6148 struct sysctlnode node = *rnode;
6149 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6150 int error, result = 0;
6151
6152 if (ixgbe_fw_recovery_mode_swflag(adapter))
6153 return (EPERM);
6154
6155 node.sysctl_data = &result;
6156 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6157
6158 if (error || newp == NULL)
6159 return error;
6160
6161 if (result == 1)
6162 ixgbe_print_debug_info(adapter);
6163
6164 return 0;
6165 } /* ixgbe_sysctl_debug */
6166
6167 /************************************************************************
6168 * ixgbe_init_device_features
6169 ************************************************************************/
6170 static void
6171 ixgbe_init_device_features(struct adapter *adapter)
6172 {
6173 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6174 | IXGBE_FEATURE_RSS
6175 | IXGBE_FEATURE_MSI
6176 | IXGBE_FEATURE_MSIX
6177 | IXGBE_FEATURE_LEGACY_IRQ
6178 | IXGBE_FEATURE_LEGACY_TX;
6179
6180 /* Set capabilities first... */
6181 switch (adapter->hw.mac.type) {
6182 case ixgbe_mac_82598EB:
6183 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6184 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6185 break;
6186 case ixgbe_mac_X540:
6187 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6188 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6189 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6190 (adapter->hw.bus.func == 0))
6191 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6192 break;
6193 case ixgbe_mac_X550:
6194 /*
6195 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6196 * NVM Image version.
6197 */
6198 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6199 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6200 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6201 break;
6202 case ixgbe_mac_X550EM_x:
6203 /*
6204 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6205 * NVM Image version.
6206 */
6207 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6208 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6209 break;
6210 case ixgbe_mac_X550EM_a:
6211 /*
6212 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6213 * NVM Image version.
6214 */
6215 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6216 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6217 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6218 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6219 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6220 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6221 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6222 }
6223 break;
6224 case ixgbe_mac_82599EB:
6225 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6226 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6227 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6228 (adapter->hw.bus.func == 0))
6229 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6230 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6231 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6232 break;
6233 default:
6234 break;
6235 }
6236
6237 /* Enabled by default... */
6238 /* Fan failure detection */
6239 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6240 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6241 /* Netmap */
6242 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6243 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6244 /* EEE */
6245 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6246 adapter->feat_en |= IXGBE_FEATURE_EEE;
6247 /* Thermal Sensor */
6248 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6249 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6250 /*
6251 * Recovery mode:
6252 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6253 * NVM Image version.
6254 */
6255
6256 /* Enabled via global sysctl... */
6257 /* Flow Director */
6258 if (ixgbe_enable_fdir) {
6259 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6260 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6261 else
6262 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6263 }
6264 /* Legacy (single queue) transmit */
6265 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6266 ixgbe_enable_legacy_tx)
6267 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6268 /*
6269 * Message Signal Interrupts - Extended (MSI-X)
6270 * Normal MSI is only enabled if MSI-X calls fail.
6271 */
6272 if (!ixgbe_enable_msix)
6273 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6274 /* Receive-Side Scaling (RSS) */
6275 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6276 adapter->feat_en |= IXGBE_FEATURE_RSS;
6277
6278 /* Disable features with unmet dependencies... */
6279 /* No MSI-X */
6280 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6281 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6282 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6283 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6284 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6285 }
6286 } /* ixgbe_init_device_features */
6287
6288 /************************************************************************
6289 * ixgbe_probe - Device identification routine
6290 *
6291 * Determines if the driver should be loaded on
6292 * adapter based on its PCI vendor/device ID.
6293 *
6294 * return BUS_PROBE_DEFAULT on success, positive on failure
6295 ************************************************************************/
6296 static int
6297 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6298 {
6299 const struct pci_attach_args *pa = aux;
6300
6301 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6302 }
6303
6304 static const ixgbe_vendor_info_t *
6305 ixgbe_lookup(const struct pci_attach_args *pa)
6306 {
6307 const ixgbe_vendor_info_t *ent;
6308 pcireg_t subid;
6309
6310 INIT_DEBUGOUT("ixgbe_lookup: begin");
6311
6312 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6313 return NULL;
6314
6315 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6316
6317 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6318 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6319 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6320 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6321 (ent->subvendor_id == 0)) &&
6322 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6323 (ent->subdevice_id == 0))) {
6324 return ent;
6325 }
6326 }
6327 return NULL;
6328 }
6329
6330 static int
6331 ixgbe_ifflags_cb(struct ethercom *ec)
6332 {
6333 struct ifnet *ifp = &ec->ec_if;
6334 struct adapter *adapter = ifp->if_softc;
6335 u_short change;
6336 int rv = 0;
6337
6338 IXGBE_CORE_LOCK(adapter);
6339
6340 change = ifp->if_flags ^ adapter->if_flags;
6341 if (change != 0)
6342 adapter->if_flags = ifp->if_flags;
6343
6344 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6345 rv = ENETRESET;
6346 goto out;
6347 } else if ((change & IFF_PROMISC) != 0)
6348 ixgbe_set_rxfilter(adapter);
6349
6350 /* Check for ec_capenable. */
6351 change = ec->ec_capenable ^ adapter->ec_capenable;
6352 adapter->ec_capenable = ec->ec_capenable;
6353 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6354 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6355 rv = ENETRESET;
6356 goto out;
6357 }
6358
6359 /*
6360 * Special handling is not required for ETHERCAP_VLAN_MTU.
6361 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
6362 */
6363
6364 /* Set up VLAN support and filter */
6365 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6366 ixgbe_setup_vlan_hw_support(adapter);
6367
6368 out:
6369 IXGBE_CORE_UNLOCK(adapter);
6370
6371 return rv;
6372 }
6373
6374 /************************************************************************
6375 * ixgbe_ioctl - Ioctl entry point
6376 *
6377 * Called when the user wants to configure the interface.
6378 *
6379 * return 0 on success, positive on failure
6380 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* The four L4 Rx checksum capabilities that must toggle as a unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* Reject all ioctls while firmware recovery is in progress */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug tracing only -- except SIOCZIFDATA, which
	 * additionally clears the hardware and software counters here.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Side effect: reset both hardware and driver counters */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command handling */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code do the base handling first */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* interface down: nothing further to apply */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes require re-initialization */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6515
6516 /************************************************************************
6517 * ixgbe_check_fan_failure
6518 ************************************************************************/
6519 static int
6520 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6521 {
6522 u32 mask;
6523
6524 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6525 IXGBE_ESDP_SDP1;
6526
6527 if (reg & mask) {
6528 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6529 return IXGBE_ERR_FAN_FAILURE;
6530 }
6531
6532 return IXGBE_SUCCESS;
6533 } /* ixgbe_check_fan_failure */
6534
6535 /************************************************************************
6536 * ixgbe_handle_que
6537 ************************************************************************/
6538 static void
6539 ixgbe_handle_que(void *context)
6540 {
6541 struct ix_queue *que = context;
6542 struct adapter *adapter = que->adapter;
6543 struct tx_ring *txr = que->txr;
6544 struct ifnet *ifp = adapter->ifp;
6545 bool more = false;
6546
6547 que->handleq.ev_count++;
6548
6549 if (ifp->if_flags & IFF_RUNNING) {
6550 more = ixgbe_rxeof(que);
6551 IXGBE_TX_LOCK(txr);
6552 more |= ixgbe_txeof(txr);
6553 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6554 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6555 ixgbe_mq_start_locked(ifp, txr);
6556 /* Only for queue 0 */
6557 /* NetBSD still needs this for CBQ */
6558 if ((&adapter->queues[0] == que)
6559 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6560 ixgbe_legacy_start_locked(ifp, txr);
6561 IXGBE_TX_UNLOCK(txr);
6562 }
6563
6564 if (more) {
6565 que->req.ev_count++;
6566 ixgbe_sched_handle_que(adapter, que);
6567 } else if (que->res != NULL) {
6568 /* MSIX: Re-enable this interrupt */
6569 ixgbe_enable_queue(adapter, que->msix);
6570 } else {
6571 /* INTx or MSI */
6572 ixgbe_enable_queue(adapter, 0);
6573 }
6574
6575 return;
6576 } /* ixgbe_handle_que */
6577
6578 /************************************************************************
6579 * ixgbe_handle_que_work
6580 ************************************************************************/
6581 static void
6582 ixgbe_handle_que_work(struct work *wk, void *context)
6583 {
6584 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6585
6586 /*
6587 * "enqueued flag" is not required here.
6588 * See ixgbe_msix_que().
6589 */
6590 ixgbe_handle_que(que);
6591 }
6592
6593 /************************************************************************
6594 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6595 ************************************************************************/
6596 static int
6597 ixgbe_allocate_legacy(struct adapter *adapter,
6598 const struct pci_attach_args *pa)
6599 {
6600 device_t dev = adapter->dev;
6601 struct ix_queue *que = adapter->queues;
6602 struct tx_ring *txr = adapter->tx_rings;
6603 int counts[PCI_INTR_TYPE_SIZE];
6604 pci_intr_type_t intr_type, max_type;
6605 char intrbuf[PCI_INTRSTR_LEN];
6606 char wqname[MAXCOMLEN];
6607 const char *intrstr = NULL;
6608 int defertx_error = 0, error;
6609
6610 /* We allocate a single interrupt resource */
6611 max_type = PCI_INTR_TYPE_MSI;
6612 counts[PCI_INTR_TYPE_MSIX] = 0;
6613 counts[PCI_INTR_TYPE_MSI] =
6614 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6615 /* Check not feat_en but feat_cap to fallback to INTx */
6616 counts[PCI_INTR_TYPE_INTX] =
6617 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6618
6619 alloc_retry:
6620 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6621 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6622 return ENXIO;
6623 }
6624 adapter->osdep.nintrs = 1;
6625 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6626 intrbuf, sizeof(intrbuf));
6627 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6628 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6629 device_xname(dev));
6630 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6631 if (adapter->osdep.ihs[0] == NULL) {
6632 aprint_error_dev(dev,"unable to establish %s\n",
6633 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6634 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6635 adapter->osdep.intrs = NULL;
6636 switch (intr_type) {
6637 case PCI_INTR_TYPE_MSI:
6638 /* The next try is for INTx: Disable MSI */
6639 max_type = PCI_INTR_TYPE_INTX;
6640 counts[PCI_INTR_TYPE_INTX] = 1;
6641 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6642 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6643 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6644 goto alloc_retry;
6645 } else
6646 break;
6647 case PCI_INTR_TYPE_INTX:
6648 default:
6649 /* See below */
6650 break;
6651 }
6652 }
6653 if (intr_type == PCI_INTR_TYPE_INTX) {
6654 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6655 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6656 }
6657 if (adapter->osdep.ihs[0] == NULL) {
6658 aprint_error_dev(dev,
6659 "couldn't establish interrupt%s%s\n",
6660 intrstr ? " at " : "", intrstr ? intrstr : "");
6661 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6662 adapter->osdep.intrs = NULL;
6663 return ENXIO;
6664 }
6665 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6666 /*
6667 * Try allocating a fast interrupt and the associated deferred
6668 * processing contexts.
6669 */
6670 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6671 txr->txr_si =
6672 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6673 ixgbe_deferred_mq_start, txr);
6674
6675 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6676 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6677 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6678 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6679 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6680 }
6681 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6682 ixgbe_handle_que, que);
6683 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6684 error = workqueue_create(&adapter->que_wq, wqname,
6685 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6686 IXGBE_WORKQUEUE_FLAGS);
6687
6688 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6689 && ((txr->txr_si == NULL) || defertx_error != 0))
6690 || (que->que_si == NULL) || error != 0) {
6691 aprint_error_dev(dev,
6692 "could not establish software interrupts\n");
6693
6694 return ENXIO;
6695 }
6696 /* For simplicity in the handlers */
6697 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6698
6699 return (0);
6700 } /* ixgbe_allocate_legacy */
6701
6702 /************************************************************************
6703 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6704 ************************************************************************/
6705 static int
6706 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6707 {
6708 device_t dev = adapter->dev;
6709 struct ix_queue *que = adapter->queues;
6710 struct tx_ring *txr = adapter->tx_rings;
6711 pci_chipset_tag_t pc;
6712 char intrbuf[PCI_INTRSTR_LEN];
6713 char intr_xname[32];
6714 char wqname[MAXCOMLEN];
6715 const char *intrstr = NULL;
6716 int error, vector = 0;
6717 int cpu_id = 0;
6718 kcpuset_t *affinity;
6719 #ifdef RSS
6720 unsigned int rss_buckets = 0;
6721 kcpuset_t cpu_mask;
6722 #endif
6723
6724 pc = adapter->osdep.pc;
6725 #ifdef RSS
6726 /*
6727 * If we're doing RSS, the number of queues needs to
6728 * match the number of RSS buckets that are configured.
6729 *
6730 * + If there's more queues than RSS buckets, we'll end
6731 * up with queues that get no traffic.
6732 *
6733 * + If there's more RSS buckets than queues, we'll end
6734 * up having multiple RSS buckets map to the same queue,
6735 * so there'll be some contention.
6736 */
6737 rss_buckets = rss_getnumbuckets();
6738 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6739 (adapter->num_queues != rss_buckets)) {
6740 device_printf(dev,
6741 "%s: number of queues (%d) != number of RSS buckets (%d)"
6742 "; performance will be impacted.\n",
6743 __func__, adapter->num_queues, rss_buckets);
6744 }
6745 #endif
6746
6747 adapter->osdep.nintrs = adapter->num_queues + 1;
6748 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6749 adapter->osdep.nintrs) != 0) {
6750 aprint_error_dev(dev,
6751 "failed to allocate MSI-X interrupt\n");
6752 return (ENXIO);
6753 }
6754
6755 kcpuset_create(&affinity, false);
6756 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6757 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6758 device_xname(dev), i);
6759 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6760 sizeof(intrbuf));
6761 #ifdef IXGBE_MPSAFE
6762 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6763 true);
6764 #endif
6765 /* Set the handler function */
6766 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6767 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6768 intr_xname);
6769 if (que->res == NULL) {
6770 aprint_error_dev(dev,
6771 "Failed to register QUE handler\n");
6772 error = ENXIO;
6773 goto err_out;
6774 }
6775 que->msix = vector;
6776 adapter->active_queues |= 1ULL << que->msix;
6777
6778 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6779 #ifdef RSS
6780 /*
6781 * The queue ID is used as the RSS layer bucket ID.
6782 * We look up the queue ID -> RSS CPU ID and select
6783 * that.
6784 */
6785 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6786 CPU_SETOF(cpu_id, &cpu_mask);
6787 #endif
6788 } else {
6789 /*
6790 * Bind the MSI-X vector, and thus the
6791 * rings to the corresponding CPU.
6792 *
6793 * This just happens to match the default RSS
6794 * round-robin bucket -> queue -> CPU allocation.
6795 */
6796 if (adapter->num_queues > 1)
6797 cpu_id = i;
6798 }
6799 /* Round-robin affinity */
6800 kcpuset_zero(affinity);
6801 kcpuset_set(affinity, cpu_id % ncpu);
6802 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6803 NULL);
6804 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6805 intrstr);
6806 if (error == 0) {
6807 #if 1 /* def IXGBE_DEBUG */
6808 #ifdef RSS
6809 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6810 cpu_id % ncpu);
6811 #else
6812 aprint_normal(", bound queue %d to cpu %d", i,
6813 cpu_id % ncpu);
6814 #endif
6815 #endif /* IXGBE_DEBUG */
6816 }
6817 aprint_normal("\n");
6818
6819 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6820 txr->txr_si = softint_establish(
6821 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6822 ixgbe_deferred_mq_start, txr);
6823 if (txr->txr_si == NULL) {
6824 aprint_error_dev(dev,
6825 "couldn't establish software interrupt\n");
6826 error = ENXIO;
6827 goto err_out;
6828 }
6829 }
6830 que->que_si
6831 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6832 ixgbe_handle_que, que);
6833 if (que->que_si == NULL) {
6834 aprint_error_dev(dev,
6835 "couldn't establish software interrupt\n");
6836 error = ENXIO;
6837 goto err_out;
6838 }
6839 }
6840 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6841 error = workqueue_create(&adapter->txr_wq, wqname,
6842 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6843 IXGBE_WORKQUEUE_FLAGS);
6844 if (error) {
6845 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6846 goto err_out;
6847 }
6848 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6849
6850 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6851 error = workqueue_create(&adapter->que_wq, wqname,
6852 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6853 IXGBE_WORKQUEUE_FLAGS);
6854 if (error) {
6855 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6856 goto err_out;
6857 }
6858
6859 /* and Link */
6860 cpu_id++;
6861 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6862 adapter->vector = vector;
6863 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6864 sizeof(intrbuf));
6865 #ifdef IXGBE_MPSAFE
6866 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6867 true);
6868 #endif
6869 /* Set the link handler function */
6870 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6871 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6872 intr_xname);
6873 if (adapter->osdep.ihs[vector] == NULL) {
6874 aprint_error_dev(dev, "Failed to register LINK handler\n");
6875 error = ENXIO;
6876 goto err_out;
6877 }
6878 /* Round-robin affinity */
6879 kcpuset_zero(affinity);
6880 kcpuset_set(affinity, cpu_id % ncpu);
6881 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6882 NULL);
6883
6884 aprint_normal_dev(dev,
6885 "for link, interrupting at %s", intrstr);
6886 if (error == 0)
6887 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6888 else
6889 aprint_normal("\n");
6890
6891 kcpuset_destroy(affinity);
6892 aprint_normal_dev(dev,
6893 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6894
6895 return (0);
6896
6897 err_out:
6898 kcpuset_destroy(affinity);
6899 ixgbe_free_deferred_handlers(adapter);
6900 ixgbe_free_pciintr_resources(adapter);
6901 return (error);
6902 } /* ixgbe_allocate_msix */
6903
6904 /************************************************************************
6905 * ixgbe_configure_interrupts
6906 *
6907 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6908 * This will also depend on user settings.
6909 ************************************************************************/
6910 static int
6911 ixgbe_configure_interrupts(struct adapter *adapter)
6912 {
6913 device_t dev = adapter->dev;
6914 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6915 int want, queues, msgs;
6916
6917 /* Default to 1 queue if MSI-X setup fails */
6918 adapter->num_queues = 1;
6919
6920 /* Override by tuneable */
6921 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6922 goto msi;
6923
6924 /*
6925 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6926 * interrupt slot.
6927 */
6928 if (ncpu == 1)
6929 goto msi;
6930
6931 /* First try MSI-X */
6932 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6933 msgs = MIN(msgs, IXG_MAX_NINTR);
6934 if (msgs < 2)
6935 goto msi;
6936
6937 adapter->msix_mem = (void *)1; /* XXX */
6938
6939 /* Figure out a reasonable auto config value */
6940 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6941
6942 #ifdef RSS
6943 /* If we're doing RSS, clamp at the number of RSS buckets */
6944 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6945 queues = uimin(queues, rss_getnumbuckets());
6946 #endif
6947 if (ixgbe_num_queues > queues) {
6948 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6949 ixgbe_num_queues = queues;
6950 }
6951
6952 if (ixgbe_num_queues != 0)
6953 queues = ixgbe_num_queues;
6954 else
6955 queues = uimin(queues,
6956 uimin(mac->max_tx_queues, mac->max_rx_queues));
6957
6958 /* reflect correct sysctl value */
6959 ixgbe_num_queues = queues;
6960
6961 /*
6962 * Want one vector (RX/TX pair) per queue
6963 * plus an additional for Link.
6964 */
6965 want = queues + 1;
6966 if (msgs >= want)
6967 msgs = want;
6968 else {
6969 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6970 "%d vectors but %d queues wanted!\n",
6971 msgs, want);
6972 goto msi;
6973 }
6974 adapter->num_queues = queues;
6975 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6976 return (0);
6977
6978 /*
6979 * MSI-X allocation failed or provided us with
6980 * less vectors than needed. Free MSI-X resources
6981 * and we'll try enabling MSI.
6982 */
6983 msi:
6984 /* Without MSI-X, some features are no longer supported */
6985 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6986 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6987 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6988 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6989
6990 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6991 adapter->msix_mem = NULL; /* XXX */
6992 if (msgs > 1)
6993 msgs = 1;
6994 if (msgs != 0) {
6995 msgs = 1;
6996 adapter->feat_en |= IXGBE_FEATURE_MSI;
6997 return (0);
6998 }
6999
7000 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7001 aprint_error_dev(dev,
7002 "Device does not support legacy interrupts.\n");
7003 return 1;
7004 }
7005
7006 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7007
7008 return (0);
7009 } /* ixgbe_configure_interrupts */
7010
7011
7012 /************************************************************************
7013 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7014 *
7015 * Done outside of interrupt context since the driver might sleep
7016 ************************************************************************/
7017 static void
7018 ixgbe_handle_link(void *context)
7019 {
7020 struct adapter *adapter = context;
7021 struct ixgbe_hw *hw = &adapter->hw;
7022
7023 KASSERT(mutex_owned(&adapter->core_mtx));
7024
7025 ++adapter->link_workev.ev_count;
7026 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7027 ixgbe_update_link_status(adapter);
7028
7029 /* Re-enable link interrupts */
7030 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7031 } /* ixgbe_handle_link */
7032
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 *   Trigger a software interrupt (EICS write) for the queues in the
 *   given 64-bit mask so their handlers run again.
 *
 *   NOTE: currently compiled out (#if 0) — no caller in this file; kept
 *   for reference against the FreeBSD driver this code derives from.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single 32-bit EICS covering all queue bits */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Newer MACs split the 64 queue bits over EICS_EX(0)/(1) */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
7062