/* $NetBSD: ixgbe.c,v 1.254 2020/09/01 04:19:16 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_quirks(struct adapter *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_workqueue(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_schedule_admin_tasklet(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation.
 * The interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
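/*
 * NetBSD: the FreeBSD SYSCTL_INT() macro is stubbed out above, so the
 * SYSCTL_INT() declarations below compile to nothing; the variables act
 * as plain compile-time defaults here.  Runtime knobs, where they exist,
 * are created via the sysctl handlers set up later in attach.
 */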
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile-time option right now, since it's
 * applied during attach; set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of queues.  If set to 0, the number is autoconfigured
 * from the number of CPUs, with a maximum of 8.  Set it here to
 * override that manually.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring; set higher than the RX
 * default, as that seems to be the better-performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use of unsupported SFP+ modules.
 * Note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
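/*
 * Likewise, TUNABLE_INT() is a FreeBSD loader tunable; the stub above
 * compiles the registration below away on NetBSD.
 */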
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI	PRI_SOFTNET

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* Force use of the default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
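		/*
		 * For instance, with index_mult == 1 and four queues,
		 * entries 0..3 accumulate into RETA(0) as 0x03020100.
		 */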
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
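/*
 * Example (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. 1 KB units):
 * a 2048-byte rx_mbuf_sz rounds to a SRRCTL BSIZEPKT field value of 2.
 */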

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
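		/* e.g. queue 5 -> RQSMR(1), bits 15:8 (regnum 1, regshift 1) */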
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before, during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
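		/* Same packing as RQSMR: e.g. queue 5 -> bits 15:8 of reg 1 */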
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	const char	*vendor, *product;

	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
		/*
		 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
		 * MA10-ST0.
		 */
		vendor = pmf_get_platform("system-vendor");
		product = pmf_get_platform("system-product");

		if ((vendor == NULL) || (product == NULL))
			return;

		if ((strcmp(vendor, "GIGABYTE") == 0) &&
		    (strcmp(product, "MA10-ST0") == 0)) {
			aprint_verbose_dev(dev,
			    "Enable SFP+ MOD_ABS inverse quirk\n");
			adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
		}
	}
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   (Unlike the FreeBSD version, attach returns no value on NetBSD;
 *   failures are reported via aprint and internal teardown.)
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter	*adapter;
	struct ixgbe_hw	*hw;
	int		error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool		unsupported_sfp = false;
	const char	*str;
	char		wqname[MAXCOMLEN];
	char		buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;
	adapter->osdep.detaching = false;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* Set quirk flags */
	ixgbe_quirks(adapter);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+)
	 * cage and a module inserted, phy.id is not an MII PHY id but an
	 * SFF-8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHYs, so the !unsupported_sfp test is not strictly
		 * required.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop_locked;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_workqueue(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16		dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet	*ifp;
	int		rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use a per-TX-queue softint, so if_deferred_start_init()
	 * isn't used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default: if vlans are created on another
	 * pseudo device (e.g. lagg), vlan events are not passed through,
	 * breaking operation, whereas with HW FILTER off it works. If
	 * you use vlans directly on the ixgbe driver, you can enable
	 * this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64		layer;

	layer = adapter->phy_layer;

#define	ADD(mm, dd)							\
	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);

	ADD(IFM_NONE, 0);

	/* Media types with matching NetBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		ADD(IFM_10G_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		ADD(IFM_1000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
		ADD(IFM_100_TX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
		ADD(IFM_10_T | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ADD(IFM_10G_LR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_LX | IFM_FDX, 0);
		}
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ADD(IFM_10G_SR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_SX | IFM_FDX, 0);
		}
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
		ADD(IFM_1000_SX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
		ADD(IFM_10G_CX4 | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		ADD(IFM_10G_KR | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		ADD(IFM_10G_KX4 | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		ADD(IFM_1000_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		ADD(IFM_2500_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
		ADD(IFM_2500_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
		ADD(IFM_5000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		ADD(IFM_1000_BX10 | IFM_FDX, 0);
	/* XXX no ifmedia_set? */

	ADD(IFM_AUTO, 0);

#undef ADD
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (TRUE);
		return (FALSE);
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (TRUE);
		default:
			return (FALSE);
		}
	default:
		return (FALSE);
	}
} /* ixgbe_is_sfp */

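/************************************************************************
 * ixgbe_schedule_admin_tasklet
 *
 *   admin_pending works as a one-shot gate: only the caller that wins
 *   the 0 -> 1 compare-and-swap enqueues the admin work, so concurrent
 *   callers cannot queue it twice. (The handler presumably clears the
 *   flag again; that code is outside this excerpt.)
 ************************************************************************/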
static void
ixgbe_schedule_admin_tasklet(struct adapter *adapter)
{

	if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
		workqueue_enqueue(adapter->admin_wq,
		    &adapter->admin_wc, NULL);
}

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg, err = 0;
	u32		task_requests = 0;
	bool		sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
		task_requests |= IXGBE_REQUEST_TASK_MOD;
		atomic_or_32(&adapter->task_requests, task_requests);
		ixgbe_schedule_admin_tasklet(adapter);
	} else {
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * On the first call, no advertised value is cached yet,
		 * so fetch the autonegotiation capabilities from hardware.
		 */
1580 autoneg = hw->phy.autoneg_advertised;
1581 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1582 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1583 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1584 &negotiate);
1585 if (err)
1586 return;
1587 if (hw->mac.ops.setup_link)
1588 err = hw->mac.ops.setup_link(hw, autoneg,
1589 adapter->link_up);
1590 }
1591
1592 } /* ixgbe_config_link */
1593
1594 /************************************************************************
1595 * ixgbe_update_stats_counters - Update board statistics counters.
1596 ************************************************************************/
1597 static void
1598 ixgbe_update_stats_counters(struct adapter *adapter)
1599 {
1600 struct ifnet *ifp = adapter->ifp;
1601 struct ixgbe_hw *hw = &adapter->hw;
1602 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1603 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1604 u64 total_missed_rx = 0;
1605 uint64_t crcerrs, rlec;
1606 unsigned int queue_counters;
1607 int i;
1608
1609 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1610 stats->crcerrs.ev_count += crcerrs;
1611 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1612 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1613 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1614 if (hw->mac.type >= ixgbe_mac_X550)
1615 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1616
1617 /* 16 registers exist */
1618 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1619 for (i = 0; i < queue_counters; i++) {
1620 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1621 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1622 if (hw->mac.type >= ixgbe_mac_82599EB) {
1623 stats->qprdc[i].ev_count
1624 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1625 }
1626 }
1627
1628 	/* 8 registers exist, one per traffic class */
1629 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1630 uint32_t mp;
1631
1632 /* MPC */
1633 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1634 		/* running total per traffic class */
1635 stats->mpc[i].ev_count += mp;
1636 /* running comprehensive total for stats display */
1637 total_missed_rx += mp;
1638
1639 if (hw->mac.type == ixgbe_mac_82598EB)
1640 stats->rnbc[i].ev_count
1641 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1642
1643 stats->pxontxc[i].ev_count
1644 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1645 stats->pxofftxc[i].ev_count
1646 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1647 if (hw->mac.type >= ixgbe_mac_82599EB) {
1648 stats->pxonrxc[i].ev_count
1649 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1650 stats->pxoffrxc[i].ev_count
1651 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1652 stats->pxon2offc[i].ev_count
1653 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1654 } else {
1655 stats->pxonrxc[i].ev_count
1656 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1657 stats->pxoffrxc[i].ev_count
1658 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1659 }
1660 }
1661 stats->mpctotal.ev_count += total_missed_rx;
1662
1663 	/* The datasheet says MLFC and MRFC are valid only when link is up at 10Gbps */
1664 if ((adapter->link_active == LINK_STATE_UP)
1665 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1666 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1667 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1668 }
1669 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1670 stats->rlec.ev_count += rlec;
1671
1672 /* Hardware workaround, gprc counts missed packets */
1673 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1674
1675 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1676 stats->lxontxc.ev_count += lxon;
1677 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1678 stats->lxofftxc.ev_count += lxoff;
1679 total = lxon + lxoff;
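	/*
	 * total is the number of XON/XOFF pause frames sent. It is used
	 * below to back pause traffic out of the TX counters; pause frames
	 * are minimum-length, hence ETHER_MIN_LEN octets per frame.
	 */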
1680
1681 if (hw->mac.type != ixgbe_mac_82598EB) {
1682 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1683 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1684 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1685 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1686 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1687 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1688 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1689 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1690 } else {
1691 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1692 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1693 /* 82598 only has a counter in the high register */
1694 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1695 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1696 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1697 }
1698
1699 /*
1700 * Workaround: mprc hardware is incorrectly counting
1701 * broadcasts, so for now we subtract those.
1702 */
1703 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1704 stats->bprc.ev_count += bprc;
1705 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1706 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1707
1708 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1709 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1710 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1711 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1712 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1713 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1714
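	/*
	 * Pause frames are 64-byte multicast frames, so exclude the
	 * XON/XOFF frames counted above from these TX packet counters.
	 */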
1715 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1716 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1717 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1718
1719 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1720 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1721 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1722 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1723 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1724 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1725 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1726 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1727 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1728 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1729 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1730 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1731 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1732 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1733 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1734 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1735 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1736 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1737 	/* FCoE counters exist only on 82599 and newer */
1738 if (hw->mac.type != ixgbe_mac_82598EB) {
1739 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1740 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1741 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1742 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1743 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1744 }
1745
1746 /*
1747 * Fill out the OS statistics structure. Only RX errors are required
1748 * here because all TX counters are incremented in the TX path and
1749 * normal RX counters are prepared in ether_input().
1750 */
1751 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1752 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1753 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1754 IF_STAT_PUTREF(ifp);
1755 } /* ixgbe_update_stats_counters */
1756
1757 /************************************************************************
1758 * ixgbe_add_hw_stats
1759 *
1760 * Add sysctl variables, one per statistic, to the system.
1761 ************************************************************************/
1762 static void
1763 ixgbe_add_hw_stats(struct adapter *adapter)
1764 {
1765 device_t dev = adapter->dev;
1766 const struct sysctlnode *rnode, *cnode;
1767 struct sysctllog **log = &adapter->sysctllog;
1768 struct tx_ring *txr = adapter->tx_rings;
1769 struct rx_ring *rxr = adapter->rx_rings;
1770 struct ixgbe_hw *hw = &adapter->hw;
1771 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1772 const char *xname = device_xname(dev);
1773 int i;
1774
1775 /* Driver Statistics */
1776 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1777 NULL, xname, "Driver tx dma soft fail EFBIG");
1778 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1779 NULL, xname, "m_defrag() failed");
1780 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1781 NULL, xname, "Driver tx dma hard fail EFBIG");
1782 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1783 NULL, xname, "Driver tx dma hard fail EINVAL");
1784 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1785 NULL, xname, "Driver tx dma hard fail other");
1786 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1787 NULL, xname, "Driver tx dma soft fail EAGAIN");
1788 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1789 NULL, xname, "Driver tx dma soft fail ENOMEM");
1790 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1791 NULL, xname, "Watchdog timeouts");
1792 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1793 NULL, xname, "TSO errors");
1794 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1795 NULL, xname, "Admin MSI-X IRQ Handled");
1796 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1797 NULL, xname, "Link event");
1798 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1799 NULL, xname, "SFP+ module event");
1800 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1801 NULL, xname, "Multispeed event");
1802 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1803 NULL, xname, "External PHY event");
1804
1805 	/* The maximum number of traffic classes is 8 */
1806 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1807 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1808 snprintf(adapter->tcs[i].evnamebuf,
1809 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1810 xname, i);
1811 if (i < __arraycount(stats->mpc)) {
1812 evcnt_attach_dynamic(&stats->mpc[i],
1813 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1814 "RX Missed Packet Count");
1815 if (hw->mac.type == ixgbe_mac_82598EB)
1816 evcnt_attach_dynamic(&stats->rnbc[i],
1817 EVCNT_TYPE_MISC, NULL,
1818 adapter->tcs[i].evnamebuf,
1819 "Receive No Buffers");
1820 }
1821 if (i < __arraycount(stats->pxontxc)) {
1822 evcnt_attach_dynamic(&stats->pxontxc[i],
1823 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1824 "pxontxc");
1825 evcnt_attach_dynamic(&stats->pxonrxc[i],
1826 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1827 "pxonrxc");
1828 evcnt_attach_dynamic(&stats->pxofftxc[i],
1829 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1830 "pxofftxc");
1831 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1832 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1833 "pxoffrxc");
1834 if (hw->mac.type >= ixgbe_mac_82599EB)
1835 evcnt_attach_dynamic(&stats->pxon2offc[i],
1836 EVCNT_TYPE_MISC, NULL,
1837 adapter->tcs[i].evnamebuf,
1838 "pxon2offc");
1839 }
1840 }
1841
1842 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1843 #ifdef LRO
1844 struct lro_ctrl *lro = &rxr->lro;
1845 #endif /* LRO */
1846
1847 snprintf(adapter->queues[i].evnamebuf,
1848 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1849 xname, i);
1850 snprintf(adapter->queues[i].namebuf,
1851 sizeof(adapter->queues[i].namebuf), "q%d", i);
1852
1853 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1854 aprint_error_dev(dev, "could not create sysctl root\n");
1855 break;
1856 }
1857
1858 if (sysctl_createv(log, 0, &rnode, &rnode,
1859 0, CTLTYPE_NODE,
1860 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1861 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1862 break;
1863
1864 if (sysctl_createv(log, 0, &rnode, &cnode,
1865 CTLFLAG_READWRITE, CTLTYPE_INT,
1866 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1867 ixgbe_sysctl_interrupt_rate_handler, 0,
1868 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1869 break;
1870
1871 if (sysctl_createv(log, 0, &rnode, &cnode,
1872 CTLFLAG_READONLY, CTLTYPE_INT,
1873 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1874 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1875 0, CTL_CREATE, CTL_EOL) != 0)
1876 break;
1877
1878 if (sysctl_createv(log, 0, &rnode, &cnode,
1879 CTLFLAG_READONLY, CTLTYPE_INT,
1880 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1881 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1882 0, CTL_CREATE, CTL_EOL) != 0)
1883 break;
1884
1885 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1886 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1887 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1888 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1889 "Handled queue in softint");
1890 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1891 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1892 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1893 NULL, adapter->queues[i].evnamebuf, "TSO");
1894 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1895 NULL, adapter->queues[i].evnamebuf,
1896 "Queue No Descriptor Available");
1897 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1898 NULL, adapter->queues[i].evnamebuf,
1899 "Queue Packets Transmitted");
1900 #ifndef IXGBE_LEGACY_TX
1901 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1902 NULL, adapter->queues[i].evnamebuf,
1903 "Packets dropped in pcq");
1904 #endif
1905
1906 if (sysctl_createv(log, 0, &rnode, &cnode,
1907 CTLFLAG_READONLY,
1908 CTLTYPE_INT,
1909 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1910 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1911 CTL_CREATE, CTL_EOL) != 0)
1912 break;
1913
1914 if (sysctl_createv(log, 0, &rnode, &cnode,
1915 CTLFLAG_READONLY,
1916 CTLTYPE_INT,
1917 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1918 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1919 CTL_CREATE, CTL_EOL) != 0)
1920 break;
1921
1922 if (sysctl_createv(log, 0, &rnode, &cnode,
1923 CTLFLAG_READONLY,
1924 CTLTYPE_INT,
1925 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1926 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1927 CTL_CREATE, CTL_EOL) != 0)
1928 break;
1929
1930 if (i < __arraycount(stats->qprc)) {
1931 evcnt_attach_dynamic(&stats->qprc[i],
1932 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1933 "qprc");
1934 evcnt_attach_dynamic(&stats->qptc[i],
1935 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1936 "qptc");
1937 evcnt_attach_dynamic(&stats->qbrc[i],
1938 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1939 "qbrc");
1940 evcnt_attach_dynamic(&stats->qbtc[i],
1941 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1942 "qbtc");
1943 if (hw->mac.type >= ixgbe_mac_82599EB)
1944 evcnt_attach_dynamic(&stats->qprdc[i],
1945 EVCNT_TYPE_MISC, NULL,
1946 adapter->queues[i].evnamebuf, "qprdc");
1947 }
1948
1949 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1950 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1951 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1952 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1953 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1954 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1955 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1956 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1957 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1958 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1959 #ifdef LRO
1960 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1961 CTLFLAG_RD, &lro->lro_queued, 0,
1962 "LRO Queued");
1963 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1964 CTLFLAG_RD, &lro->lro_flushed, 0,
1965 "LRO Flushed");
1966 #endif /* LRO */
1967 }
1968
1969 /* MAC stats get their own sub node */
1970
1971 snprintf(stats->namebuf,
1972 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1973
1974 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "rx csum offload - IP");
1976 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "rx csum offload - L4");
1978 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "rx csum offload - IP bad");
1980 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "rx csum offload - L4 bad");
1982 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "Interrupt conditions zero");
1984 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Legacy interrupts");
1986
1987 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "CRC Errors");
1989 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Illegal Byte Errors");
1991 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Byte Errors");
1993 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "MAC Short Packets Discarded");
1995 if (hw->mac.type >= ixgbe_mac_X550)
1996 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Bad SFD");
1998 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Total Packets Missed");
2000 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "MAC Local Faults");
2002 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "MAC Remote Faults");
2004 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Receive Length Errors");
2006 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Link XON Transmitted");
2008 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Link XON Received");
2010 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Link XOFF Transmitted");
2012 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Link XOFF Received");
2014
2015 /* Packet Reception Stats */
2016 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Total Octets Received");
2018 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Good Octets Received");
2020 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Total Packets Received");
2022 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Good Packets Received");
2024 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Multicast Packets Received");
2026 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Broadcast Packets Received");
2028 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2029 	    stats->namebuf, "64 byte frames received");
2030 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "65-127 byte frames received");
2032 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "128-255 byte frames received");
2034 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "256-511 byte frames received");
2036 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "512-1023 byte frames received");
2038 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2039 	    stats->namebuf, "1024-1522 byte frames received");
2040 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "Receive Undersized");
2042 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2043 	    stats->namebuf, "Fragmented Packets Received");
2044 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "Oversized Packets Received");
2046 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "Received Jabber");
2048 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "Management Packets Received");
2050 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Management Packets Dropped");
2052 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Checksum Errors");
2054
2055 /* Packet Transmission Stats */
2056 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Good Octets Transmitted");
2058 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Total Packets Transmitted");
2060 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Good Packets Transmitted");
2062 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Broadcast Packets Transmitted");
2064 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2065 stats->namebuf, "Multicast Packets Transmitted");
2066 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "Management Packets Transmitted");
2068 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2069 	    stats->namebuf, "64 byte frames transmitted");
2070 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "65-127 byte frames transmitted");
2072 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "128-255 byte frames transmitted");
2074 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "256-511 byte frames transmitted");
2076 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "512-1023 byte frames transmitted");
2078 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2079 stats->namebuf, "1024-1522 byte frames transmitted");
2080 } /* ixgbe_add_hw_stats */
2081
2082 static void
2083 ixgbe_clear_evcnt(struct adapter *adapter)
2084 {
2085 struct tx_ring *txr = adapter->tx_rings;
2086 struct rx_ring *rxr = adapter->rx_rings;
2087 struct ixgbe_hw *hw = &adapter->hw;
2088 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2089 int i;
2090
2091 adapter->efbig_tx_dma_setup.ev_count = 0;
2092 adapter->mbuf_defrag_failed.ev_count = 0;
2093 adapter->efbig2_tx_dma_setup.ev_count = 0;
2094 adapter->einval_tx_dma_setup.ev_count = 0;
2095 adapter->other_tx_dma_setup.ev_count = 0;
2096 adapter->eagain_tx_dma_setup.ev_count = 0;
2097 adapter->enomem_tx_dma_setup.ev_count = 0;
2098 adapter->tso_err.ev_count = 0;
2099 adapter->watchdog_events.ev_count = 0;
2100 adapter->admin_irqev.ev_count = 0;
2101 adapter->link_workev.ev_count = 0;
2102 adapter->mod_workev.ev_count = 0;
2103 adapter->msf_workev.ev_count = 0;
2104 adapter->phy_workev.ev_count = 0;
2105
2106 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2107 if (i < __arraycount(stats->mpc)) {
2108 stats->mpc[i].ev_count = 0;
2109 if (hw->mac.type == ixgbe_mac_82598EB)
2110 stats->rnbc[i].ev_count = 0;
2111 }
2112 if (i < __arraycount(stats->pxontxc)) {
2113 stats->pxontxc[i].ev_count = 0;
2114 stats->pxonrxc[i].ev_count = 0;
2115 stats->pxofftxc[i].ev_count = 0;
2116 stats->pxoffrxc[i].ev_count = 0;
2117 if (hw->mac.type >= ixgbe_mac_82599EB)
2118 stats->pxon2offc[i].ev_count = 0;
2119 }
2120 }
2121
2122 txr = adapter->tx_rings;
2123 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2124 adapter->queues[i].irqs.ev_count = 0;
2125 adapter->queues[i].handleq.ev_count = 0;
2126 adapter->queues[i].req.ev_count = 0;
2127 txr->no_desc_avail.ev_count = 0;
2128 txr->total_packets.ev_count = 0;
2129 txr->tso_tx.ev_count = 0;
2130 #ifndef IXGBE_LEGACY_TX
2131 txr->pcq_drops.ev_count = 0;
2132 #endif
2133 txr->q_efbig_tx_dma_setup = 0;
2134 txr->q_mbuf_defrag_failed = 0;
2135 txr->q_efbig2_tx_dma_setup = 0;
2136 txr->q_einval_tx_dma_setup = 0;
2137 txr->q_other_tx_dma_setup = 0;
2138 txr->q_eagain_tx_dma_setup = 0;
2139 txr->q_enomem_tx_dma_setup = 0;
2140 txr->q_tso_err = 0;
2141
2142 if (i < __arraycount(stats->qprc)) {
2143 stats->qprc[i].ev_count = 0;
2144 stats->qptc[i].ev_count = 0;
2145 stats->qbrc[i].ev_count = 0;
2146 stats->qbtc[i].ev_count = 0;
2147 if (hw->mac.type >= ixgbe_mac_82599EB)
2148 stats->qprdc[i].ev_count = 0;
2149 }
2150
2151 rxr->rx_packets.ev_count = 0;
2152 rxr->rx_bytes.ev_count = 0;
2153 rxr->rx_copies.ev_count = 0;
2154 rxr->no_jmbuf.ev_count = 0;
2155 rxr->rx_discarded.ev_count = 0;
2156 }
2157 stats->ipcs.ev_count = 0;
2158 stats->l4cs.ev_count = 0;
2159 stats->ipcs_bad.ev_count = 0;
2160 stats->l4cs_bad.ev_count = 0;
2161 stats->intzero.ev_count = 0;
2162 stats->legint.ev_count = 0;
2163 stats->crcerrs.ev_count = 0;
2164 stats->illerrc.ev_count = 0;
2165 stats->errbc.ev_count = 0;
2166 stats->mspdc.ev_count = 0;
2167 if (hw->mac.type >= ixgbe_mac_X550)
2168 stats->mbsdc.ev_count = 0;
2169 stats->mpctotal.ev_count = 0;
2170 stats->mlfc.ev_count = 0;
2171 stats->mrfc.ev_count = 0;
2172 stats->rlec.ev_count = 0;
2173 stats->lxontxc.ev_count = 0;
2174 stats->lxonrxc.ev_count = 0;
2175 stats->lxofftxc.ev_count = 0;
2176 stats->lxoffrxc.ev_count = 0;
2177
2178 /* Packet Reception Stats */
2179 stats->tor.ev_count = 0;
2180 stats->gorc.ev_count = 0;
2181 stats->tpr.ev_count = 0;
2182 stats->gprc.ev_count = 0;
2183 stats->mprc.ev_count = 0;
2184 stats->bprc.ev_count = 0;
2185 stats->prc64.ev_count = 0;
2186 stats->prc127.ev_count = 0;
2187 stats->prc255.ev_count = 0;
2188 stats->prc511.ev_count = 0;
2189 stats->prc1023.ev_count = 0;
2190 stats->prc1522.ev_count = 0;
2191 stats->ruc.ev_count = 0;
2192 stats->rfc.ev_count = 0;
2193 stats->roc.ev_count = 0;
2194 stats->rjc.ev_count = 0;
2195 stats->mngprc.ev_count = 0;
2196 stats->mngpdc.ev_count = 0;
2197 stats->xec.ev_count = 0;
2198
2199 /* Packet Transmission Stats */
2200 stats->gotc.ev_count = 0;
2201 stats->tpt.ev_count = 0;
2202 stats->gptc.ev_count = 0;
2203 stats->bptc.ev_count = 0;
2204 stats->mptc.ev_count = 0;
2205 stats->mngptc.ev_count = 0;
2206 stats->ptc64.ev_count = 0;
2207 stats->ptc127.ev_count = 0;
2208 stats->ptc255.ev_count = 0;
2209 stats->ptc511.ev_count = 0;
2210 stats->ptc1023.ev_count = 0;
2211 stats->ptc1522.ev_count = 0;
2212 }
2213
2214 /************************************************************************
2215 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2216 *
2217 * Retrieves the TDH value from the hardware
2218 ************************************************************************/
2219 static int
2220 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2221 {
2222 struct sysctlnode node = *rnode;
2223 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2224 struct adapter *adapter;
2225 uint32_t val;
2226
2227 if (!txr)
2228 return (0);
2229
2230 adapter = txr->adapter;
2231 if (ixgbe_fw_recovery_mode_swflag(adapter))
2232 return (EPERM);
2233
2234 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2235 node.sysctl_data = &val;
2236 return sysctl_lookup(SYSCTLFN_CALL(&node));
2237 } /* ixgbe_sysctl_tdh_handler */
2238
2239 /************************************************************************
2240 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2241 *
2242 * Retrieves the TDT value from the hardware
2243 ************************************************************************/
2244 static int
2245 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2246 {
2247 struct sysctlnode node = *rnode;
2248 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2249 struct adapter *adapter;
2250 uint32_t val;
2251
2252 if (!txr)
2253 return (0);
2254
2255 adapter = txr->adapter;
2256 if (ixgbe_fw_recovery_mode_swflag(adapter))
2257 return (EPERM);
2258
2259 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2260 node.sysctl_data = &val;
2261 return sysctl_lookup(SYSCTLFN_CALL(&node));
2262 } /* ixgbe_sysctl_tdt_handler */
2263
2264 /************************************************************************
2265 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2266 * handler function
2267 *
2268 * Retrieves the next_to_check value
2269 ************************************************************************/
2270 static int
2271 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2272 {
2273 struct sysctlnode node = *rnode;
2274 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2275 struct adapter *adapter;
2276 uint32_t val;
2277
2278 if (!rxr)
2279 return (0);
2280
2281 adapter = rxr->adapter;
2282 if (ixgbe_fw_recovery_mode_swflag(adapter))
2283 return (EPERM);
2284
2285 val = rxr->next_to_check;
2286 node.sysctl_data = &val;
2287 return sysctl_lookup(SYSCTLFN_CALL(&node));
2288 } /* ixgbe_sysctl_next_to_check_handler */
2289
2290 /************************************************************************
2291 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2292 *
2293 * Retrieves the RDH value from the hardware
2294 ************************************************************************/
2295 static int
2296 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2297 {
2298 struct sysctlnode node = *rnode;
2299 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2300 struct adapter *adapter;
2301 uint32_t val;
2302
2303 if (!rxr)
2304 return (0);
2305
2306 adapter = rxr->adapter;
2307 if (ixgbe_fw_recovery_mode_swflag(adapter))
2308 return (EPERM);
2309
2310 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2311 node.sysctl_data = &val;
2312 return sysctl_lookup(SYSCTLFN_CALL(&node));
2313 } /* ixgbe_sysctl_rdh_handler */
2314
2315 /************************************************************************
2316 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2317 *
2318 * Retrieves the RDT value from the hardware
2319 ************************************************************************/
2320 static int
2321 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2322 {
2323 struct sysctlnode node = *rnode;
2324 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2325 struct adapter *adapter;
2326 uint32_t val;
2327
2328 if (!rxr)
2329 return (0);
2330
2331 adapter = rxr->adapter;
2332 if (ixgbe_fw_recovery_mode_swflag(adapter))
2333 return (EPERM);
2334
2335 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2336 node.sysctl_data = &val;
2337 return sysctl_lookup(SYSCTLFN_CALL(&node));
2338 } /* ixgbe_sysctl_rdt_handler */
2339
2340 static int
2341 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2342 {
2343 struct ifnet *ifp = &ec->ec_if;
2344 struct adapter *adapter = ifp->if_softc;
2345 int rv;
2346
2347 if (set)
2348 rv = ixgbe_register_vlan(adapter, vid);
2349 else
2350 rv = ixgbe_unregister_vlan(adapter, vid);
2351
2352 if (rv != 0)
2353 return rv;
2354
2355 /*
2356 	 * Reconfigure VLAN HW tagging when ec_nvlans changes from 1 to 0
2357 	 * or from 0 to 1.
2358 */
2359 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2360 ixgbe_setup_vlan_hw_tagging(adapter);
2361
2362 return rv;
2363 }
2364
2365 /************************************************************************
2366 * ixgbe_register_vlan
2367 *
2368 * Run via vlan config EVENT, it enables us to use the
2369 * HW Filter table since we can get the vlan id. This
2370 * just creates the entry in the soft version of the
2371 * VFTA, init will repopulate the real table.
2372 ************************************************************************/
2373 static int
2374 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2375 {
2376 u16 index, bit;
2377 int error;
2378
2379 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2380 return EINVAL;
2381
2382 IXGBE_CORE_LOCK(adapter);
2383 index = (vtag >> 5) & 0x7F;
2384 bit = vtag & 0x1F;
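	/* e.g. vtag 100: index = 3, bit = 4, so shadow_vfta[3] bit 4 */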
2385 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2386 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2387 true);
2388 IXGBE_CORE_UNLOCK(adapter);
2389 if (error != 0)
2390 error = EACCES;
2391
2392 return error;
2393 } /* ixgbe_register_vlan */
2394
2395 /************************************************************************
2396 * ixgbe_unregister_vlan
2397 *
2398 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2399 ************************************************************************/
2400 static int
2401 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2402 {
2403 u16 index, bit;
2404 int error;
2405
2406 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2407 return EINVAL;
2408
2409 IXGBE_CORE_LOCK(adapter);
2410 index = (vtag >> 5) & 0x7F;
2411 bit = vtag & 0x1F;
2412 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2413 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2414 true);
2415 IXGBE_CORE_UNLOCK(adapter);
2416 if (error != 0)
2417 error = EACCES;
2418
2419 return error;
2420 } /* ixgbe_unregister_vlan */
2421
2422 static void
2423 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2424 {
2425 struct ethercom *ec = &adapter->osdep.ec;
2426 struct ixgbe_hw *hw = &adapter->hw;
2427 struct rx_ring *rxr;
2428 u32 ctrl;
2429 int i;
2430 bool hwtagging;
2431
2432 	/* Enable HW tagging only if at least one VLAN is attached */
2433 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2434 && VLAN_ATTACHED(ec);
2435
2436 /* Setup the queues for vlans */
2437 for (i = 0; i < adapter->num_queues; i++) {
2438 rxr = &adapter->rx_rings[i];
2439 /*
2440 		 * On 82599 and later, the VLAN strip enable is per-queue in RXDCTL.
2441 */
2442 if (hw->mac.type != ixgbe_mac_82598EB) {
2443 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2444 if (hwtagging)
2445 ctrl |= IXGBE_RXDCTL_VME;
2446 else
2447 ctrl &= ~IXGBE_RXDCTL_VME;
2448 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2449 }
2450 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2451 }
2452
2453 /* VLAN hw tagging for 82598 */
2454 if (hw->mac.type == ixgbe_mac_82598EB) {
2455 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2456 if (hwtagging)
2457 ctrl |= IXGBE_VLNCTRL_VME;
2458 else
2459 ctrl &= ~IXGBE_VLNCTRL_VME;
2460 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2461 }
2462 } /* ixgbe_setup_vlan_hw_tagging */
2463
2464 static void
2465 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2466 {
2467 struct ethercom *ec = &adapter->osdep.ec;
2468 struct ixgbe_hw *hw = &adapter->hw;
2469 int i;
2470 u32 ctrl;
2471 struct vlanid_list *vlanidp;
2472
2473 /*
2474 * This function is called from both if_init and ifflags_cb()
2475 * on NetBSD.
2476 */
2477
2478 /*
2479 * Part 1:
2480 * Setup VLAN HW tagging
2481 */
2482 ixgbe_setup_vlan_hw_tagging(adapter);
2483
2484 /*
2485 * Part 2:
2486 * Setup VLAN HW filter
2487 */
2488 /* Cleanup shadow_vfta */
2489 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2490 adapter->shadow_vfta[i] = 0;
2491 /* Generate shadow_vfta from ec_vids */
2492 ETHER_LOCK(ec);
2493 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2494 uint32_t idx;
2495
2496 idx = vlanidp->vid / 32;
2497 KASSERT(idx < IXGBE_VFTA_SIZE);
2498 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2499 }
2500 ETHER_UNLOCK(ec);
2501 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2502 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2503
2504 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2505 /* Enable the Filter Table if enabled */
2506 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2507 ctrl |= IXGBE_VLNCTRL_VFE;
2508 else
2509 ctrl &= ~IXGBE_VLNCTRL_VFE;
2510 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2511 } /* ixgbe_setup_vlan_hw_support */
2512
2513 /************************************************************************
2514 * ixgbe_get_slot_info
2515 *
2516 * Get the width and transaction speed of
2517 * the slot this adapter is plugged into.
2518 ************************************************************************/
2519 static void
2520 ixgbe_get_slot_info(struct adapter *adapter)
2521 {
2522 device_t dev = adapter->dev;
2523 struct ixgbe_hw *hw = &adapter->hw;
2524 u32 offset;
2525 u16 link;
2526 int bus_info_valid = TRUE;
2527
2528 /* Some devices are behind an internal bridge */
2529 switch (hw->device_id) {
2530 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2531 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2532 goto get_parent_info;
2533 default:
2534 break;
2535 }
2536
2537 ixgbe_get_bus_info(hw);
2538
2539 /*
2540 	 * Some devices don't use PCI-E; for those there is no point in
2541 	 * displaying "Unknown" for bus speed and width, so return early.
2542 */
2543 switch (hw->mac.type) {
2544 case ixgbe_mac_X550EM_x:
2545 case ixgbe_mac_X550EM_a:
2546 return;
2547 default:
2548 goto display;
2549 }
2550
2551 get_parent_info:
2552 /*
2553 * For the Quad port adapter we need to parse back
2554 * up the PCI tree to find the speed of the expansion
2555 * slot into which this adapter is plugged. A bit more work.
2556 */
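	/*
	 * Two device_parent() calls presumably step over the pci bus node
	 * up to the bridge device.
	 */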
2557 dev = device_parent(device_parent(dev));
2558 #if 0
2559 #ifdef IXGBE_DEBUG
2560 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2561 pci_get_slot(dev), pci_get_function(dev));
2562 #endif
2563 dev = device_parent(device_parent(dev));
2564 #ifdef IXGBE_DEBUG
2565 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2566 pci_get_slot(dev), pci_get_function(dev));
2567 #endif
2568 #endif
2569 /* Now get the PCI Express Capabilities offset */
2570 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2571 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2572 /*
2573 * Hmm...can't get PCI-Express capabilities.
2574 * Falling back to default method.
2575 */
2576 bus_info_valid = FALSE;
2577 ixgbe_get_bus_info(hw);
2578 goto display;
2579 }
2580 /* ...and read the Link Status Register */
2581 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2582 offset + PCIE_LCSR) >> 16;
2583 ixgbe_set_pci_config_data_generic(hw, link);
2584
2585 display:
2586 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2587 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2588 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2589 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2590 "Unknown"),
2591 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2592 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2593 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2594 "Unknown"));
2595
2596 if (bus_info_valid) {
2597 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2598 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2599 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2600 device_printf(dev, "PCI-Express bandwidth available"
2601 			    " for this card is not sufficient for"
2602 " optimal performance.\n");
2603 device_printf(dev, "For optimal performance a x8 "
2604 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2605 }
2606 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2607 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2608 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2609 device_printf(dev, "PCI-Express bandwidth available"
2610 			    " for this card is not sufficient for"
2611 " optimal performance.\n");
2612 device_printf(dev, "For optimal performance a x8 "
2613 "PCIE Gen3 slot is required.\n");
2614 }
2615 } else
2616 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2617
2618 return;
2619 } /* ixgbe_get_slot_info */
2620
2621 /************************************************************************
2622 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2623 ************************************************************************/
2624 static inline void
2625 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2626 {
2627 struct ixgbe_hw *hw = &adapter->hw;
2628 struct ix_queue *que = &adapter->queues[vector];
2629 u64 queue = 1ULL << vector;
2630 u32 mask;
2631
2632 mutex_enter(&que->dc_mtx);
2633 if (que->disabled_count > 0 && --que->disabled_count > 0)
2634 goto out;
2635
2636 if (hw->mac.type == ixgbe_mac_82598EB) {
2637 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2638 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2639 } else {
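		/* Queues 0-31 map to EIMS_EX(0), queues 32-63 to EIMS_EX(1) */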
2640 mask = (queue & 0xFFFFFFFF);
2641 if (mask)
2642 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2643 mask = (queue >> 32);
2644 if (mask)
2645 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2646 }
2647 out:
2648 mutex_exit(&que->dc_mtx);
2649 } /* ixgbe_enable_queue */
2650
2651 /************************************************************************
2652 * ixgbe_disable_queue_internal
2653 ************************************************************************/
2654 static inline void
2655 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2656 {
2657 struct ixgbe_hw *hw = &adapter->hw;
2658 struct ix_queue *que = &adapter->queues[vector];
2659 u64 queue = 1ULL << vector;
2660 u32 mask;
2661
2662 mutex_enter(&que->dc_mtx);
2663
2664 if (que->disabled_count > 0) {
2665 if (nestok)
2666 que->disabled_count++;
2667 goto out;
2668 }
2669 que->disabled_count++;
2670
2671 if (hw->mac.type == ixgbe_mac_82598EB) {
2672 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2673 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2674 } else {
2675 mask = (queue & 0xFFFFFFFF);
2676 if (mask)
2677 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2678 mask = (queue >> 32);
2679 if (mask)
2680 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2681 }
2682 out:
2683 mutex_exit(&que->dc_mtx);
2684 } /* ixgbe_disable_queue_internal */
2685
2686 /************************************************************************
2687 * ixgbe_disable_queue
2688 ************************************************************************/
2689 static inline void
2690 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2691 {
2692
2693 ixgbe_disable_queue_internal(adapter, vector, true);
2694 } /* ixgbe_disable_queue */
2695
2696 /************************************************************************
2697 * ixgbe_sched_handle_que - schedule deferred packet processing
2698 ************************************************************************/
2699 static inline void
2700 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2701 {
2702
2703 if (que->txrx_use_workqueue) {
2704 		/*
2705 		 * adapter->que_wq is bound to each CPU instead of each
2706 		 * NIC queue to reduce the number of workqueue kthreads.
2707 		 * Because interrupt affinity has to be considered in this
2708 		 * function, the workqueue kthread must be WQ_PERCPU. If a
2709 		 * WQ_PERCPU workqueue kthread were created for each NIC
2710 		 * queue instead, the number of kthreads would be
2711 		 * (number of used NIC queues) * (number of CPUs), which
2712 		 * is (number of CPUs) ^ 2 most often.
2713 		 *
2714 		 * Re-entry for the same NIC queue is prevented by masking
2715 		 * the queue's interrupt, and different NIC queues use
2716 		 * different struct work instances (que->wq_cookie), so an
2717 		 * "enqueued" flag to prevent calling workqueue_enqueue()
2718 		 * twice is not required.
2719 		 */
2720 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2721 } else {
2722 softint_schedule(que->que_si);
2723 }
2724 }
2725
2726 /************************************************************************
2727 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2728 ************************************************************************/
2729 static int
2730 ixgbe_msix_que(void *arg)
2731 {
2732 struct ix_queue *que = arg;
2733 struct adapter *adapter = que->adapter;
2734 struct ifnet *ifp = adapter->ifp;
2735 struct tx_ring *txr = que->txr;
2736 struct rx_ring *rxr = que->rxr;
2737 bool more;
2738 u32 newitr = 0;
2739
2740 /* Protect against spurious interrupts */
2741 if ((ifp->if_flags & IFF_RUNNING) == 0)
2742 return 0;
2743
2744 ixgbe_disable_queue(adapter, que->msix);
2745 ++que->irqs.ev_count;
2746
2747 /*
2748 	 * Don't change que->txrx_use_workqueue beyond this point, to avoid
2749 	 * flip-flopping softint/workqueue mode within one deferred pass.
2750 */
2751 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2752
2753 #ifdef __NetBSD__
2754 /* Don't run ixgbe_rxeof in interrupt context */
2755 more = true;
2756 #else
2757 more = ixgbe_rxeof(que);
2758 #endif
2759
2760 IXGBE_TX_LOCK(txr);
2761 ixgbe_txeof(txr);
2762 IXGBE_TX_UNLOCK(txr);
2763
2764 /* Do AIM now? */
2765
2766 if (adapter->enable_aim == false)
2767 goto no_calc;
2768 /*
2769 * Do Adaptive Interrupt Moderation:
2770 * - Write out last calculated setting
2771 * - Calculate based on average size over
2772 * the last interval.
2773 */
2774 if (que->eitr_setting)
2775 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2776
2777 que->eitr_setting = 0;
2778
2779 /* Idle, do nothing */
2780 if ((txr->bytes == 0) && (rxr->bytes == 0))
2781 goto no_calc;
2782
2783 if ((txr->bytes) && (txr->packets))
2784 newitr = txr->bytes/txr->packets;
2785 if ((rxr->bytes) && (rxr->packets))
2786 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2787 newitr += 24; /* account for hardware frame, crc */
2788
2789 /* set an upper boundary */
2790 newitr = uimin(newitr, 3000);
2791
2792 /* Be nice to the mid range */
2793 if ((newitr > 300) && (newitr < 1200))
2794 newitr = (newitr / 3);
2795 else
2796 newitr = (newitr / 2);
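	/*
	 * Example: an average frame size of 1000 bytes gives newitr = 1024,
	 * which falls in the mid range and is divided by 3 to ~341.
	 */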
2797
2798 /*
2799 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY,
2800 	 * for which we currently use 2us. The minimum value is always
2801 	 * greater than 2us at 100M (and presumably 10M, though that is not
2802 	 * documented), but not at 1G and higher, so enforce a floor here.
2803 */
2804 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2805 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2806 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2807 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2808 }
2809
2810 /* save for next interrupt */
2811 que->eitr_setting = newitr;
2812
2813 /* Reset state */
2814 txr->bytes = 0;
2815 txr->packets = 0;
2816 rxr->bytes = 0;
2817 rxr->packets = 0;
2818
2819 no_calc:
2820 if (more)
2821 ixgbe_sched_handle_que(adapter, que);
2822 else
2823 ixgbe_enable_queue(adapter, que->msix);
2824
2825 return 1;
2826 } /* ixgbe_msix_que */
2827
2828 /************************************************************************
2829 * ixgbe_media_status - Media Ioctl callback
2830 *
2831 * Called whenever the user queries the status of
2832 * the interface using ifconfig.
2833 ************************************************************************/
2834 static void
2835 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2836 {
2837 struct adapter *adapter = ifp->if_softc;
2838 struct ixgbe_hw *hw = &adapter->hw;
2839 int layer;
2840
2841 INIT_DEBUGOUT("ixgbe_media_status: begin");
2842 ixgbe_update_link_status(adapter);
2843
2844 ifmr->ifm_status = IFM_AVALID;
2845 ifmr->ifm_active = IFM_ETHER;
2846
2847 if (adapter->link_active != LINK_STATE_UP) {
2848 ifmr->ifm_active |= IFM_NONE;
2849 return;
2850 }
2851
2852 ifmr->ifm_status |= IFM_ACTIVE;
2853 layer = adapter->phy_layer;
2854
2855 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2856 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2857 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2858 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2859 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2860 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2861 switch (adapter->link_speed) {
2862 case IXGBE_LINK_SPEED_10GB_FULL:
2863 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2864 break;
2865 case IXGBE_LINK_SPEED_5GB_FULL:
2866 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2867 break;
2868 case IXGBE_LINK_SPEED_2_5GB_FULL:
2869 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2870 break;
2871 case IXGBE_LINK_SPEED_1GB_FULL:
2872 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2873 break;
2874 case IXGBE_LINK_SPEED_100_FULL:
2875 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2876 break;
2877 case IXGBE_LINK_SPEED_10_FULL:
2878 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2879 break;
2880 }
2881 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2882 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2883 switch (adapter->link_speed) {
2884 case IXGBE_LINK_SPEED_10GB_FULL:
2885 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2886 break;
2887 }
2888 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2889 switch (adapter->link_speed) {
2890 case IXGBE_LINK_SPEED_10GB_FULL:
2891 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2892 break;
2893 case IXGBE_LINK_SPEED_1GB_FULL:
2894 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2895 break;
2896 }
2897 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2898 switch (adapter->link_speed) {
2899 case IXGBE_LINK_SPEED_10GB_FULL:
2900 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2901 break;
2902 case IXGBE_LINK_SPEED_1GB_FULL:
2903 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2904 break;
2905 }
2906 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2907 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2908 switch (adapter->link_speed) {
2909 case IXGBE_LINK_SPEED_10GB_FULL:
2910 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2911 break;
2912 case IXGBE_LINK_SPEED_1GB_FULL:
2913 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2914 break;
2915 }
2916 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2917 switch (adapter->link_speed) {
2918 case IXGBE_LINK_SPEED_10GB_FULL:
2919 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2920 break;
2921 }
2922 /*
2923 * XXX: These need to use the proper media types once
2924 * they're added.
2925 */
2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2927 switch (adapter->link_speed) {
2928 case IXGBE_LINK_SPEED_10GB_FULL:
2929 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2930 break;
2931 case IXGBE_LINK_SPEED_2_5GB_FULL:
2932 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2933 break;
2934 case IXGBE_LINK_SPEED_1GB_FULL:
2935 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2936 break;
2937 }
2938 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2939 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2940 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2941 switch (adapter->link_speed) {
2942 case IXGBE_LINK_SPEED_10GB_FULL:
2943 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2944 break;
2945 case IXGBE_LINK_SPEED_2_5GB_FULL:
2946 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2947 break;
2948 case IXGBE_LINK_SPEED_1GB_FULL:
2949 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2950 break;
2951 }
2952
2953 /* If nothing is recognized... */
2954 #if 0
2955 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2956 ifmr->ifm_active |= IFM_UNKNOWN;
2957 #endif
2958
2959 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2960
2961 /* Display current flow control setting used on link */
2962 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2963 hw->fc.current_mode == ixgbe_fc_full)
2964 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2965 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2966 hw->fc.current_mode == ixgbe_fc_full)
2967 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2968
2969 return;
2970 } /* ixgbe_media_status */
2971
2972 /************************************************************************
2973 * ixgbe_media_change - Media Ioctl callback
2974 *
2975 * Called when the user changes speed/duplex using
2976  * media/mediaopt options with ifconfig.
2977 ************************************************************************/
2978 static int
2979 ixgbe_media_change(struct ifnet *ifp)
2980 {
2981 struct adapter *adapter = ifp->if_softc;
2982 struct ifmedia *ifm = &adapter->media;
2983 struct ixgbe_hw *hw = &adapter->hw;
2984 ixgbe_link_speed speed = 0;
2985 ixgbe_link_speed link_caps = 0;
2986 bool negotiate = false;
2987 s32 err = IXGBE_NOT_IMPLEMENTED;
2988
2989 INIT_DEBUGOUT("ixgbe_media_change: begin");
2990
2991 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2992 return (EINVAL);
2993
2994 if (hw->phy.media_type == ixgbe_media_type_backplane)
2995 return (EPERM);
2996
2997 /*
2998 * We don't actually need to check against the supported
2999 * media types of the adapter; ifmedia will take care of
3000 * that for us.
3001 */
3002 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3003 case IFM_AUTO:
3004 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3005 &negotiate);
3006 if (err != IXGBE_SUCCESS) {
3007 device_printf(adapter->dev, "Unable to determine "
3008 "supported advertise speeds\n");
3009 return (ENODEV);
3010 }
3011 speed |= link_caps;
3012 break;
3013 case IFM_10G_T:
3014 case IFM_10G_LRM:
3015 case IFM_10G_LR:
3016 case IFM_10G_TWINAX:
3017 case IFM_10G_SR:
3018 case IFM_10G_CX4:
3019 case IFM_10G_KR:
3020 case IFM_10G_KX4:
3021 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3022 break;
3023 case IFM_5000_T:
3024 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3025 break;
3026 case IFM_2500_T:
3027 case IFM_2500_KX:
3028 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3029 break;
3030 case IFM_1000_T:
3031 case IFM_1000_LX:
3032 case IFM_1000_SX:
3033 case IFM_1000_KX:
3034 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3035 break;
3036 case IFM_100_TX:
3037 speed |= IXGBE_LINK_SPEED_100_FULL;
3038 break;
3039 case IFM_10_T:
3040 speed |= IXGBE_LINK_SPEED_10_FULL;
3041 break;
3042 case IFM_NONE:
3043 break;
3044 default:
3045 goto invalid;
3046 }
3047
3048 hw->mac.autotry_restart = TRUE;
3049 hw->mac.ops.setup_link(hw, speed, TRUE);
3050 adapter->advertise = 0;
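	/*
	 * advertise bitmap: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M,
	 * 0x10 = 2.5G, 0x20 = 5G.
	 */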
3051 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3052 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3053 adapter->advertise |= 1 << 2;
3054 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3055 adapter->advertise |= 1 << 1;
3056 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3057 adapter->advertise |= 1 << 0;
3058 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3059 adapter->advertise |= 1 << 3;
3060 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3061 adapter->advertise |= 1 << 4;
3062 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3063 adapter->advertise |= 1 << 5;
3064 }
3065
3066 return (0);
3067
3068 invalid:
3069 device_printf(adapter->dev, "Invalid media type!\n");
3070
3071 return (EINVAL);
3072 } /* ixgbe_media_change */
3073
3074 /************************************************************************
3075 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3076 ************************************************************************/
3077 static int
3078 ixgbe_msix_admin(void *arg)
3079 {
3080 struct adapter *adapter = arg;
3081 struct ixgbe_hw *hw = &adapter->hw;
3082 u32 eicr, eicr_mask;
3083 u32 task_requests = 0;
3084 s32 retval;
3085
3086 ++adapter->admin_irqev.ev_count;
3087
3088 /* Pause other interrupts */
3089 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3090
3091 /* First get the cause */
3092 /*
3093 	 * The 82598, 82599, X540 and X550 specifications say the EICS
3094 	 * register is write-only. However, Linux reads EICS instead of EICR
3095 	 * to get the interrupt cause, as a workaround for silicon errata;
3096 	 * the read-to-clear mechanism of the EICR register seems unreliable.
3097 */
3098 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3099 /* Be sure the queue bits are not cleared */
3100 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3101 /* Clear interrupt with write */
3102 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3103
3104 if (ixgbe_is_sfp(hw)) {
3105 /* Pluggable optics-related interrupt */
3106 if (hw->mac.type >= ixgbe_mac_X540)
3107 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3108 else
3109 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3110
3111 /*
3112 * An interrupt might not arrive when a module is inserted.
3113 		 * When a link status change interrupt occurs while the driver
3114 		 * still regards the SFP as unplugged, request the module task
3115 		 * first and then handle the LSC interrupt.
3116 */
3117 if ((eicr & eicr_mask)
3118 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3119 && (eicr & IXGBE_EICR_LSC))) {
3120 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3121 task_requests |= IXGBE_REQUEST_TASK_MOD;
3122 }
3123
3124 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3125 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3126 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3127 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3128 task_requests |= IXGBE_REQUEST_TASK_MSF;
3129 }
3130 }
3131
3132 /* Link status change */
3133 if (eicr & IXGBE_EICR_LSC) {
3134 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3135 task_requests |= IXGBE_REQUEST_TASK_LSC;
3136 }
3137
3138 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3139 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3140 (eicr & IXGBE_EICR_FLOW_DIR)) {
3141 /* This is probably overkill :) */
3142 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3143 return 1;
3144 /* Disable the interrupt */
3145 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3146 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3147 }
3148
3149 if (eicr & IXGBE_EICR_ECC) {
3150 device_printf(adapter->dev,
3151 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3152 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3153 }
3154
3155 /* Check for over temp condition */
3156 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3157 switch (adapter->hw.mac.type) {
3158 case ixgbe_mac_X550EM_a:
3159 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3160 break;
3161 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3162 IXGBE_EICR_GPI_SDP0_X550EM_a);
3163 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3164 IXGBE_EICR_GPI_SDP0_X550EM_a);
3165 retval = hw->phy.ops.check_overtemp(hw);
3166 if (retval != IXGBE_ERR_OVERTEMP)
3167 break;
3168 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3169 device_printf(adapter->dev, "System shutdown required!\n");
3170 break;
3171 default:
3172 if (!(eicr & IXGBE_EICR_TS))
3173 break;
3174 retval = hw->phy.ops.check_overtemp(hw);
3175 if (retval != IXGBE_ERR_OVERTEMP)
3176 break;
3177 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3178 device_printf(adapter->dev, "System shutdown required!\n");
3179 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3180 break;
3181 }
3182 }
3183
3184 /* Check for VF message */
3185 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3186 (eicr & IXGBE_EICR_MAILBOX)) {
3187 task_requests |= IXGBE_REQUEST_TASK_MBX;
3188 }
3189 }
3190
3191 /* Check for fan failure */
3192 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3193 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3194 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3195 }
3196
3197 /* External PHY interrupt */
3198 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3199 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3200 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3201 task_requests |= IXGBE_REQUEST_TASK_PHY;
3202 }
3203
3204 if (task_requests != 0) {
3205 /* Re-enabling other interrupts is done in the admin task */
3206 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
3207 atomic_or_32(&adapter->task_requests, task_requests);
3208 ixgbe_schedule_admin_tasklet(adapter);
3209 } else {
3210 /* Re-enable other interrupts */
3211 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3212 }
3213
3214 return 1;
3215 } /* ixgbe_msix_admin */
3216
3217 static void
3218 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3219 {
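	/*
	 * On 82598 the interval is written to both halves of EITR, which
	 * reportedly is needed to reset the internal counter; newer MACs
	 * instead set IXGBE_EITR_CNT_WDIS so the write does not disturb
	 * the counter.
	 */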
3220
3221 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3222 itr |= itr << 16;
3223 else
3224 itr |= IXGBE_EITR_CNT_WDIS;
3225
3226 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3227 }
3228
3229
3230 /************************************************************************
3231 * ixgbe_sysctl_interrupt_rate_handler
3232 ************************************************************************/
3233 static int
3234 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3235 {
3236 struct sysctlnode node = *rnode;
3237 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3238 struct adapter *adapter;
3239 uint32_t reg, usec, rate;
3240 int error;
3241
3242 if (que == NULL)
3243 return 0;
3244
3245 adapter = que->adapter;
3246 if (ixgbe_fw_recovery_mode_swflag(adapter))
3247 return (EPERM);
3248
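	/*
	 * EITR bits [11:3] hold the interval in units of 2us; despite
	 * its name, "usec" below is that raw field value, so the rate
	 * in interrupts/sec is 500000 / usec.
	 */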
3249 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3250 usec = ((reg & 0x0FF8) >> 3);
3251 if (usec > 0)
3252 rate = 500000 / usec;
3253 else
3254 rate = 0;
3255 node.sysctl_data = &rate;
3256 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3257 if (error || newp == NULL)
3258 return error;
3259 reg &= ~0xfff; /* default, no limitation */
3260 if (rate > 0 && rate < 500000) {
3261 if (rate < 1000)
3262 rate = 1000;
3263 reg |= ((4000000 / rate) & 0xff8);
3264 /*
3265 		 * When RSC is used, the ITR interval must be larger than
3266 		 * RSC_DELAY; we currently use 2us for RSC_DELAY.  On 100M
3267 		 * (and presumably 10M, though not documented) the minimum
3268 		 * interval always exceeds 2us, but on 1G and higher it may not.
3269 */
3270 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3271 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3272 if ((adapter->num_queues > 1)
3273 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3274 return EINVAL;
3275 }
3276 ixgbe_max_interrupt_rate = rate;
3277 } else
3278 ixgbe_max_interrupt_rate = 0;
3279 ixgbe_eitr_write(adapter, que->msix, reg);
3280
3281 return (0);
3282 } /* ixgbe_sysctl_interrupt_rate_handler */
3283
3284 const struct sysctlnode *
3285 ixgbe_sysctl_instance(struct adapter *adapter)
3286 {
3287 const char *dvname;
3288 struct sysctllog **log;
3289 int rc;
3290 const struct sysctlnode *rnode;
3291
3292 if (adapter->sysctltop != NULL)
3293 return adapter->sysctltop;
3294
3295 log = &adapter->sysctllog;
3296 dvname = device_xname(adapter->dev);
3297
3298 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3299 0, CTLTYPE_NODE, dvname,
3300 SYSCTL_DESCR("ixgbe information and settings"),
3301 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3302 goto err;
3303
3304 return rnode;
3305 err:
3306 device_printf(adapter->dev,
3307 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3308 return NULL;
3309 }
3310
3311 /************************************************************************
3312 * ixgbe_add_device_sysctls
3313 ************************************************************************/
3314 static void
3315 ixgbe_add_device_sysctls(struct adapter *adapter)
3316 {
3317 device_t dev = adapter->dev;
3318 struct ixgbe_hw *hw = &adapter->hw;
3319 struct sysctllog **log;
3320 const struct sysctlnode *rnode, *cnode;
3321
3322 log = &adapter->sysctllog;
3323
3324 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3325 aprint_error_dev(dev, "could not create sysctl root\n");
3326 return;
3327 }
3328
3329 if (sysctl_createv(log, 0, &rnode, &cnode,
3330 CTLFLAG_READWRITE, CTLTYPE_INT,
3331 "debug", SYSCTL_DESCR("Debug Info"),
3332 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3333 aprint_error_dev(dev, "could not create sysctl\n");
3334
3335 if (sysctl_createv(log, 0, &rnode, &cnode,
3336 CTLFLAG_READONLY, CTLTYPE_INT,
3337 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3338 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3339 aprint_error_dev(dev, "could not create sysctl\n");
3340
3341 if (sysctl_createv(log, 0, &rnode, &cnode,
3342 CTLFLAG_READONLY, CTLTYPE_INT,
3343 "num_queues", SYSCTL_DESCR("Number of queues"),
3344 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3345 aprint_error_dev(dev, "could not create sysctl\n");
3346
3347 /* Sysctls for all devices */
3348 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3349 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3350 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3351 CTL_EOL) != 0)
3352 aprint_error_dev(dev, "could not create sysctl\n");
3353
3354 adapter->enable_aim = ixgbe_enable_aim;
3355 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3356 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3357 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3358 aprint_error_dev(dev, "could not create sysctl\n");
3359
3360 if (sysctl_createv(log, 0, &rnode, &cnode,
3361 CTLFLAG_READWRITE, CTLTYPE_INT,
3362 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3363 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3364 CTL_EOL) != 0)
3365 aprint_error_dev(dev, "could not create sysctl\n");
3366
3367 /*
3368 	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3369 	 * handler, it would cause flip-flopping between softint and
3370 	 * workqueue mode within one deferred processing run.  Avoiding
3371 	 * that would require preempt_disable()/preempt_enable() in
3372 	 * ixgbe_sched_handle_que() to avoid the KASSERT in
3373 	 * softint_schedule().  Changing "que->txrx_use_workqueue" in the
3374 	 * interrupt handler instead is lighter than doing
3375 	 * preempt_disable()/preempt_enable() in every ixgbe_sched_handle_que().
3376 */
3377 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3378 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3379 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3380 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3381 aprint_error_dev(dev, "could not create sysctl\n");
3382
3383 #ifdef IXGBE_DEBUG
3384 /* testing sysctls (for all devices) */
3385 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3386 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3387 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3388 CTL_EOL) != 0)
3389 aprint_error_dev(dev, "could not create sysctl\n");
3390
3391 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3392 CTLTYPE_STRING, "print_rss_config",
3393 SYSCTL_DESCR("Prints RSS Configuration"),
3394 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3395 CTL_EOL) != 0)
3396 aprint_error_dev(dev, "could not create sysctl\n");
3397 #endif
3398 /* for X550 series devices */
3399 if (hw->mac.type >= ixgbe_mac_X550)
3400 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3401 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3402 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3403 CTL_EOL) != 0)
3404 aprint_error_dev(dev, "could not create sysctl\n");
3405
3406 /* for WoL-capable devices */
3407 if (adapter->wol_support) {
3408 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3409 CTLTYPE_BOOL, "wol_enable",
3410 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3411 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3412 CTL_EOL) != 0)
3413 aprint_error_dev(dev, "could not create sysctl\n");
3414
3415 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3416 CTLTYPE_INT, "wufc",
3417 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3418 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3419 CTL_EOL) != 0)
3420 aprint_error_dev(dev, "could not create sysctl\n");
3421 }
3422
3423 /* for X552/X557-AT devices */
3424 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3425 const struct sysctlnode *phy_node;
3426
3427 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3428 "phy", SYSCTL_DESCR("External PHY sysctls"),
3429 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3430 aprint_error_dev(dev, "could not create sysctl\n");
3431 return;
3432 }
3433
3434 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3435 CTLTYPE_INT, "temp",
3436 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3437 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3438 CTL_EOL) != 0)
3439 aprint_error_dev(dev, "could not create sysctl\n");
3440
3441 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3442 CTLTYPE_INT, "overtemp_occurred",
3443 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3444 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3445 CTL_CREATE, CTL_EOL) != 0)
3446 aprint_error_dev(dev, "could not create sysctl\n");
3447 }
3448
3449 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3450 && (hw->phy.type == ixgbe_phy_fw))
3451 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3452 CTLTYPE_BOOL, "force_10_100_autonego",
3453 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3454 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3455 CTL_CREATE, CTL_EOL) != 0)
3456 aprint_error_dev(dev, "could not create sysctl\n");
3457
3458 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3459 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3460 CTLTYPE_INT, "eee_state",
3461 SYSCTL_DESCR("EEE Power Save State"),
3462 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3463 CTL_EOL) != 0)
3464 aprint_error_dev(dev, "could not create sysctl\n");
3465 }
3466 } /* ixgbe_add_device_sysctls */
3467
3468 /************************************************************************
3469 * ixgbe_allocate_pci_resources
3470 ************************************************************************/
3471 static int
3472 ixgbe_allocate_pci_resources(struct adapter *adapter,
3473 const struct pci_attach_args *pa)
3474 {
3475 pcireg_t memtype, csr;
3476 device_t dev = adapter->dev;
3477 bus_addr_t addr;
3478 int flags;
3479
3480 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3481 switch (memtype) {
3482 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3483 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3484 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3485 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3486 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3487 goto map_err;
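		/*
		 * Register BARs must be mapped non-prefetchable: reads
		 * can have side effects and writes must not be merged.
		 */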
3488 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3489 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3490 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3491 }
3492 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3493 adapter->osdep.mem_size, flags,
3494 &adapter->osdep.mem_bus_space_handle) != 0) {
3495 map_err:
3496 adapter->osdep.mem_size = 0;
3497 aprint_error_dev(dev, "unable to map BAR0\n");
3498 return ENXIO;
3499 }
3500 /*
3501 		 * Enable address decoding for the memory range in case the
3502 		 * BIOS or UEFI didn't set it.
3503 */
3504 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3505 PCI_COMMAND_STATUS_REG);
3506 csr |= PCI_COMMAND_MEM_ENABLE;
3507 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3508 csr);
3509 break;
3510 default:
3511 aprint_error_dev(dev, "unexpected type on BAR0\n");
3512 return ENXIO;
3513 }
3514
3515 return (0);
3516 } /* ixgbe_allocate_pci_resources */
3517
3518 static void
3519 ixgbe_free_workqueue(struct adapter *adapter)
3520 {
3521 struct ix_queue *que = adapter->queues;
3522 struct tx_ring *txr = adapter->tx_rings;
3523 int i;
3524
3525 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3526 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3527 if (txr->txr_si != NULL)
3528 softint_disestablish(txr->txr_si);
3529 }
3530 if (que->que_si != NULL)
3531 softint_disestablish(que->que_si);
3532 }
3533 if (adapter->txr_wq != NULL)
3534 workqueue_destroy(adapter->txr_wq);
3535 if (adapter->txr_wq_enqueued != NULL)
3536 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3537 if (adapter->que_wq != NULL)
3538 workqueue_destroy(adapter->que_wq);
3539
3540 if (adapter->admin_wq != NULL) {
3541 workqueue_destroy(adapter->admin_wq);
3542 adapter->admin_wq = NULL;
3543 }
3544 if (adapter->timer_wq != NULL) {
3545 workqueue_destroy(adapter->timer_wq);
3546 adapter->timer_wq = NULL;
3547 }
3548 if (adapter->recovery_mode_timer_wq != NULL) {
3549 /*
3550 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3551 * the recovery_mode_timer workqueue, so call it here.
3552 */
3553 workqueue_wait(adapter->recovery_mode_timer_wq,
3554 &adapter->recovery_mode_timer_wc);
3555 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3556 workqueue_destroy(adapter->recovery_mode_timer_wq);
3557 adapter->recovery_mode_timer_wq = NULL;
3558 }
3559 } /* ixgbe_free_workqueue */
3560
3561 /************************************************************************
3562 * ixgbe_detach - Device removal routine
3563 *
3564 * Called when the driver is being removed.
3565 * Stops the adapter and deallocates all the resources
3566 * that were allocated for driver operation.
3567 *
3568 * return 0 on success, positive on failure
3569 ************************************************************************/
3570 static int
3571 ixgbe_detach(device_t dev, int flags)
3572 {
3573 struct adapter *adapter = device_private(dev);
3574 struct rx_ring *rxr = adapter->rx_rings;
3575 struct tx_ring *txr = adapter->tx_rings;
3576 struct ixgbe_hw *hw = &adapter->hw;
3577 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3578 u32 ctrl_ext;
3579 int i;
3580
3581 INIT_DEBUGOUT("ixgbe_detach: begin");
3582 if (adapter->osdep.attached == false)
3583 return 0;
3584
3585 if (ixgbe_pci_iov_detach(dev) != 0) {
3586 device_printf(dev, "SR-IOV in use; detach first.\n");
3587 return (EBUSY);
3588 }
3589
3590 #if NVLAN > 0
3591 /* Make sure VLANs are not using driver */
3592 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3593 ; /* nothing to do: no VLANs */
3594 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3595 vlan_ifdetach(adapter->ifp);
3596 else {
3597 aprint_error_dev(dev, "VLANs in use, detach first\n");
3598 return (EBUSY);
3599 }
3600 #endif
3601
3602 adapter->osdep.detaching = true;
3603 /*
3604 * Stop the interface. ixgbe_setup_low_power_mode() calls
3605 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3606 * directly.
3607 */
3608 ixgbe_setup_low_power_mode(adapter);
3609
3610 callout_halt(&adapter->timer, NULL);
3611 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
3612 callout_stop(&adapter->recovery_mode_timer);
3613 callout_halt(&adapter->recovery_mode_timer, NULL);
3614 }
3615
3616 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3617 atomic_store_relaxed(&adapter->admin_pending, 0);
3618 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3619 atomic_store_relaxed(&adapter->timer_pending, 0);
3620
3621 pmf_device_deregister(dev);
3622
3623 ether_ifdetach(adapter->ifp);
3624
3625 ixgbe_free_workqueue(adapter);
3626
3627 /* let hardware know driver is unloading */
3628 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3629 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3630 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3631
3632 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3633 netmap_detach(adapter->ifp);
3634
3635 ixgbe_free_pci_resources(adapter);
3636 #if 0 /* XXX the NetBSD port is probably missing something here */
3637 bus_generic_detach(dev);
3638 #endif
3639 if_detach(adapter->ifp);
3640 ifmedia_fini(&adapter->media);
3641 if_percpuq_destroy(adapter->ipq);
3642
3643 sysctl_teardown(&adapter->sysctllog);
3644 evcnt_detach(&adapter->efbig_tx_dma_setup);
3645 evcnt_detach(&adapter->mbuf_defrag_failed);
3646 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3647 evcnt_detach(&adapter->einval_tx_dma_setup);
3648 evcnt_detach(&adapter->other_tx_dma_setup);
3649 evcnt_detach(&adapter->eagain_tx_dma_setup);
3650 evcnt_detach(&adapter->enomem_tx_dma_setup);
3651 evcnt_detach(&adapter->watchdog_events);
3652 evcnt_detach(&adapter->tso_err);
3653 evcnt_detach(&adapter->admin_irqev);
3654 evcnt_detach(&adapter->link_workev);
3655 evcnt_detach(&adapter->mod_workev);
3656 evcnt_detach(&adapter->msf_workev);
3657 evcnt_detach(&adapter->phy_workev);
3658
3659 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3660 if (i < __arraycount(stats->mpc)) {
3661 evcnt_detach(&stats->mpc[i]);
3662 if (hw->mac.type == ixgbe_mac_82598EB)
3663 evcnt_detach(&stats->rnbc[i]);
3664 }
3665 if (i < __arraycount(stats->pxontxc)) {
3666 evcnt_detach(&stats->pxontxc[i]);
3667 evcnt_detach(&stats->pxonrxc[i]);
3668 evcnt_detach(&stats->pxofftxc[i]);
3669 evcnt_detach(&stats->pxoffrxc[i]);
3670 if (hw->mac.type >= ixgbe_mac_82599EB)
3671 evcnt_detach(&stats->pxon2offc[i]);
3672 }
3673 }
3674
3675 txr = adapter->tx_rings;
3676 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3677 evcnt_detach(&adapter->queues[i].irqs);
3678 evcnt_detach(&adapter->queues[i].handleq);
3679 evcnt_detach(&adapter->queues[i].req);
3680 evcnt_detach(&txr->no_desc_avail);
3681 evcnt_detach(&txr->total_packets);
3682 evcnt_detach(&txr->tso_tx);
3683 #ifndef IXGBE_LEGACY_TX
3684 evcnt_detach(&txr->pcq_drops);
3685 #endif
3686
3687 if (i < __arraycount(stats->qprc)) {
3688 evcnt_detach(&stats->qprc[i]);
3689 evcnt_detach(&stats->qptc[i]);
3690 evcnt_detach(&stats->qbrc[i]);
3691 evcnt_detach(&stats->qbtc[i]);
3692 if (hw->mac.type >= ixgbe_mac_82599EB)
3693 evcnt_detach(&stats->qprdc[i]);
3694 }
3695
3696 evcnt_detach(&rxr->rx_packets);
3697 evcnt_detach(&rxr->rx_bytes);
3698 evcnt_detach(&rxr->rx_copies);
3699 evcnt_detach(&rxr->no_jmbuf);
3700 evcnt_detach(&rxr->rx_discarded);
3701 }
3702 evcnt_detach(&stats->ipcs);
3703 evcnt_detach(&stats->l4cs);
3704 evcnt_detach(&stats->ipcs_bad);
3705 evcnt_detach(&stats->l4cs_bad);
3706 evcnt_detach(&stats->intzero);
3707 evcnt_detach(&stats->legint);
3708 evcnt_detach(&stats->crcerrs);
3709 evcnt_detach(&stats->illerrc);
3710 evcnt_detach(&stats->errbc);
3711 evcnt_detach(&stats->mspdc);
3712 if (hw->mac.type >= ixgbe_mac_X550)
3713 evcnt_detach(&stats->mbsdc);
3714 evcnt_detach(&stats->mpctotal);
3715 evcnt_detach(&stats->mlfc);
3716 evcnt_detach(&stats->mrfc);
3717 evcnt_detach(&stats->rlec);
3718 evcnt_detach(&stats->lxontxc);
3719 evcnt_detach(&stats->lxonrxc);
3720 evcnt_detach(&stats->lxofftxc);
3721 evcnt_detach(&stats->lxoffrxc);
3722
3723 /* Packet Reception Stats */
3724 evcnt_detach(&stats->tor);
3725 evcnt_detach(&stats->gorc);
3726 evcnt_detach(&stats->tpr);
3727 evcnt_detach(&stats->gprc);
3728 evcnt_detach(&stats->mprc);
3729 evcnt_detach(&stats->bprc);
3730 evcnt_detach(&stats->prc64);
3731 evcnt_detach(&stats->prc127);
3732 evcnt_detach(&stats->prc255);
3733 evcnt_detach(&stats->prc511);
3734 evcnt_detach(&stats->prc1023);
3735 evcnt_detach(&stats->prc1522);
3736 evcnt_detach(&stats->ruc);
3737 evcnt_detach(&stats->rfc);
3738 evcnt_detach(&stats->roc);
3739 evcnt_detach(&stats->rjc);
3740 evcnt_detach(&stats->mngprc);
3741 evcnt_detach(&stats->mngpdc);
3742 evcnt_detach(&stats->xec);
3743
3744 /* Packet Transmission Stats */
3745 evcnt_detach(&stats->gotc);
3746 evcnt_detach(&stats->tpt);
3747 evcnt_detach(&stats->gptc);
3748 evcnt_detach(&stats->bptc);
3749 evcnt_detach(&stats->mptc);
3750 evcnt_detach(&stats->mngptc);
3751 evcnt_detach(&stats->ptc64);
3752 evcnt_detach(&stats->ptc127);
3753 evcnt_detach(&stats->ptc255);
3754 evcnt_detach(&stats->ptc511);
3755 evcnt_detach(&stats->ptc1023);
3756 evcnt_detach(&stats->ptc1522);
3757
3758 ixgbe_free_queues(adapter);
3759 free(adapter->mta, M_DEVBUF);
3760
3761 IXGBE_CORE_LOCK_DESTROY(adapter);
3762
3763 return (0);
3764 } /* ixgbe_detach */
3765
3766 /************************************************************************
3767 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3768 *
3769 * Prepare the adapter/port for LPLU and/or WoL
3770 ************************************************************************/
3771 static int
3772 ixgbe_setup_low_power_mode(struct adapter *adapter)
3773 {
3774 struct ixgbe_hw *hw = &adapter->hw;
3775 device_t dev = adapter->dev;
3776 struct ifnet *ifp = adapter->ifp;
3777 s32 error = 0;
3778
3779 /* Limit power management flow to X550EM baseT */
3780 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3781 hw->phy.ops.enter_lplu) {
3782 /* X550EM baseT adapters need a special LPLU flow */
3783 hw->phy.reset_disable = true;
3784 ixgbe_ifstop(ifp, 1);
3785 error = hw->phy.ops.enter_lplu(hw);
3786 if (error)
3787 device_printf(dev,
3788 "Error entering LPLU: %d\n", error);
3789 hw->phy.reset_disable = false;
3790 } else {
3791 /* Just stop for other adapters */
3792 ixgbe_ifstop(ifp, 1);
3793 }
3794
3795 IXGBE_CORE_LOCK(adapter);
3796
3797 if (!hw->wol_enabled) {
3798 ixgbe_set_phy_power(hw, FALSE);
3799 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3800 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3801 } else {
3802 /* Turn off support for APM wakeup. (Using ACPI instead) */
3803 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3804 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3805
3806 /*
3807 * Clear Wake Up Status register to prevent any previous wakeup
3808 * events from waking us up immediately after we suspend.
3809 */
3810 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3811
3812 /*
3813 * Program the Wakeup Filter Control register with user filter
3814 * settings
3815 */
3816 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3817
3818 /* Enable wakeups and power management in Wakeup Control */
3819 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3820 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3821
3822 }
3823
3824 IXGBE_CORE_UNLOCK(adapter);
3825
3826 return error;
3827 } /* ixgbe_setup_low_power_mode */
3828
3829 /************************************************************************
3830 * ixgbe_shutdown - Shutdown entry point
3831 ************************************************************************/
3832 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3833 static int
3834 ixgbe_shutdown(device_t dev)
3835 {
3836 struct adapter *adapter = device_private(dev);
3837 int error = 0;
3838
3839 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3840
3841 error = ixgbe_setup_low_power_mode(adapter);
3842
3843 return (error);
3844 } /* ixgbe_shutdown */
3845 #endif
3846
3847 /************************************************************************
3848 * ixgbe_suspend
3849 *
3850 * From D0 to D3
3851 ************************************************************************/
3852 static bool
3853 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3854 {
3855 struct adapter *adapter = device_private(dev);
3856 int error = 0;
3857
3858 INIT_DEBUGOUT("ixgbe_suspend: begin");
3859
3860 error = ixgbe_setup_low_power_mode(adapter);
3861
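	/* pmf(9) suspend handlers return true on success. */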
3862 	return (error == 0);
3863 } /* ixgbe_suspend */
3864
3865 /************************************************************************
3866 * ixgbe_resume
3867 *
3868 * From D3 to D0
3869 ************************************************************************/
3870 static bool
3871 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3872 {
3873 struct adapter *adapter = device_private(dev);
3874 struct ifnet *ifp = adapter->ifp;
3875 struct ixgbe_hw *hw = &adapter->hw;
3876 u32 wus;
3877
3878 INIT_DEBUGOUT("ixgbe_resume: begin");
3879
3880 IXGBE_CORE_LOCK(adapter);
3881
3882 /* Read & clear WUS register */
3883 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3884 	if (wus)
3885 		device_printf(dev, "Woken up by (WUS): %#010x\n",
3886 		    wus);
3887 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3888 /* And clear WUFC until next low-power transition */
3889 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3890
3891 /*
3892 * Required after D3->D0 transition;
3893 * will re-advertise all previous advertised speeds
3894 */
3895 if (ifp->if_flags & IFF_UP)
3896 ixgbe_init_locked(adapter);
3897
3898 IXGBE_CORE_UNLOCK(adapter);
3899
3900 return true;
3901 } /* ixgbe_resume */
3902
3903 /*
3904 * Set the various hardware offload abilities.
3905 *
3906 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3907 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3908 * mbuf offload flags the driver will understand.
3909 */
3910 static void
3911 ixgbe_set_if_hwassist(struct adapter *adapter)
3912 {
3913 /* XXX */
3914 }
3915
3916 /************************************************************************
3917 * ixgbe_init_locked - Init entry point
3918 *
3919 * Used in two ways: It is used by the stack as an init
3920 * entry point in network interface structure. It is also
3921 * used by the driver as a hw/sw initialization routine to
3922 * get to a consistent state.
3923 *
3924  * No return value; on failure the interface is left stopped.
3925 ************************************************************************/
3926 static void
3927 ixgbe_init_locked(struct adapter *adapter)
3928 {
3929 struct ifnet *ifp = adapter->ifp;
3930 device_t dev = adapter->dev;
3931 struct ixgbe_hw *hw = &adapter->hw;
3932 struct ix_queue *que;
3933 struct tx_ring *txr;
3934 struct rx_ring *rxr;
3935 u32 txdctl, mhadd;
3936 u32 rxdctl, rxctrl;
3937 u32 ctrl_ext;
3938 bool unsupported_sfp = false;
3939 int i, j, err;
3940
3941 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3942
3943 KASSERT(mutex_owned(&adapter->core_mtx));
3944 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3945
3946 hw->need_unsupported_sfp_recovery = false;
3947 hw->adapter_stopped = FALSE;
3948 ixgbe_stop_adapter(hw);
3949 callout_stop(&adapter->timer);
3950 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3951 callout_stop(&adapter->recovery_mode_timer);
3952 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3953 que->disabled_count = 0;
3954
3955 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3956 adapter->max_frame_size =
3957 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3958
3959 /* Queue indices may change with IOV mode */
3960 ixgbe_align_all_queue_indices(adapter);
3961
3962 /* reprogram the RAR[0] in case user changed it. */
3963 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3964
3965 /* Get the latest mac address, User can use a LAA */
3966 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3967 IXGBE_ETH_LENGTH_OF_ADDRESS);
3968 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3969 hw->addr_ctrl.rar_used_count = 1;
3970
3971 /* Set hardware offload abilities from ifnet flags */
3972 ixgbe_set_if_hwassist(adapter);
3973
3974 /* Prepare transmit descriptors and buffers */
3975 if (ixgbe_setup_transmit_structures(adapter)) {
3976 device_printf(dev, "Could not setup transmit structures\n");
3977 ixgbe_stop_locked(adapter);
3978 return;
3979 }
3980
3981 ixgbe_init_hw(hw);
3982
3983 ixgbe_initialize_iov(adapter);
3984
3985 ixgbe_initialize_transmit_units(adapter);
3986
3987 /* Setup Multicast table */
3988 ixgbe_set_rxfilter(adapter);
3989
3990 /* Determine the correct mbuf pool, based on frame size */
3991 if (adapter->max_frame_size <= MCLBYTES)
3992 adapter->rx_mbuf_sz = MCLBYTES;
3993 else
3994 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3995
3996 /* Prepare receive descriptors and buffers */
3997 if (ixgbe_setup_receive_structures(adapter)) {
3998 device_printf(dev, "Could not setup receive structures\n");
3999 ixgbe_stop_locked(adapter);
4000 return;
4001 }
4002
4003 /* Configure RX settings */
4004 ixgbe_initialize_receive_units(adapter);
4005
4006 	/* Clear the variable holding pending admin task requests */
4007 adapter->task_requests = 0;
4008
4009 /* Enable SDP & MSI-X interrupts based on adapter */
4010 ixgbe_config_gpie(adapter);
4011
4012 /* Set MTU size */
4013 if (ifp->if_mtu > ETHERMTU) {
4014 /* aka IXGBE_MAXFRS on 82599 and newer */
4015 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4016 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4017 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4018 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4019 }
4020
4021 /* Now enable all the queues */
4022 for (i = 0; i < adapter->num_queues; i++) {
4023 txr = &adapter->tx_rings[i];
4024 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4025 txdctl |= IXGBE_TXDCTL_ENABLE;
4026 /* Set WTHRESH to 8, burst writeback */
4027 txdctl |= (8 << 16);
4028 /*
4029 * When the internal queue falls below PTHRESH (32),
4030 * start prefetching as long as there are at least
4031 * HTHRESH (1) buffers ready. The values are taken
4032 * from the Intel linux driver 3.8.21.
4033 * Prefetching enables tx line rate even with 1 queue.
4034 */
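		/* PTHRESH is bits [6:0], HTHRESH [14:8], WTHRESH [22:16]. */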
4035 txdctl |= (32 << 0) | (1 << 8);
4036 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4037 }
4038
4039 for (i = 0; i < adapter->num_queues; i++) {
4040 rxr = &adapter->rx_rings[i];
4041 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4042 if (hw->mac.type == ixgbe_mac_82598EB) {
4043 /*
4044 * PTHRESH = 21
4045 * HTHRESH = 4
4046 * WTHRESH = 8
4047 */
4048 rxdctl &= ~0x3FFFFF;
4049 rxdctl |= 0x080420;
4050 }
4051 rxdctl |= IXGBE_RXDCTL_ENABLE;
4052 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
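		/*
		 * Wait (up to 10ms) for the hardware to latch
		 * RXDCTL.ENABLE before advancing the tail pointer.
		 */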
4053 for (j = 0; j < 10; j++) {
4054 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4055 IXGBE_RXDCTL_ENABLE)
4056 break;
4057 else
4058 msec_delay(1);
4059 }
4060 IXGBE_WRITE_BARRIER(hw);
4061
4062 /*
4063 * In netmap mode, we must preserve the buffers made
4064 * available to userspace before the if_init()
4065 * (this is true by default on the TX side, because
4066 * init makes all buffers available to userspace).
4067 *
4068 * netmap_reset() and the device specific routines
4069 * (e.g. ixgbe_setup_receive_rings()) map these
4070 * buffers at the end of the NIC ring, so here we
4071 * must set the RDT (tail) register to make sure
4072 * they are not overwritten.
4073 *
4074 * In this driver the NIC ring starts at RDH = 0,
4075 * RDT points to the last slot available for reception (?),
4076 * so RDT = num_rx_desc - 1 means the whole ring is available.
4077 */
4078 #ifdef DEV_NETMAP
4079 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4080 (ifp->if_capenable & IFCAP_NETMAP)) {
4081 struct netmap_adapter *na = NA(adapter->ifp);
4082 struct netmap_kring *kring = na->rx_rings[i];
4083 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4084
4085 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4086 } else
4087 #endif /* DEV_NETMAP */
4088 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4089 adapter->num_rx_desc - 1);
4090 }
4091
4092 /* Enable Receive engine */
4093 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4094 if (hw->mac.type == ixgbe_mac_82598EB)
4095 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4096 rxctrl |= IXGBE_RXCTRL_RXEN;
4097 ixgbe_enable_rx_dma(hw, rxctrl);
4098
4099 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4100 atomic_store_relaxed(&adapter->timer_pending, 0);
4101 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4102 callout_reset(&adapter->recovery_mode_timer, hz,
4103 ixgbe_recovery_mode_timer, adapter);
4104
4105 /* Set up MSI/MSI-X routing */
4106 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4107 ixgbe_configure_ivars(adapter);
4108 /* Set up auto-mask */
4109 if (hw->mac.type == ixgbe_mac_82598EB)
4110 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4111 else {
4112 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4113 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4114 }
4115 } else { /* Simple settings for Legacy/MSI */
4116 ixgbe_set_ivar(adapter, 0, 0, 0);
4117 ixgbe_set_ivar(adapter, 0, 0, 1);
4118 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4119 }
4120
4121 ixgbe_init_fdir(adapter);
4122
4123 /*
4124 * Check on any SFP devices that
4125 * need to be kick-started
4126 */
4127 if (hw->phy.type == ixgbe_phy_none) {
4128 err = hw->phy.ops.identify(hw);
4129 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4130 unsupported_sfp = true;
4131 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4132 unsupported_sfp = true;
4133
4134 if (unsupported_sfp)
4135 device_printf(dev,
4136 "Unsupported SFP+ module type was detected.\n");
4137
4138 /* Set moderation on the Link interrupt */
4139 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4140
4141 /* Enable EEE power saving */
4142 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4143 hw->mac.ops.setup_eee(hw,
4144 adapter->feat_en & IXGBE_FEATURE_EEE);
4145
4146 /* Enable power to the phy. */
4147 if (!unsupported_sfp) {
4148 ixgbe_set_phy_power(hw, TRUE);
4149
4150 /* Config/Enable Link */
4151 ixgbe_config_link(adapter);
4152 }
4153
4154 /* Hardware Packet Buffer & Flow Control setup */
4155 ixgbe_config_delay_values(adapter);
4156
4157 /* Initialize the FC settings */
4158 ixgbe_start_hw(hw);
4159
4160 /* Set up VLAN support and filter */
4161 ixgbe_setup_vlan_hw_support(adapter);
4162
4163 /* Setup DMA Coalescing */
4164 ixgbe_config_dmac(adapter);
4165
4166 /* OK to schedule workqueues. */
4167 adapter->schedule_wqs_ok = true;
4168
4169 /* And now turn on interrupts */
4170 ixgbe_enable_intr(adapter);
4171
4172 /* Enable the use of the MBX by the VF's */
4173 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4174 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4175 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4176 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4177 }
4178
4179 /* Update saved flags. See ixgbe_ifflags_cb() */
4180 adapter->if_flags = ifp->if_flags;
4181 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4182
4183 /* Now inform the stack we're ready */
4184 ifp->if_flags |= IFF_RUNNING;
4185
4186 return;
4187 } /* ixgbe_init_locked */
4188
4189 /************************************************************************
4190 * ixgbe_init
4191 ************************************************************************/
4192 static int
4193 ixgbe_init(struct ifnet *ifp)
4194 {
4195 struct adapter *adapter = ifp->if_softc;
4196
4197 IXGBE_CORE_LOCK(adapter);
4198 ixgbe_init_locked(adapter);
4199 IXGBE_CORE_UNLOCK(adapter);
4200
4201 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4202 } /* ixgbe_init */
4203
4204 /************************************************************************
4205 * ixgbe_set_ivar
4206 *
4207 * Setup the correct IVAR register for a particular MSI-X interrupt
4208 * (yes this is all very magic and confusing :)
4209 * - entry is the register array entry
4210 * - vector is the MSI-X vector for this queue
4211 * - type is RX/TX/MISC
4212 ************************************************************************/
4213 static void
4214 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4215 {
4216 struct ixgbe_hw *hw = &adapter->hw;
4217 u32 ivar, index;
4218
4219 vector |= IXGBE_IVAR_ALLOC_VAL;
4220
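	/*
	 * Each 32-bit IVAR register holds four 8-bit vector entries, and
	 * ALLOC_VAL marks an entry valid.  On 82599 and later each
	 * register covers two queues: RX in bytes 0/2, TX in bytes 1/3.
	 */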
4221 switch (hw->mac.type) {
4222 case ixgbe_mac_82598EB:
4223 if (type == -1)
4224 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4225 else
4226 entry += (type * 64);
4227 index = (entry >> 2) & 0x1F;
4228 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4229 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4230 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4231 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4232 break;
4233 case ixgbe_mac_82599EB:
4234 case ixgbe_mac_X540:
4235 case ixgbe_mac_X550:
4236 case ixgbe_mac_X550EM_x:
4237 case ixgbe_mac_X550EM_a:
4238 if (type == -1) { /* MISC IVAR */
4239 index = (entry & 1) * 8;
4240 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4241 ivar &= ~(0xffUL << index);
4242 ivar |= ((u32)vector << index);
4243 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4244 } else { /* RX/TX IVARS */
4245 index = (16 * (entry & 1)) + (8 * type);
4246 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4247 ivar &= ~(0xffUL << index);
4248 ivar |= ((u32)vector << index);
4249 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4250 }
4251 break;
4252 default:
4253 break;
4254 }
4255 } /* ixgbe_set_ivar */
4256
4257 /************************************************************************
4258 * ixgbe_configure_ivars
4259 ************************************************************************/
4260 static void
4261 ixgbe_configure_ivars(struct adapter *adapter)
4262 {
4263 struct ix_queue *que = adapter->queues;
4264 u32 newitr;
4265
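	/*
	 * EITR holds the interval in 2us units at bits [11:3], so
	 * 4000000 / rate yields the field already shifted into place.
	 */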
4266 if (ixgbe_max_interrupt_rate > 0)
4267 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4268 else {
4269 /*
4270 * Disable DMA coalescing if interrupt moderation is
4271 * disabled.
4272 */
4273 adapter->dmac = 0;
4274 newitr = 0;
4275 }
4276
4277 for (int i = 0; i < adapter->num_queues; i++, que++) {
4278 struct rx_ring *rxr = &adapter->rx_rings[i];
4279 struct tx_ring *txr = &adapter->tx_rings[i];
4280 /* First the RX queue entry */
4281 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4282 /* ... and the TX */
4283 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4284 /* Set an Initial EITR value */
4285 ixgbe_eitr_write(adapter, que->msix, newitr);
4286 /*
4287 		 * Clear eitr_setting to eliminate any influence from the
4288 		 * previous state.  At this point the Tx/Rx interrupt
4289 		 * handler (ixgbe_msix_que()) cannot be called, so neither
4290 		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4291 */
4292 que->eitr_setting = 0;
4293 }
4294
4295 /* For the Link interrupt */
4296 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4297 } /* ixgbe_configure_ivars */
4298
4299 /************************************************************************
4300 * ixgbe_config_gpie
4301 ************************************************************************/
4302 static void
4303 ixgbe_config_gpie(struct adapter *adapter)
4304 {
4305 struct ixgbe_hw *hw = &adapter->hw;
4306 u32 gpie;
4307
4308 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4309
4310 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4311 /* Enable Enhanced MSI-X mode */
4312 gpie |= IXGBE_GPIE_MSIX_MODE
4313 | IXGBE_GPIE_EIAME
4314 | IXGBE_GPIE_PBA_SUPPORT
4315 | IXGBE_GPIE_OCD;
4316 }
4317
4318 /* Fan Failure Interrupt */
4319 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4320 gpie |= IXGBE_SDP1_GPIEN;
4321
4322 /* Thermal Sensor Interrupt */
4323 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4324 gpie |= IXGBE_SDP0_GPIEN_X540;
4325
4326 /* Link detection */
4327 switch (hw->mac.type) {
4328 case ixgbe_mac_82599EB:
4329 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4330 break;
4331 case ixgbe_mac_X550EM_x:
4332 case ixgbe_mac_X550EM_a:
4333 gpie |= IXGBE_SDP0_GPIEN_X540;
4334 break;
4335 default:
4336 break;
4337 }
4338
4339 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4340
4341 } /* ixgbe_config_gpie */
4342
4343 /************************************************************************
4344 * ixgbe_config_delay_values
4345 *
4346 * Requires adapter->max_frame_size to be set.
4347 ************************************************************************/
4348 static void
4349 ixgbe_config_delay_values(struct adapter *adapter)
4350 {
4351 struct ixgbe_hw *hw = &adapter->hw;
4352 u32 rxpb, frame, size, tmp;
4353
4354 frame = adapter->max_frame_size;
4355
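	/*
	 * The IXGBE_DV/IXGBE_LOW_DV macros return the required headroom
	 * in bits; IXGBE_BT2KB converts it to the KB units used by the
	 * flow-control watermark fields.
	 */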
4356 /* Calculate High Water */
4357 switch (hw->mac.type) {
4358 case ixgbe_mac_X540:
4359 case ixgbe_mac_X550:
4360 case ixgbe_mac_X550EM_x:
4361 case ixgbe_mac_X550EM_a:
4362 tmp = IXGBE_DV_X540(frame, frame);
4363 break;
4364 default:
4365 tmp = IXGBE_DV(frame, frame);
4366 break;
4367 }
4368 size = IXGBE_BT2KB(tmp);
4369 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4370 hw->fc.high_water[0] = rxpb - size;
4371
4372 /* Now calculate Low Water */
4373 switch (hw->mac.type) {
4374 case ixgbe_mac_X540:
4375 case ixgbe_mac_X550:
4376 case ixgbe_mac_X550EM_x:
4377 case ixgbe_mac_X550EM_a:
4378 tmp = IXGBE_LOW_DV_X540(frame);
4379 break;
4380 default:
4381 tmp = IXGBE_LOW_DV(frame);
4382 break;
4383 }
4384 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4385
4386 hw->fc.pause_time = IXGBE_FC_PAUSE;
4387 hw->fc.send_xon = TRUE;
4388 } /* ixgbe_config_delay_values */
4389
4390 /************************************************************************
4391 * ixgbe_set_rxfilter - Multicast Update
4392 *
4393 * Called whenever multicast address list is updated.
4394 ************************************************************************/
4395 static void
4396 ixgbe_set_rxfilter(struct adapter *adapter)
4397 {
4398 struct ixgbe_mc_addr *mta;
4399 struct ifnet *ifp = adapter->ifp;
4400 u8 *update_ptr;
4401 int mcnt = 0;
4402 u32 fctrl;
4403 struct ethercom *ec = &adapter->osdep.ec;
4404 struct ether_multi *enm;
4405 struct ether_multistep step;
4406
4407 KASSERT(mutex_owned(&adapter->core_mtx));
4408 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4409
4410 mta = adapter->mta;
4411 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4412
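	/*
	 * The hardware filter cannot express address ranges, so fall
	 * back to ALLMULTI if any range entry is present or the list
	 * overflows the multicast table.
	 */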
4413 ETHER_LOCK(ec);
4414 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4415 ETHER_FIRST_MULTI(step, ec, enm);
4416 while (enm != NULL) {
4417 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4418 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4419 ETHER_ADDR_LEN) != 0)) {
4420 ec->ec_flags |= ETHER_F_ALLMULTI;
4421 break;
4422 }
4423 bcopy(enm->enm_addrlo,
4424 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4425 mta[mcnt].vmdq = adapter->pool;
4426 mcnt++;
4427 ETHER_NEXT_MULTI(step, enm);
4428 }
4429
4430 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4431 if (ifp->if_flags & IFF_PROMISC)
4432 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4433 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4434 fctrl |= IXGBE_FCTRL_MPE;
4435 fctrl &= ~IXGBE_FCTRL_UPE;
4436 } else
4437 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4438
4439 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4440
4441 /* Update multicast filter entries only when it's not ALLMULTI */
4442 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4443 ETHER_UNLOCK(ec);
4444 update_ptr = (u8 *)mta;
4445 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4446 ixgbe_mc_array_itr, TRUE);
4447 } else
4448 ETHER_UNLOCK(ec);
4449 } /* ixgbe_set_rxfilter */
4450
4451 /************************************************************************
4452 * ixgbe_mc_array_itr
4453 *
4454 * An iterator function needed by the multicast shared code.
4455 * It feeds the shared code routine the addresses in the
4456 * array of ixgbe_set_rxfilter() one by one.
4457 ************************************************************************/
4458 static u8 *
4459 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4460 {
4461 struct ixgbe_mc_addr *mta;
4462
4463 mta = (struct ixgbe_mc_addr *)*update_ptr;
4464 *vmdq = mta->vmdq;
4465
4466 *update_ptr = (u8*)(mta + 1);
4467
4468 return (mta->addr);
4469 } /* ixgbe_mc_array_itr */
4470
4471 /************************************************************************
4472 * ixgbe_local_timer - Timer routine
4473 *
4474 * Checks for link status, updates statistics,
4475 * and runs the watchdog check.
4476 ************************************************************************/
4477 static void
4478 ixgbe_local_timer(void *arg)
4479 {
4480 struct adapter *adapter = arg;
4481
4482 if (adapter->schedule_wqs_ok) {
4483 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4484 workqueue_enqueue(adapter->timer_wq,
4485 &adapter->timer_wc, NULL);
4486 }
4487 }
4488
4489 static void
4490 ixgbe_handle_timer(struct work *wk, void *context)
4491 {
4492 struct adapter *adapter = context;
4493 struct ixgbe_hw *hw = &adapter->hw;
4494 device_t dev = adapter->dev;
4495 struct ix_queue *que = adapter->queues;
4496 u64 queues = 0;
4497 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4498 int hung = 0;
4499 int i;
4500
4501 IXGBE_CORE_LOCK(adapter);
4502
4503 /* Check for pluggable optics */
4504 if (ixgbe_is_sfp(hw)) {
4505 bool sched_mod_task = false;
4506
4507 if (hw->mac.type == ixgbe_mac_82598EB) {
4508 /*
4509 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4510 * any GPIO(SDP). So just schedule TASK_MOD.
4511 */
4512 sched_mod_task = true;
4513 } else {
4514 bool was_full, is_full;
4515
4516 was_full =
4517 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4518 is_full = ixgbe_sfp_cage_full(hw);
4519
4520 /* Do probe if cage state changed */
4521 if (was_full ^ is_full)
4522 sched_mod_task = true;
4523 }
4524 if (sched_mod_task) {
4525 atomic_or_32(&adapter->task_requests,
4526 IXGBE_REQUEST_TASK_MOD);
4527 ixgbe_schedule_admin_tasklet(adapter);
4528 }
4529 }
4530
4531 ixgbe_update_link_status(adapter);
4532 ixgbe_update_stats_counters(adapter);
4533
4534 /* Update some event counters */
4535 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4536 que = adapter->queues;
4537 for (i = 0; i < adapter->num_queues; i++, que++) {
4538 struct tx_ring *txr = que->txr;
4539
4540 v0 += txr->q_efbig_tx_dma_setup;
4541 v1 += txr->q_mbuf_defrag_failed;
4542 v2 += txr->q_efbig2_tx_dma_setup;
4543 v3 += txr->q_einval_tx_dma_setup;
4544 v4 += txr->q_other_tx_dma_setup;
4545 v5 += txr->q_eagain_tx_dma_setup;
4546 v6 += txr->q_enomem_tx_dma_setup;
4547 v7 += txr->q_tso_err;
4548 }
4549 adapter->efbig_tx_dma_setup.ev_count = v0;
4550 adapter->mbuf_defrag_failed.ev_count = v1;
4551 adapter->efbig2_tx_dma_setup.ev_count = v2;
4552 adapter->einval_tx_dma_setup.ev_count = v3;
4553 adapter->other_tx_dma_setup.ev_count = v4;
4554 adapter->eagain_tx_dma_setup.ev_count = v5;
4555 adapter->enomem_tx_dma_setup.ev_count = v6;
4556 adapter->tso_err.ev_count = v7;
4557
4558 /*
4559 * Check the TX queues status
4560 * - mark hung queues so we don't schedule on them
4561 * - watchdog only if all queues show hung
4562 */
4563 que = adapter->queues;
4564 for (i = 0; i < adapter->num_queues; i++, que++) {
4565 /* Keep track of queues with work for soft irq */
4566 if (que->txr->busy)
4567 queues |= 1ULL << que->me;
4568 /*
4569 		 * Each time txeof runs while there are uncleaned
4570 		 * descriptors but cleans nothing, it increments busy.
4571 		 * If busy reaches the MAX we declare the queue hung.
4572 */
4573 if (que->busy == IXGBE_QUEUE_HUNG) {
4574 ++hung;
4575 /* Mark the queue as inactive */
4576 adapter->active_queues &= ~(1ULL << que->me);
4577 continue;
4578 } else {
4579 /* Check if we've come back from hung */
4580 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4581 adapter->active_queues |= 1ULL << que->me;
4582 }
4583 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4584 device_printf(dev,
4585 "Warning queue %d appears to be hung!\n", i);
4586 que->txr->busy = IXGBE_QUEUE_HUNG;
4587 ++hung;
4588 }
4589 }
4590
4591 /* Only truly watchdog if all queues show hung */
4592 if (hung == adapter->num_queues)
4593 goto watchdog;
4594 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4595 else if (queues != 0) { /* Force an IRQ on queues with work */
4596 que = adapter->queues;
4597 for (i = 0; i < adapter->num_queues; i++, que++) {
4598 mutex_enter(&que->dc_mtx);
4599 if (que->disabled_count == 0)
4600 ixgbe_rearm_queues(adapter,
4601 queues & ((u64)1 << i));
4602 mutex_exit(&que->dc_mtx);
4603 }
4604 }
4605 #endif
4606
4607 atomic_store_relaxed(&adapter->timer_pending, 0);
4608 IXGBE_CORE_UNLOCK(adapter);
4609 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4610 return;
4611
4612 watchdog:
4613 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4614 adapter->ifp->if_flags &= ~IFF_RUNNING;
4615 adapter->watchdog_events.ev_count++;
4616 ixgbe_init_locked(adapter);
4617 IXGBE_CORE_UNLOCK(adapter);
4618 } /* ixgbe_handle_timer */
4619
4620 /************************************************************************
4621 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4622 ************************************************************************/
4623 static void
4624 ixgbe_recovery_mode_timer(void *arg)
4625 {
4626 struct adapter *adapter = arg;
4627
4628 	if (__predict_true(adapter->osdep.detaching == false)) {
4629 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4630 0, 1) == 0) {
4631 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4632 &adapter->recovery_mode_timer_wc, NULL);
4633 }
4634 }
4635 }
4636
4637 static void
4638 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4639 {
4640 struct adapter *adapter = context;
4641 struct ixgbe_hw *hw = &adapter->hw;
4642
4643 IXGBE_CORE_LOCK(adapter);
4644 if (ixgbe_fw_recovery_mode(hw)) {
4645 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1) == 0) {
4646 /* Firmware error detected, entering recovery mode */
4647 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4648
4649 if (hw->adapter_stopped == FALSE)
4650 ixgbe_stop_locked(adapter);
4651 }
4652 } else
4653 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4654
4655 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4656 callout_reset(&adapter->recovery_mode_timer, hz,
4657 ixgbe_recovery_mode_timer, adapter);
4658 IXGBE_CORE_UNLOCK(adapter);
4659 } /* ixgbe_handle_recovery_mode_timer */
4660
4661 /************************************************************************
4662 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4663 ************************************************************************/
4664 static void
4665 ixgbe_handle_mod(void *context)
4666 {
4667 struct adapter *adapter = context;
4668 struct ixgbe_hw *hw = &adapter->hw;
4669 device_t dev = adapter->dev;
4670 enum ixgbe_sfp_type last_sfp_type;
4671 u32 err;
4672 bool last_unsupported_sfp_recovery;
4673
4674 last_sfp_type = hw->phy.sfp_type;
4675 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4676 ++adapter->mod_workev.ev_count;
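	/*
	 * With the crosstalk errata the module-present signal can be
	 * unreliable, so re-check that the SFP+ cage is actually full
	 * before probing the module.
	 */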
4677 if (adapter->hw.need_crosstalk_fix) {
4678 if ((hw->mac.type != ixgbe_mac_82598EB) &&
4679 !ixgbe_sfp_cage_full(hw))
4680 goto out;
4681 }
4682
4683 err = hw->phy.ops.identify_sfp(hw);
4684 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4685 if (last_unsupported_sfp_recovery == false)
4686 device_printf(dev,
4687 "Unsupported SFP+ module type was detected.\n");
4688 goto out;
4689 }
4690
4691 if (hw->need_unsupported_sfp_recovery) {
4692 device_printf(dev, "Recovering from unsupported SFP\n");
4693 /*
4694 		 * We could recover by calling setup_sfp(), setup_link()
4695 		 * and some others, but that is complex and might not work
4696 		 * correctly in some unknown cases.  To avoid that kind of
4697 		 * problem, call ixgbe_init_locked().  It's the simple and
4698 		 * safe approach.
4699 */
4700 ixgbe_init_locked(adapter);
4701 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4702 (hw->phy.sfp_type != last_sfp_type)) {
4703 /* A module is inserted and changed. */
4704
4705 if (hw->mac.type == ixgbe_mac_82598EB)
4706 err = hw->phy.ops.reset(hw);
4707 else {
4708 err = hw->mac.ops.setup_sfp(hw);
4709 hw->phy.sfp_setup_needed = FALSE;
4710 }
4711 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4712 device_printf(dev,
4713 "Setup failure - unsupported SFP+ module type.\n");
4714 goto out;
4715 }
4716 }
4717
4718 out:
4719 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4720 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4721
4722 /* Adjust media types shown in ifconfig */
4723 IXGBE_CORE_UNLOCK(adapter);
4724 ifmedia_removeall(&adapter->media);
4725 ixgbe_add_media_types(adapter);
4726 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4727 IXGBE_CORE_LOCK(adapter);
4728
4729 /*
4730 	 * Don't schedule the MSF event if the chip is 82598; it doesn't
4731 	 * support MSF.  Moreover, calling ixgbe_handle_msf on 82598 DA makes
4732 	 * the link flap because the function calls setup_link().
4733 */
4734 if (hw->mac.type != ixgbe_mac_82598EB)
4735 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
4736
4737 /*
4738 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4739 * the workqueue now.
4740 */
4741 } /* ixgbe_handle_mod */
4742
4743
4744 /************************************************************************
4745 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4746 ************************************************************************/
4747 static void
4748 ixgbe_handle_msf(void *context)
4749 {
4750 struct adapter *adapter = context;
4751 struct ixgbe_hw *hw = &adapter->hw;
4752 u32 autoneg;
4753 bool negotiate;
4754
4755 ++adapter->msf_workev.ev_count;
4756
4757 autoneg = hw->phy.autoneg_advertised;
4758 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4759 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4760 if (hw->mac.ops.setup_link)
4761 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4762 } /* ixgbe_handle_msf */
4763
4764 /************************************************************************
4765 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4766 ************************************************************************/
4767 static void
4768 ixgbe_handle_phy(void *context)
4769 {
4770 struct adapter *adapter = context;
4771 struct ixgbe_hw *hw = &adapter->hw;
4772 int error;
4773
4774 ++adapter->phy_workev.ev_count;
4775 error = hw->phy.ops.handle_lasi(hw);
4776 if (error == IXGBE_ERR_OVERTEMP)
4777 device_printf(adapter->dev,
4778 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4779 " PHY will downshift to lower power state!\n");
4780 else if (error)
4781 device_printf(adapter->dev,
4782 "Error handling LASI interrupt: %d\n", error);
4783 } /* ixgbe_handle_phy */
4784
4785 static void
4786 ixgbe_handle_admin(struct work *wk, void *context)
4787 {
4788 struct adapter *adapter = context;
4789 struct ifnet *ifp = adapter->ifp;
4790 struct ixgbe_hw *hw = &adapter->hw;
4791 u32 req;
4792
4793 /*
4794 * Hold the IFNET_LOCK across this entire call. This will
4795 * prevent additional changes to adapter->phy_layer
4796 * and serialize calls to this tasklet. We cannot hold the
4797 * CORE_LOCK while calling into the ifmedia functions as
4798 * they call ifmedia_lock() and the lock is CORE_LOCK.
4799 */
4800 IFNET_LOCK(ifp);
4801 IXGBE_CORE_LOCK(adapter);
4802 while ((req =
4803 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR))
4804 != 0) {
4805 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
4806 ixgbe_handle_link(adapter);
4807 atomic_and_32(&adapter->task_requests,
4808 ~IXGBE_REQUEST_TASK_LSC);
4809 }
4810 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
4811 ixgbe_handle_mod(adapter);
4812 atomic_and_32(&adapter->task_requests,
4813 ~IXGBE_REQUEST_TASK_MOD);
4814 }
4815 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
4816 ixgbe_handle_msf(adapter);
4817 atomic_and_32(&adapter->task_requests,
4818 ~IXGBE_REQUEST_TASK_MSF);
4819 }
4820 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
4821 ixgbe_handle_phy(adapter);
4822 atomic_and_32(&adapter->task_requests,
4823 ~IXGBE_REQUEST_TASK_PHY);
4824 }
4825 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
4826 ixgbe_reinit_fdir(adapter);
4827 atomic_and_32(&adapter->task_requests,
4828 ~IXGBE_REQUEST_TASK_FDIR);
4829 }
4830 #if 0 /* notyet */
4831 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
4832 ixgbe_handle_mbx(adapter);
4833 atomic_and_32(&adapter->task_requests,
4834 ~IXGBE_REQUEST_TASK_MBX);
4835 }
4836 #endif
4837 }
4838 atomic_store_relaxed(&adapter->admin_pending, 0);
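	/*
	 * All requested tasks have run; now acknowledge the interrupt by
	 * re-enabling the admin interrupt source(s).
	 */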
4839 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
4840 atomic_and_32(&adapter->task_requests,
4841 ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
4842 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
4843 /* Re-enable other interrupts */
4844 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4845 } else
4846 ixgbe_enable_intr(adapter);
4847 }
4848
4849 IXGBE_CORE_UNLOCK(adapter);
4850 IFNET_UNLOCK(ifp);
4851 } /* ixgbe_handle_admin */
4852
4853 static void
4854 ixgbe_ifstop(struct ifnet *ifp, int disable)
4855 {
4856 struct adapter *adapter = ifp->if_softc;
4857
4858 IXGBE_CORE_LOCK(adapter);
4859 ixgbe_stop_locked(adapter);
4860 IXGBE_CORE_UNLOCK(adapter);
4861
4862 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4863 atomic_store_relaxed(&adapter->timer_pending, 0);
4864 }
4865
4866 /************************************************************************
4867 * ixgbe_stop_locked - Stop the hardware
4868 *
4869 * Disables all traffic on the adapter by issuing a
4870 * global reset on the MAC and deallocates TX/RX buffers.
4871 ************************************************************************/
4872 static void
4873 ixgbe_stop_locked(void *arg)
4874 {
4875 struct ifnet *ifp;
4876 struct adapter *adapter = arg;
4877 struct ixgbe_hw *hw = &adapter->hw;
4878
4879 ifp = adapter->ifp;
4880
4881 KASSERT(mutex_owned(&adapter->core_mtx));
4882
4883 INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
4884 ixgbe_disable_intr(adapter);
4885 callout_stop(&adapter->timer);
4886
4887 /* Don't schedule workqueues. */
4888 adapter->schedule_wqs_ok = false;
4889
4890 /* Let the stack know...*/
4891 ifp->if_flags &= ~IFF_RUNNING;
4892
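	/* Reset the chip, then run the shared-code stop sequence. */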
4893 ixgbe_reset_hw(hw);
4894 hw->adapter_stopped = FALSE;
4895 ixgbe_stop_adapter(hw);
4896 if (hw->mac.type == ixgbe_mac_82599EB)
4897 ixgbe_stop_mac_link_on_d3_82599(hw);
4898 /* Turn off the laser - noop with no optics */
4899 ixgbe_disable_tx_laser(hw);
4900
4901 /* Update the stack */
4902 adapter->link_up = FALSE;
4903 ixgbe_update_link_status(adapter);
4904
4905 /* reprogram the RAR[0] in case user changed it. */
4906 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4907
4908 return;
4909 } /* ixgbe_stop_locked */
4910
4911 /************************************************************************
4912 * ixgbe_update_link_status - Update OS on link state
4913 *
4914 * Note: Only updates the OS on the cached link state.
4915 * The real check of the hardware only happens with
4916 * a link interrupt.
4917 ************************************************************************/
4918 static void
4919 ixgbe_update_link_status(struct adapter *adapter)
4920 {
4921 struct ifnet *ifp = adapter->ifp;
4922 device_t dev = adapter->dev;
4923 struct ixgbe_hw *hw = &adapter->hw;
4924
4925 KASSERT(mutex_owned(&adapter->core_mtx));
4926
4927 if (adapter->link_up) {
4928 if (adapter->link_active != LINK_STATE_UP) {
4929 			/*
4930 			 * Reset each queue's EITR setting so the previous
4931 			 * link state has no influence, as ixgbe_init_locked()
4932 			 * does.
4933 struct ix_queue *que = adapter->queues;
4934 for (int i = 0; i < adapter->num_queues; i++, que++)
4935 que->eitr_setting = 0;
4936
4937 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
4938 				/*
4939 				 * Discard the counts of both MAC Local Fault
4940 				 * and Remote Fault: those registers are valid
4941 				 * only while the link is up at 10Gbps.
4942 				 */
4943 
4944 IXGBE_READ_REG(hw, IXGBE_MLFC);
4945 IXGBE_READ_REG(hw, IXGBE_MRFC);
4946 }
4947
4948 if (bootverbose) {
4949 const char *bpsmsg;
4950
4951 switch (adapter->link_speed) {
4952 case IXGBE_LINK_SPEED_10GB_FULL:
4953 bpsmsg = "10 Gbps";
4954 break;
4955 case IXGBE_LINK_SPEED_5GB_FULL:
4956 bpsmsg = "5 Gbps";
4957 break;
4958 case IXGBE_LINK_SPEED_2_5GB_FULL:
4959 bpsmsg = "2.5 Gbps";
4960 break;
4961 case IXGBE_LINK_SPEED_1GB_FULL:
4962 bpsmsg = "1 Gbps";
4963 break;
4964 case IXGBE_LINK_SPEED_100_FULL:
4965 bpsmsg = "100 Mbps";
4966 break;
4967 case IXGBE_LINK_SPEED_10_FULL:
4968 bpsmsg = "10 Mbps";
4969 break;
4970 default:
4971 bpsmsg = "unknown speed";
4972 break;
4973 }
4974 device_printf(dev, "Link is up %s %s \n",
4975 bpsmsg, "Full Duplex");
4976 }
4977 adapter->link_active = LINK_STATE_UP;
4978 /* Update any Flow Control changes */
4979 ixgbe_fc_enable(&adapter->hw);
4980 /* Update DMA coalescing config */
4981 ixgbe_config_dmac(adapter);
4982 if_link_state_change(ifp, LINK_STATE_UP);
4983
4984 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4985 ixgbe_ping_all_vfs(adapter);
4986 }
4987 } else {
4988 /*
4989 		 * Act only when link_active changes to DOWN, i.e.
4990 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
4991 * b) LINK_STATE_UP -> LINK_STATE_DOWN
4992 */
4993 if (adapter->link_active != LINK_STATE_DOWN) {
4994 if (bootverbose)
4995 device_printf(dev, "Link is Down\n");
4996 if_link_state_change(ifp, LINK_STATE_DOWN);
4997 adapter->link_active = LINK_STATE_DOWN;
4998 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4999 ixgbe_ping_all_vfs(adapter);
5000 ixgbe_drain_all(adapter);
5001 }
5002 }
5003 } /* ixgbe_update_link_status */
5004
5005 /************************************************************************
5006 * ixgbe_config_dmac - Configure DMA Coalescing
5007 ************************************************************************/
5008 static void
5009 ixgbe_config_dmac(struct adapter *adapter)
5010 {
5011 struct ixgbe_hw *hw = &adapter->hw;
5012 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5013
5014 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5015 return;
5016
5017 if (dcfg->watchdog_timer ^ adapter->dmac ||
5018 dcfg->link_speed ^ adapter->link_speed) {
5019 dcfg->watchdog_timer = adapter->dmac;
5020 dcfg->fcoe_en = false;
5021 dcfg->link_speed = adapter->link_speed;
5022 dcfg->num_tcs = 1;
5023
5024 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5025 dcfg->watchdog_timer, dcfg->link_speed);
5026
5027 hw->mac.ops.dmac_config(hw);
5028 }
5029 } /* ixgbe_config_dmac */
5030
5031 /************************************************************************
5032 * ixgbe_enable_intr
5033 ************************************************************************/
5034 static void
5035 ixgbe_enable_intr(struct adapter *adapter)
5036 {
5037 struct ixgbe_hw *hw = &adapter->hw;
5038 struct ix_queue *que = adapter->queues;
5039 u32 mask, fwsm;
5040
5041 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5042
5043 switch (adapter->hw.mac.type) {
5044 case ixgbe_mac_82599EB:
5045 mask |= IXGBE_EIMS_ECC;
5046 /* Temperature sensor on some adapters */
5047 mask |= IXGBE_EIMS_GPI_SDP0;
5048 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5049 mask |= IXGBE_EIMS_GPI_SDP1;
5050 mask |= IXGBE_EIMS_GPI_SDP2;
5051 break;
5052 case ixgbe_mac_X540:
5053 /* Detect if Thermal Sensor is enabled */
5054 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5055 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5056 mask |= IXGBE_EIMS_TS;
5057 mask |= IXGBE_EIMS_ECC;
5058 break;
5059 case ixgbe_mac_X550:
5060 /* MAC thermal sensor is automatically enabled */
5061 mask |= IXGBE_EIMS_TS;
5062 mask |= IXGBE_EIMS_ECC;
5063 break;
5064 case ixgbe_mac_X550EM_x:
5065 case ixgbe_mac_X550EM_a:
5066 /* Some devices use SDP0 for important information */
5067 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5068 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5069 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5070 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5071 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5072 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5073 mask |= IXGBE_EICR_GPI_SDP0_X540;
5074 mask |= IXGBE_EIMS_ECC;
5075 break;
5076 default:
5077 break;
5078 }
5079
5080 /* Enable Fan Failure detection */
5081 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5082 mask |= IXGBE_EIMS_GPI_SDP1;
5083 /* Enable SR-IOV */
5084 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5085 mask |= IXGBE_EIMS_MAILBOX;
5086 /* Enable Flow Director */
5087 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5088 mask |= IXGBE_EIMS_FLOW_DIR;
5089
5090 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5091
5092 /* With MSI-X we use auto clear */
5093 if (adapter->msix_mem) {
5094 mask = IXGBE_EIMS_ENABLE_MASK;
5095 /* Don't autoclear Link */
5096 mask &= ~IXGBE_EIMS_OTHER;
5097 mask &= ~IXGBE_EIMS_LSC;
5098 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5099 mask &= ~IXGBE_EIMS_MAILBOX;
5100 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5101 }
5102
5103 /*
5104 	 * Now enable all queues; this is done separately to
5105 	 * allow for handling the extended (beyond 32) MSI-X
5106 	 * vectors that can be used by 82599.
5107 */
5108 for (int i = 0; i < adapter->num_queues; i++, que++)
5109 ixgbe_enable_queue(adapter, que->msix);
5110
5111 IXGBE_WRITE_FLUSH(hw);
5112
5113 } /* ixgbe_enable_intr */
5114
5115 /************************************************************************
5116 * ixgbe_disable_intr_internal
5117 ************************************************************************/
5118 static void
5119 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5120 {
5121 struct ix_queue *que = adapter->queues;
5122
5123 /* disable interrupts other than queues */
5124 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5125
5126 if (adapter->msix_mem)
5127 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5128
5129 for (int i = 0; i < adapter->num_queues; i++, que++)
5130 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5131
5132 IXGBE_WRITE_FLUSH(&adapter->hw);
5133
5134 } /* ixgbe_disable_intr_internal */
5135
5136 /************************************************************************
5137 * ixgbe_disable_intr
5138 ************************************************************************/
5139 static void
5140 ixgbe_disable_intr(struct adapter *adapter)
5141 {
5142
5143 ixgbe_disable_intr_internal(adapter, true);
5144 } /* ixgbe_disable_intr */
5145
5146 /************************************************************************
5147 * ixgbe_ensure_disabled_intr
5148 ************************************************************************/
5149 void
5150 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5151 {
5152
5153 ixgbe_disable_intr_internal(adapter, false);
5154 } /* ixgbe_ensure_disabled_intr */
5155
5156 /************************************************************************
5157 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5158 ************************************************************************/
5159 static int
5160 ixgbe_legacy_irq(void *arg)
5161 {
5162 struct ix_queue *que = arg;
5163 struct adapter *adapter = que->adapter;
5164 struct ixgbe_hw *hw = &adapter->hw;
5165 struct ifnet *ifp = adapter->ifp;
5166 struct tx_ring *txr = adapter->tx_rings;
5167 bool more = false;
5168 bool reenable_intr = true;
5169 u32 eicr, eicr_mask;
5170 u32 task_requests = 0;
5171
5172 /* Silicon errata #26 on 82598 */
5173 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5174
5175 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5176
5177 adapter->stats.pf.legint.ev_count++;
5178 ++que->irqs.ev_count;
5179 if (eicr == 0) {
5180 adapter->stats.pf.intzero.ev_count++;
5181 if ((ifp->if_flags & IFF_UP) != 0)
5182 ixgbe_enable_intr(adapter);
5183 return 0;
5184 }
5185
5186 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5187 /*
5188 		 * Same as in ixgbe_msix_que(): cache "que->txrx_use_workqueue".
5189 */
5190 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5191
5192 #ifdef __NetBSD__
5193 /* Don't run ixgbe_rxeof in interrupt context */
5194 more = true;
5195 #else
5196 more = ixgbe_rxeof(que);
5197 #endif
5198
5199 IXGBE_TX_LOCK(txr);
5200 ixgbe_txeof(txr);
5201 #ifdef notyet
5202 if (!ixgbe_ring_empty(ifp, txr->br))
5203 ixgbe_start_locked(ifp, txr);
5204 #endif
5205 IXGBE_TX_UNLOCK(txr);
5206 }
5207
5208 /* Check for fan failure */
5209 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5210 ixgbe_check_fan_failure(adapter, eicr, true);
5211 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5212 }
5213
5214 /* Link status change */
5215 if (eicr & IXGBE_EICR_LSC)
5216 task_requests |= IXGBE_REQUEST_TASK_LSC;
5217
5218 if (ixgbe_is_sfp(hw)) {
5219 /* Pluggable optics-related interrupt */
5220 if (hw->mac.type >= ixgbe_mac_X540)
5221 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5222 else
5223 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5224
5225 if (eicr & eicr_mask) {
5226 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5227 task_requests |= IXGBE_REQUEST_TASK_MOD;
5228 }
5229
5230 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5231 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5232 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5233 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5234 task_requests |= IXGBE_REQUEST_TASK_MSF;
5235 }
5236 }
5237
5238 /* External PHY interrupt */
5239 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5240 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5241 task_requests |= IXGBE_REQUEST_TASK_PHY;
5242
5243 if (more) {
5244 que->req.ev_count++;
5245 ixgbe_sched_handle_que(adapter, que);
5246 reenable_intr = false;
5247 }
5248 if (task_requests != 0) {
5249 /* Re-enabling other interrupts is done in the admin task */
5250 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
5251 atomic_or_32(&adapter->task_requests, task_requests);
5252 ixgbe_schedule_admin_tasklet(adapter);
5253 reenable_intr = false;
5254 }
5255
5256 if (reenable_intr == true)
5257 ixgbe_enable_intr(adapter);
5258
5259 return 1;
5260 } /* ixgbe_legacy_irq */
5261
5262 /************************************************************************
5263 * ixgbe_free_pciintr_resources
5264 ************************************************************************/
5265 static void
5266 ixgbe_free_pciintr_resources(struct adapter *adapter)
5267 {
5268 struct ix_queue *que = adapter->queues;
5269 int rid;
5270
5271 /*
5272 * Release all msix queue resources:
5273 */
5274 for (int i = 0; i < adapter->num_queues; i++, que++) {
5275 if (que->res != NULL) {
5276 pci_intr_disestablish(adapter->osdep.pc,
5277 adapter->osdep.ihs[i]);
5278 adapter->osdep.ihs[i] = NULL;
5279 }
5280 }
5281
5282 /* Clean the Legacy or Link interrupt last */
5283 if (adapter->vector) /* we are doing MSIX */
5284 rid = adapter->vector;
5285 else
5286 rid = 0;
5287
5288 if (adapter->osdep.ihs[rid] != NULL) {
5289 pci_intr_disestablish(adapter->osdep.pc,
5290 adapter->osdep.ihs[rid]);
5291 adapter->osdep.ihs[rid] = NULL;
5292 }
5293
5294 if (adapter->osdep.intrs != NULL) {
5295 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5296 adapter->osdep.nintrs);
5297 adapter->osdep.intrs = NULL;
5298 }
5299 } /* ixgbe_free_pciintr_resources */
5300
5301 /************************************************************************
5302 * ixgbe_free_pci_resources
5303 ************************************************************************/
5304 static void
5305 ixgbe_free_pci_resources(struct adapter *adapter)
5306 {
5307
5308 ixgbe_free_pciintr_resources(adapter);
5309
5310 if (adapter->osdep.mem_size != 0) {
5311 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5312 adapter->osdep.mem_bus_space_handle,
5313 adapter->osdep.mem_size);
5314 }
5315
5316 } /* ixgbe_free_pci_resources */
5317
5318 /************************************************************************
5319 * ixgbe_set_sysctl_value
5320 ************************************************************************/
5321 static void
5322 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5323 const char *description, int *limit, int value)
5324 {
5325 device_t dev = adapter->dev;
5326 struct sysctllog **log;
5327 const struct sysctlnode *rnode, *cnode;
5328
5329 /*
5330 * It's not required to check recovery mode because this function never
5331 * touches hardware.
5332 */
5333
5334 log = &adapter->sysctllog;
5335 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5336 aprint_error_dev(dev, "could not create sysctl root\n");
5337 return;
5338 }
5339 if (sysctl_createv(log, 0, &rnode, &cnode,
5340 CTLFLAG_READWRITE, CTLTYPE_INT,
5341 name, SYSCTL_DESCR(description),
5342 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5343 aprint_error_dev(dev, "could not create sysctl\n");
5344 *limit = value;
5345 } /* ixgbe_set_sysctl_value */
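/*
 * Usage sketch (illustrative only, not part of this revision): register a
 * read/write integer limit under the per-device sysctl root and seed its
 * default.  The node name, description, field and default value below are
 * examples, not fixed parts of the driver.
 */
#if 0 /* example */
	ixgbe_set_sysctl_value(adapter, "rx_process_limit",
	    "max number of RX packets to process",
	    &adapter->rx_process_limit, 256);
#endif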
5346
5347 /************************************************************************
5348 * ixgbe_sysctl_flowcntl
5349 *
5350 * SYSCTL wrapper around setting Flow Control
5351 ************************************************************************/
5352 static int
5353 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5354 {
5355 struct sysctlnode node = *rnode;
5356 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5357 int error, fc;
5358
5359 if (ixgbe_fw_recovery_mode_swflag(adapter))
5360 return (EPERM);
5361
5362 fc = adapter->hw.fc.current_mode;
5363 node.sysctl_data = &fc;
5364 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5365 if (error != 0 || newp == NULL)
5366 return error;
5367
5368 /* Don't bother if it's not changed */
5369 if (fc == adapter->hw.fc.current_mode)
5370 return (0);
5371
5372 return ixgbe_set_flowcntl(adapter, fc);
5373 } /* ixgbe_sysctl_flowcntl */
5374
5375 /************************************************************************
5376 * ixgbe_set_flowcntl - Set flow control
5377 *
5378 * Flow control values:
5379 * 0 - off
5380 * 1 - rx pause
5381 * 2 - tx pause
5382 * 3 - full
5383 ************************************************************************/
5384 static int
5385 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5386 {
5387 switch (fc) {
5388 case ixgbe_fc_rx_pause:
5389 case ixgbe_fc_tx_pause:
5390 case ixgbe_fc_full:
5391 adapter->hw.fc.requested_mode = fc;
5392 if (adapter->num_queues > 1)
5393 ixgbe_disable_rx_drop(adapter);
5394 break;
5395 case ixgbe_fc_none:
5396 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5397 if (adapter->num_queues > 1)
5398 ixgbe_enable_rx_drop(adapter);
5399 break;
5400 default:
5401 return (EINVAL);
5402 }
5403
5404 #if 0 /* XXX NetBSD */
5405 /* Don't autoneg if forcing a value */
5406 adapter->hw.fc.disable_fc_autoneg = TRUE;
5407 #endif
5408 ixgbe_fc_enable(&adapter->hw);
5409
5410 return (0);
5411 } /* ixgbe_set_flowcntl */
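/*
 * In-kernel usage sketch (illustrative only): the ixgbe_fc_* enum values
 * line up with the sysctl values documented above (ixgbe_fc_none == 0
 * ... ixgbe_fc_full == 3), so forcing full flow control is:
 */
#if 0 /* example */
	error = ixgbe_set_flowcntl(adapter, ixgbe_fc_full);	/* same as 3 */
#endif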
5412
5413 /************************************************************************
5414 * ixgbe_enable_rx_drop
5415 *
5416 * Enable the hardware to drop packets when the buffer is
5417 * full. This is useful with multiqueue, so that no single
5418 * queue being full stalls the entire RX engine. We only
5419 * enable this when Multiqueue is enabled AND Flow Control
5420 * is disabled.
5421 ************************************************************************/
5422 static void
5423 ixgbe_enable_rx_drop(struct adapter *adapter)
5424 {
5425 struct ixgbe_hw *hw = &adapter->hw;
5426 struct rx_ring *rxr;
5427 u32 srrctl;
5428
5429 for (int i = 0; i < adapter->num_queues; i++) {
5430 rxr = &adapter->rx_rings[i];
5431 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5432 srrctl |= IXGBE_SRRCTL_DROP_EN;
5433 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5434 }
5435
5436 /* enable drop for each vf */
5437 for (int i = 0; i < adapter->num_vfs; i++) {
5438 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5439 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5440 IXGBE_QDE_ENABLE));
5441 }
5442 } /* ixgbe_enable_rx_drop */
5443
5444 /************************************************************************
5445 * ixgbe_disable_rx_drop
5446 ************************************************************************/
5447 static void
5448 ixgbe_disable_rx_drop(struct adapter *adapter)
5449 {
5450 struct ixgbe_hw *hw = &adapter->hw;
5451 struct rx_ring *rxr;
5452 u32 srrctl;
5453
5454 for (int i = 0; i < adapter->num_queues; i++) {
5455 rxr = &adapter->rx_rings[i];
5456 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5457 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5458 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5459 }
5460
5461 /* disable drop for each vf */
5462 for (int i = 0; i < adapter->num_vfs; i++) {
5463 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5464 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5465 }
5466 } /* ixgbe_disable_rx_drop */
5467
5468 /************************************************************************
5469 * ixgbe_sysctl_advertise
5470 *
5471 * SYSCTL wrapper around setting advertised speed
5472 ************************************************************************/
5473 static int
5474 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5475 {
5476 struct sysctlnode node = *rnode;
5477 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5478 int error = 0, advertise;
5479
5480 if (ixgbe_fw_recovery_mode_swflag(adapter))
5481 return (EPERM);
5482
5483 advertise = adapter->advertise;
5484 node.sysctl_data = &advertise;
5485 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5486 if (error != 0 || newp == NULL)
5487 return error;
5488
5489 return ixgbe_set_advertise(adapter, advertise);
5490 } /* ixgbe_sysctl_advertise */
5491
5492 /************************************************************************
5493 * ixgbe_set_advertise - Control advertised link speed
5494 *
5495 * Flags:
5496 * 0x00 - Default (all capable link speed)
5497 * 0x01 - advertise 100 Mb
5498 * 0x02 - advertise 1G
5499 * 0x04 - advertise 10G
5500 * 0x08 - advertise 10 Mb
5501 * 0x10 - advertise 2.5G
5502 * 0x20 - advertise 5G
5503 ************************************************************************/
5504 static int
5505 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5506 {
5507 device_t dev;
5508 struct ixgbe_hw *hw;
5509 ixgbe_link_speed speed = 0;
5510 ixgbe_link_speed link_caps = 0;
5511 s32 err = IXGBE_NOT_IMPLEMENTED;
5512 bool negotiate = FALSE;
5513
5514 /* Checks to validate new value */
5515 if (adapter->advertise == advertise) /* no change */
5516 return (0);
5517
5518 dev = adapter->dev;
5519 hw = &adapter->hw;
5520
5521 /* No speed changes for backplane media */
5522 if (hw->phy.media_type == ixgbe_media_type_backplane)
5523 return (ENODEV);
5524
5525 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5526 (hw->phy.multispeed_fiber))) {
5527 device_printf(dev,
5528 "Advertised speed can only be set on copper or "
5529 "multispeed fiber media types.\n");
5530 return (EINVAL);
5531 }
5532
5533 	if (advertise < 0x0 || advertise > 0x3f) {
5534 		device_printf(dev,
5535 		    "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5536 return (EINVAL);
5537 }
5538
5539 if (hw->mac.ops.get_link_capabilities) {
5540 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5541 &negotiate);
5542 if (err != IXGBE_SUCCESS) {
5543 device_printf(dev, "Unable to determine supported advertise speeds\n");
5544 return (ENODEV);
5545 }
5546 }
5547
5548 /* Set new value and report new advertised mode */
5549 if (advertise & 0x1) {
5550 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5551 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5552 return (EINVAL);
5553 }
5554 speed |= IXGBE_LINK_SPEED_100_FULL;
5555 }
5556 if (advertise & 0x2) {
5557 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5558 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5559 return (EINVAL);
5560 }
5561 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5562 }
5563 if (advertise & 0x4) {
5564 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5565 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5566 return (EINVAL);
5567 }
5568 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5569 }
5570 if (advertise & 0x8) {
5571 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5572 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5573 return (EINVAL);
5574 }
5575 speed |= IXGBE_LINK_SPEED_10_FULL;
5576 }
5577 if (advertise & 0x10) {
5578 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5579 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5580 return (EINVAL);
5581 }
5582 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5583 }
5584 if (advertise & 0x20) {
5585 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5586 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5587 return (EINVAL);
5588 }
5589 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5590 }
5591 if (advertise == 0)
5592 speed = link_caps; /* All capable link speed */
5593
5594 hw->mac.autotry_restart = TRUE;
5595 hw->mac.ops.setup_link(hw, speed, TRUE);
5596 adapter->advertise = advertise;
5597
5598 return (0);
5599 } /* ixgbe_set_advertise */
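/*
 * Worked example (illustrative only): the flags above OR together, so
 * advertising both 1G and 10G is 0x02 | 0x04 == 0x06, while 0 restores
 * every speed the hardware reports in its link capabilities.
 */
#if 0 /* example */
	error = ixgbe_set_advertise(adapter, 0x02 | 0x04);	/* 1G + 10G */
#endif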
5600
5601 /************************************************************************
5602 * ixgbe_get_advertise - Get current advertised speed settings
5603 *
5604 * Formatted for sysctl usage.
5605 * Flags:
5606 * 0x01 - advertise 100 Mb
5607 * 0x02 - advertise 1G
5608 * 0x04 - advertise 10G
5609 * 0x08 - advertise 10 Mb (yes, Mb)
5610 * 0x10 - advertise 2.5G
5611 * 0x20 - advertise 5G
5612 ************************************************************************/
5613 static int
5614 ixgbe_get_advertise(struct adapter *adapter)
5615 {
5616 struct ixgbe_hw *hw = &adapter->hw;
5617 int speed;
5618 ixgbe_link_speed link_caps = 0;
5619 s32 err;
5620 bool negotiate = FALSE;
5621
5622 /*
5623 * Advertised speed means nothing unless it's copper or
5624 * multi-speed fiber
5625 */
5626 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5627 !(hw->phy.multispeed_fiber))
5628 return (0);
5629
5630 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5631 if (err != IXGBE_SUCCESS)
5632 return (0);
5633
5634 speed =
5635 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5636 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5637 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5638 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5639 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5640 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5641
5642 return speed;
5643 } /* ixgbe_get_advertise */
5644
5645 /************************************************************************
5646 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5647 *
5648 * Control values:
5649 * 0/1 - off / on (use default value of 1000)
5650 *
5651 * Legal timer values are:
5652 * 50,100,250,500,1000,2000,5000,10000
5653 *
5654 * Turning off interrupt moderation will also turn this off.
5655 ************************************************************************/
5656 static int
5657 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5658 {
5659 struct sysctlnode node = *rnode;
5660 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5661 struct ifnet *ifp = adapter->ifp;
5662 int error;
5663 int newval;
5664
5665 if (ixgbe_fw_recovery_mode_swflag(adapter))
5666 return (EPERM);
5667
5668 newval = adapter->dmac;
5669 node.sysctl_data = &newval;
5670 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5671 if ((error) || (newp == NULL))
5672 return (error);
5673
5674 switch (newval) {
5675 case 0:
5676 /* Disabled */
5677 adapter->dmac = 0;
5678 break;
5679 case 1:
5680 /* Enable and use default */
5681 adapter->dmac = 1000;
5682 break;
5683 case 50:
5684 case 100:
5685 case 250:
5686 case 500:
5687 case 1000:
5688 case 2000:
5689 case 5000:
5690 case 10000:
5691 /* Legal values - allow */
5692 adapter->dmac = newval;
5693 break;
5694 default:
5695 /* Do nothing, illegal value */
5696 return (EINVAL);
5697 }
5698
5699 /* Re-initialize hardware if it's already running */
5700 if (ifp->if_flags & IFF_RUNNING)
5701 ifp->if_init(ifp);
5702
5703 return (0);
5704 }
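/*
 * Usage sketch (illustrative only): enabling DMA coalescing with the
 * default timer is equivalent to writing 1 to the sysctl; the value is
 * pushed to hardware by the re-init above via ixgbe_config_dmac().
 */
#if 0 /* example */
	adapter->dmac = 1000;		/* what newval == 1 resolves to */
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_init(ifp);
#endif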
5705
5706 #ifdef IXGBE_DEBUG
5707 /************************************************************************
5708 * ixgbe_sysctl_power_state
5709 *
5710 * Sysctl to test power states
5711 * Values:
5712 * 0 - set device to D0
5713 * 3 - set device to D3
5714 * (none) - get current device power state
5715 ************************************************************************/
5716 static int
5717 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5718 {
5719 #ifdef notyet
5720 struct sysctlnode node = *rnode;
5721 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5722 device_t dev = adapter->dev;
5723 int curr_ps, new_ps, error = 0;
5724
5725 if (ixgbe_fw_recovery_mode_swflag(adapter))
5726 return (EPERM);
5727
5728 curr_ps = new_ps = pci_get_powerstate(dev);
5729
5730 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5731 if ((error) || (req->newp == NULL))
5732 return (error);
5733
5734 if (new_ps == curr_ps)
5735 return (0);
5736
5737 if (new_ps == 3 && curr_ps == 0)
5738 error = DEVICE_SUSPEND(dev);
5739 else if (new_ps == 0 && curr_ps == 3)
5740 error = DEVICE_RESUME(dev);
5741 else
5742 return (EINVAL);
5743
5744 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5745
5746 return (error);
5747 #else
5748 return 0;
5749 #endif
5750 } /* ixgbe_sysctl_power_state */
5751 #endif
5752
5753 /************************************************************************
5754 * ixgbe_sysctl_wol_enable
5755 *
5756 * Sysctl to enable/disable the WoL capability,
5757 * if supported by the adapter.
5758 *
5759 * Values:
5760 * 0 - disabled
5761 * 1 - enabled
5762 ************************************************************************/
5763 static int
5764 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5765 {
5766 struct sysctlnode node = *rnode;
5767 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5768 struct ixgbe_hw *hw = &adapter->hw;
5769 bool new_wol_enabled;
5770 int error = 0;
5771
5772 /*
5773 * It's not required to check recovery mode because this function never
5774 * touches hardware.
5775 */
5776 new_wol_enabled = hw->wol_enabled;
5777 node.sysctl_data = &new_wol_enabled;
5778 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5779 if ((error) || (newp == NULL))
5780 return (error);
5781 if (new_wol_enabled == hw->wol_enabled)
5782 return (0);
5783
5784 if (new_wol_enabled && !adapter->wol_support)
5785 return (ENODEV);
5786 else
5787 hw->wol_enabled = new_wol_enabled;
5788
5789 return (0);
5790 } /* ixgbe_sysctl_wol_enable */
5791
5792 /************************************************************************
5793 * ixgbe_sysctl_wufc - Wake Up Filter Control
5794 *
5795 * Sysctl to enable/disable the types of packets that the
5796 * adapter will wake up on upon receipt.
5797 * Flags:
5798 * 0x1 - Link Status Change
5799 * 0x2 - Magic Packet
5800 * 0x4 - Direct Exact
5801 * 0x8 - Directed Multicast
5802 * 0x10 - Broadcast
5803 * 0x20 - ARP/IPv4 Request Packet
5804 * 0x40 - Direct IPv4 Packet
5805 * 0x80 - Direct IPv6 Packet
5806 *
5807 * Settings not listed above will cause the sysctl to return an error.
5808 ************************************************************************/
5809 static int
5810 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5811 {
5812 struct sysctlnode node = *rnode;
5813 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5814 int error = 0;
5815 u32 new_wufc;
5816
5817 /*
5818 * It's not required to check recovery mode because this function never
5819 * touches hardware.
5820 */
5821 new_wufc = adapter->wufc;
5822 node.sysctl_data = &new_wufc;
5823 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5824 if ((error) || (newp == NULL))
5825 return (error);
5826 if (new_wufc == adapter->wufc)
5827 return (0);
5828
5829 if (new_wufc & 0xffffff00)
5830 return (EINVAL);
5831
5832 new_wufc &= 0xff;
5833 new_wufc |= (0xffffff & adapter->wufc);
5834 adapter->wufc = new_wufc;
5835
5836 return (0);
5837 } /* ixgbe_sysctl_wufc */
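/*
 * Usage sketch (illustrative only): waking on Link Status Change plus
 * Magic Packet is flags 0x1 | 0x2; any bit outside the low byte is
 * rejected by the check above.
 */
#if 0 /* example */
	adapter->wufc = 0x1 | 0x2;	/* LSC + Magic Packet */
#endif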
5838
5839 #ifdef IXGBE_DEBUG
5840 /************************************************************************
5841 * ixgbe_sysctl_print_rss_config
5842 ************************************************************************/
5843 static int
5844 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5845 {
5846 #ifdef notyet
5847 struct sysctlnode node = *rnode;
5848 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5849 struct ixgbe_hw *hw = &adapter->hw;
5850 device_t dev = adapter->dev;
5851 struct sbuf *buf;
5852 int error = 0, reta_size;
5853 u32 reg;
5854
5855 if (ixgbe_fw_recovery_mode_swflag(adapter))
5856 return (EPERM);
5857
5858 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5859 if (!buf) {
5860 device_printf(dev, "Could not allocate sbuf for output.\n");
5861 return (ENOMEM);
5862 }
5863
5864 // TODO: use sbufs to make a string to print out
5865 /* Set multiplier for RETA setup and table size based on MAC */
5866 switch (adapter->hw.mac.type) {
5867 case ixgbe_mac_X550:
5868 case ixgbe_mac_X550EM_x:
5869 case ixgbe_mac_X550EM_a:
5870 reta_size = 128;
5871 break;
5872 default:
5873 reta_size = 32;
5874 break;
5875 }
5876
5877 /* Print out the redirection table */
5878 sbuf_cat(buf, "\n");
5879 for (int i = 0; i < reta_size; i++) {
5880 if (i < 32) {
5881 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5882 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5883 } else {
5884 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5885 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5886 }
5887 }
5888
5889 // TODO: print more config
5890
5891 error = sbuf_finish(buf);
5892 if (error)
5893 device_printf(dev, "Error finishing sbuf: %d\n", error);
5894
5895 sbuf_delete(buf);
5896 #endif
5897 return (0);
5898 } /* ixgbe_sysctl_print_rss_config */
5899 #endif /* IXGBE_DEBUG */
5900
5901 /************************************************************************
5902 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5903 *
5904 * For X552/X557-AT devices using an external PHY
5905 ************************************************************************/
5906 static int
5907 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5908 {
5909 struct sysctlnode node = *rnode;
5910 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5911 struct ixgbe_hw *hw = &adapter->hw;
5912 int val;
5913 u16 reg;
5914 int error;
5915
5916 if (ixgbe_fw_recovery_mode_swflag(adapter))
5917 return (EPERM);
5918
5919 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5920 device_printf(adapter->dev,
5921 "Device has no supported external thermal sensor.\n");
5922 return (ENODEV);
5923 }
5924
5925 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5926 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5927 device_printf(adapter->dev,
5928 "Error reading from PHY's current temperature register\n");
5929 return (EAGAIN);
5930 }
5931
5932 node.sysctl_data = &val;
5933
5934 /* Shift temp for output */
5935 val = reg >> 8;
5936
5937 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5938 if ((error) || (newp == NULL))
5939 return (error);
5940
5941 return (0);
5942 } /* ixgbe_sysctl_phy_temp */
5943
5944 /************************************************************************
5945 * ixgbe_sysctl_phy_overtemp_occurred
5946 *
5947 * Reports (directly from the PHY) whether the current PHY
5948 * temperature is over the overtemp threshold.
5949 ************************************************************************/
5950 static int
5951 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5952 {
5953 struct sysctlnode node = *rnode;
5954 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5955 struct ixgbe_hw *hw = &adapter->hw;
5956 int val, error;
5957 u16 reg;
5958
5959 if (ixgbe_fw_recovery_mode_swflag(adapter))
5960 return (EPERM);
5961
5962 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5963 device_printf(adapter->dev,
5964 "Device has no supported external thermal sensor.\n");
5965 return (ENODEV);
5966 }
5967
5968 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5969 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5970 device_printf(adapter->dev,
5971 "Error reading from PHY's temperature status register\n");
5972 return (EAGAIN);
5973 }
5974
5975 node.sysctl_data = &val;
5976
5977 /* Get occurrence bit */
5978 val = !!(reg & 0x4000);
5979
5980 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5981 if ((error) || (newp == NULL))
5982 return (error);
5983
5984 return (0);
5985 } /* ixgbe_sysctl_phy_overtemp_occurred */
5986
5987 /************************************************************************
5988 * ixgbe_sysctl_eee_state
5989 *
5990 * Sysctl to set EEE power saving feature
5991 * Values:
5992 * 0 - disable EEE
5993 * 1 - enable EEE
5994 * (none) - get current device EEE state
5995 ************************************************************************/
5996 static int
5997 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5998 {
5999 struct sysctlnode node = *rnode;
6000 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6001 struct ifnet *ifp = adapter->ifp;
6002 device_t dev = adapter->dev;
6003 int curr_eee, new_eee, error = 0;
6004 s32 retval;
6005
6006 if (ixgbe_fw_recovery_mode_swflag(adapter))
6007 return (EPERM);
6008
6009 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6010 node.sysctl_data = &new_eee;
6011 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6012 if ((error) || (newp == NULL))
6013 return (error);
6014
6015 /* Nothing to do */
6016 if (new_eee == curr_eee)
6017 return (0);
6018
6019 /* Not supported */
6020 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6021 return (EINVAL);
6022
6023 /* Bounds checking */
6024 if ((new_eee < 0) || (new_eee > 1))
6025 return (EINVAL);
6026
6027 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6028 if (retval) {
6029 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6030 return (EINVAL);
6031 }
6032
6033 /* Restart auto-neg */
6034 ifp->if_init(ifp);
6035
6036 device_printf(dev, "New EEE state: %d\n", new_eee);
6037
6038 /* Cache new value */
6039 if (new_eee)
6040 adapter->feat_en |= IXGBE_FEATURE_EEE;
6041 else
6042 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6043
6044 return (error);
6045 } /* ixgbe_sysctl_eee_state */
6046
6047 #define PRINTQS(adapter, regname) \
6048 do { \
6049 struct ixgbe_hw *_hw = &(adapter)->hw; \
6050 int _i; \
6051 \
6052 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6053 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6054 printf((_i == 0) ? "\t" : " "); \
6055 printf("%08x", IXGBE_READ_REG(_hw, \
6056 IXGBE_##regname(_i))); \
6057 } \
6058 printf("\n"); \
6059 } while (0)
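/*
 * Illustrative sketch (not in this revision): the same macro also covers
 * the TX-side per-queue registers, e.g. a TX dump would be:
 */
#if 0 /* example */
	PRINTQS(adapter, TDBAL);
	PRINTQS(adapter, TDBAH);
	PRINTQS(adapter, TDH);
	PRINTQS(adapter, TDT);
	PRINTQS(adapter, TXDCTL);
#endif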
6060
6061 /************************************************************************
6062 * ixgbe_print_debug_info
6063 *
6064  * Called only when the "debug" sysctl (ixgbe_sysctl_debug) is set to 1.
6065 * Provides a way to take a look at important statistics
6066 * maintained by the driver and hardware.
6067 ************************************************************************/
6068 static void
6069 ixgbe_print_debug_info(struct adapter *adapter)
6070 {
6071 device_t dev = adapter->dev;
6072 struct ixgbe_hw *hw = &adapter->hw;
6073 int table_size;
6074 int i;
6075
6076 switch (adapter->hw.mac.type) {
6077 case ixgbe_mac_X550:
6078 case ixgbe_mac_X550EM_x:
6079 case ixgbe_mac_X550EM_a:
6080 table_size = 128;
6081 break;
6082 default:
6083 table_size = 32;
6084 break;
6085 }
6086
6087 device_printf(dev, "[E]RETA:\n");
6088 for (i = 0; i < table_size; i++) {
6089 if (i < 32)
6090 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6091 IXGBE_RETA(i)));
6092 else
6093 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6094 IXGBE_ERETA(i - 32)));
6095 }
6096
6097 device_printf(dev, "queue:");
6098 for (i = 0; i < adapter->num_queues; i++) {
6099 printf((i == 0) ? "\t" : " ");
6100 printf("%8d", i);
6101 }
6102 printf("\n");
6103 PRINTQS(adapter, RDBAL);
6104 PRINTQS(adapter, RDBAH);
6105 PRINTQS(adapter, RDLEN);
6106 PRINTQS(adapter, SRRCTL);
6107 PRINTQS(adapter, RDH);
6108 PRINTQS(adapter, RDT);
6109 PRINTQS(adapter, RXDCTL);
6110
6111 device_printf(dev, "RQSMR:");
6112 for (i = 0; i < adapter->num_queues / 4; i++) {
6113 printf((i == 0) ? "\t" : " ");
6114 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6115 }
6116 printf("\n");
6117
6118 device_printf(dev, "disabled_count:");
6119 for (i = 0; i < adapter->num_queues; i++) {
6120 printf((i == 0) ? "\t" : " ");
6121 printf("%8d", adapter->queues[i].disabled_count);
6122 }
6123 printf("\n");
6124
6125 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6126 if (hw->mac.type != ixgbe_mac_82598EB) {
6127 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6128 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6129 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6130 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6131 }
6132 } /* ixgbe_print_debug_info */
6133
6134 /************************************************************************
6135 * ixgbe_sysctl_debug
6136 ************************************************************************/
6137 static int
6138 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6139 {
6140 struct sysctlnode node = *rnode;
6141 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6142 int error, result = 0;
6143
6144 if (ixgbe_fw_recovery_mode_swflag(adapter))
6145 return (EPERM);
6146
6147 node.sysctl_data = &result;
6148 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6149
6150 if (error || newp == NULL)
6151 return error;
6152
6153 if (result == 1)
6154 ixgbe_print_debug_info(adapter);
6155
6156 return 0;
6157 } /* ixgbe_sysctl_debug */
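/*
 * Usage note (illustrative only): writing 1 to the sysctl node is the
 * normal trigger; from code the same dump can be produced directly:
 */
#if 0 /* example */
	ixgbe_print_debug_info(adapter);
#endif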
6158
6159 /************************************************************************
6160 * ixgbe_init_device_features
6161 ************************************************************************/
6162 static void
6163 ixgbe_init_device_features(struct adapter *adapter)
6164 {
6165 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6166 | IXGBE_FEATURE_RSS
6167 | IXGBE_FEATURE_MSI
6168 | IXGBE_FEATURE_MSIX
6169 | IXGBE_FEATURE_LEGACY_IRQ
6170 | IXGBE_FEATURE_LEGACY_TX;
6171
6172 /* Set capabilities first... */
6173 switch (adapter->hw.mac.type) {
6174 case ixgbe_mac_82598EB:
6175 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6176 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6177 break;
6178 case ixgbe_mac_X540:
6179 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6180 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6181 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6182 (adapter->hw.bus.func == 0))
6183 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6184 break;
6185 case ixgbe_mac_X550:
6186 /*
6187 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6188 * NVM Image version.
6189 */
6190 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6191 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6192 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6193 break;
6194 case ixgbe_mac_X550EM_x:
6195 /*
6196 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6197 * NVM Image version.
6198 */
6199 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6200 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6201 break;
6202 case ixgbe_mac_X550EM_a:
6203 /*
6204 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6205 * NVM Image version.
6206 */
6207 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6208 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6209 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6210 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6211 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6212 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6213 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6214 }
6215 break;
6216 case ixgbe_mac_82599EB:
6217 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6218 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6219 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6220 (adapter->hw.bus.func == 0))
6221 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6222 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6223 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6224 break;
6225 default:
6226 break;
6227 }
6228
6229 /* Enabled by default... */
6230 /* Fan failure detection */
6231 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6232 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6233 /* Netmap */
6234 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6235 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6236 /* EEE */
6237 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6238 adapter->feat_en |= IXGBE_FEATURE_EEE;
6239 /* Thermal Sensor */
6240 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6241 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6242 /*
6243 * Recovery mode:
6244 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6245 * NVM Image version.
6246 */
6247
6248 /* Enabled via global sysctl... */
6249 /* Flow Director */
6250 if (ixgbe_enable_fdir) {
6251 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6252 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6253 else
6254 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
6255 }
6256 /* Legacy (single queue) transmit */
6257 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6258 ixgbe_enable_legacy_tx)
6259 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6260 /*
6261 * Message Signal Interrupts - Extended (MSI-X)
6262 * Normal MSI is only enabled if MSI-X calls fail.
6263 */
6264 if (!ixgbe_enable_msix)
6265 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6266 /* Receive-Side Scaling (RSS) */
6267 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6268 adapter->feat_en |= IXGBE_FEATURE_RSS;
6269
6270 /* Disable features with unmet dependencies... */
6271 /* No MSI-X */
6272 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6273 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6274 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6275 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6276 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6277 }
6278 } /* ixgbe_init_device_features */
6279
6280 /************************************************************************
6281 * ixgbe_probe - Device identification routine
6282 *
6283  *   Determines if the driver should be loaded on
6284  *   the adapter based on its PCI vendor/device ID.
6285  *
6286  *   return 1 on match, 0 otherwise
6287 ************************************************************************/
6288 static int
6289 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6290 {
6291 const struct pci_attach_args *pa = aux;
6292
6293 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6294 }
6295
6296 static const ixgbe_vendor_info_t *
6297 ixgbe_lookup(const struct pci_attach_args *pa)
6298 {
6299 const ixgbe_vendor_info_t *ent;
6300 pcireg_t subid;
6301
6302 INIT_DEBUGOUT("ixgbe_lookup: begin");
6303
6304 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6305 return NULL;
6306
6307 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6308
6309 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6310 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6311 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6312 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6313 (ent->subvendor_id == 0)) &&
6314 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6315 (ent->subdevice_id == 0))) {
6316 return ent;
6317 }
6318 }
6319 return NULL;
6320 }
6321
6322 static int
6323 ixgbe_ifflags_cb(struct ethercom *ec)
6324 {
6325 struct ifnet *ifp = &ec->ec_if;
6326 struct adapter *adapter = ifp->if_softc;
6327 u_short change;
6328 int rv = 0;
6329
6330 IXGBE_CORE_LOCK(adapter);
6331
6332 change = ifp->if_flags ^ adapter->if_flags;
6333 if (change != 0)
6334 adapter->if_flags = ifp->if_flags;
6335
6336 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6337 rv = ENETRESET;
6338 goto out;
6339 } else if ((change & IFF_PROMISC) != 0)
6340 ixgbe_set_rxfilter(adapter);
6341
6342 /* Check for ec_capenable. */
6343 change = ec->ec_capenable ^ adapter->ec_capenable;
6344 adapter->ec_capenable = ec->ec_capenable;
6345 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6346 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6347 rv = ENETRESET;
6348 goto out;
6349 }
6350
6351 /*
6352 * Special handling is not required for ETHERCAP_VLAN_MTU.
6353 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6354 */
6355
6356 /* Set up VLAN support and filter */
6357 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6358 ixgbe_setup_vlan_hw_support(adapter);
6359
6360 out:
6361 IXGBE_CORE_UNLOCK(adapter);
6362
6363 return rv;
6364 }
6365
6366 /************************************************************************
6367 * ixgbe_ioctl - Ioctl entry point
6368 *
6369 * Called when the user wants to configure the interface.
6370 *
6371 * return 0 on success, positive on failure
6372 ************************************************************************/
6373 static int
6374 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6375 {
6376 struct adapter *adapter = ifp->if_softc;
6377 struct ixgbe_hw *hw = &adapter->hw;
6378 struct ifcapreq *ifcr = data;
6379 struct ifreq *ifr = data;
6380 int error = 0;
6381 int l4csum_en;
6382 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6383 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6384
6385 if (ixgbe_fw_recovery_mode_swflag(adapter))
6386 return (EPERM);
6387
6388 switch (command) {
6389 case SIOCSIFFLAGS:
6390 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6391 break;
6392 case SIOCADDMULTI:
6393 case SIOCDELMULTI:
6394 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6395 break;
6396 case SIOCSIFMEDIA:
6397 case SIOCGIFMEDIA:
6398 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6399 break;
6400 case SIOCSIFCAP:
6401 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6402 break;
6403 case SIOCSIFMTU:
6404 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6405 break;
6406 #ifdef __NetBSD__
6407 case SIOCINITIFADDR:
6408 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6409 break;
6410 case SIOCGIFFLAGS:
6411 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6412 break;
6413 case SIOCGIFAFLAG_IN:
6414 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6415 break;
6416 case SIOCGIFADDR:
6417 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6418 break;
6419 case SIOCGIFMTU:
6420 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6421 break;
6422 case SIOCGIFCAP:
6423 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6424 break;
6425 case SIOCGETHERCAP:
6426 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6427 break;
6428 case SIOCGLIFADDR:
6429 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6430 break;
6431 case SIOCZIFDATA:
6432 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6433 hw->mac.ops.clear_hw_cntrs(hw);
6434 ixgbe_clear_evcnt(adapter);
6435 break;
6436 case SIOCAIFADDR:
6437 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6438 break;
6439 #endif
6440 default:
6441 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6442 break;
6443 }
6444
6445 switch (command) {
6446 case SIOCGI2C:
6447 {
6448 struct ixgbe_i2c_req i2c;
6449
6450 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6451 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6452 if (error != 0)
6453 break;
6454 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6455 error = EINVAL;
6456 break;
6457 }
6458 if (i2c.len > sizeof(i2c.data)) {
6459 error = EINVAL;
6460 break;
6461 }
6462
6463 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6464 i2c.dev_addr, i2c.data);
6465 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6466 break;
6467 }
6468 case SIOCSIFCAP:
6469 /* Layer-4 Rx checksum offload has to be turned on and
6470 * off as a unit.
6471 */
6472 l4csum_en = ifcr->ifcr_capenable & l4csum;
6473 if (l4csum_en != l4csum && l4csum_en != 0)
6474 return EINVAL;
6475 /*FALLTHROUGH*/
6476 case SIOCADDMULTI:
6477 case SIOCDELMULTI:
6478 case SIOCSIFFLAGS:
6479 case SIOCSIFMTU:
6480 default:
6481 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6482 return error;
6483 if ((ifp->if_flags & IFF_RUNNING) == 0)
6484 ;
6485 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6486 IXGBE_CORE_LOCK(adapter);
6487 if ((ifp->if_flags & IFF_RUNNING) != 0)
6488 ixgbe_init_locked(adapter);
6489 ixgbe_recalculate_max_frame(adapter);
6490 IXGBE_CORE_UNLOCK(adapter);
6491 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6492 /*
6493 * Multicast list has changed; set the hardware filter
6494 * accordingly.
6495 */
6496 IXGBE_CORE_LOCK(adapter);
6497 ixgbe_disable_intr(adapter);
6498 ixgbe_set_rxfilter(adapter);
6499 ixgbe_enable_intr(adapter);
6500 IXGBE_CORE_UNLOCK(adapter);
6501 }
6502 return 0;
6503 }
6504
6505 return error;
6506 } /* ixgbe_ioctl */
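/*
 * Userland usage sketch (illustrative only, using the struct
 * ixgbe_i2c_req fields referenced above): read the first ID byte of an
 * SFP+ module EEPROM (I2C address 0xA0) through SIOCGI2C.
 */
#if 0 /* example */
	struct ixgbe_i2c_req i2c = { .dev_addr = 0xA0, .offset = 0, .len = 1 };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "ixg0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (void *)&i2c;
	/* ioctl(s, SIOCGI2C, &ifr); then i2c.data[0] holds the byte */
#endif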
6507
6508 /************************************************************************
6509 * ixgbe_check_fan_failure
6510 ************************************************************************/
6511 static void
6512 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6513 {
6514 u32 mask;
6515
6516 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6517 IXGBE_ESDP_SDP1;
6518
6519 if (reg & mask)
6520 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6521 } /* ixgbe_check_fan_failure */
6522
6523 /************************************************************************
6524 * ixgbe_handle_que
6525 ************************************************************************/
6526 static void
6527 ixgbe_handle_que(void *context)
6528 {
6529 struct ix_queue *que = context;
6530 struct adapter *adapter = que->adapter;
6531 struct tx_ring *txr = que->txr;
6532 struct ifnet *ifp = adapter->ifp;
6533 bool more = false;
6534
6535 que->handleq.ev_count++;
6536
6537 if (ifp->if_flags & IFF_RUNNING) {
6538 more = ixgbe_rxeof(que);
6539 IXGBE_TX_LOCK(txr);
6540 more |= ixgbe_txeof(txr);
6541 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6542 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6543 ixgbe_mq_start_locked(ifp, txr);
6544 /* Only for queue 0 */
6545 /* NetBSD still needs this for CBQ */
6546 if ((&adapter->queues[0] == que)
6547 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6548 ixgbe_legacy_start_locked(ifp, txr);
6549 IXGBE_TX_UNLOCK(txr);
6550 }
6551
6552 if (more) {
6553 que->req.ev_count++;
6554 ixgbe_sched_handle_que(adapter, que);
6555 } else if (que->res != NULL) {
6556 /* Re-enable this interrupt */
6557 ixgbe_enable_queue(adapter, que->msix);
6558 } else
6559 ixgbe_enable_intr(adapter);
6560
6561 return;
6562 } /* ixgbe_handle_que */
6563
6564 /************************************************************************
6565 * ixgbe_handle_que_work
6566 ************************************************************************/
6567 static void
6568 ixgbe_handle_que_work(struct work *wk, void *context)
6569 {
6570 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6571
6572 /*
6573 * "enqueued flag" is not required here.
6574 * See ixgbe_msix_que().
6575 */
6576 ixgbe_handle_que(que);
6577 }
6578
6579 /************************************************************************
6580 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6581 ************************************************************************/
6582 static int
6583 ixgbe_allocate_legacy(struct adapter *adapter,
6584 const struct pci_attach_args *pa)
6585 {
6586 device_t dev = adapter->dev;
6587 struct ix_queue *que = adapter->queues;
6588 struct tx_ring *txr = adapter->tx_rings;
6589 int counts[PCI_INTR_TYPE_SIZE];
6590 pci_intr_type_t intr_type, max_type;
6591 char intrbuf[PCI_INTRSTR_LEN];
6592 char wqname[MAXCOMLEN];
6593 const char *intrstr = NULL;
6594 int defertx_error = 0, error;
6595
6596 /* We allocate a single interrupt resource */
6597 max_type = PCI_INTR_TYPE_MSI;
6598 counts[PCI_INTR_TYPE_MSIX] = 0;
6599 counts[PCI_INTR_TYPE_MSI] =
6600 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6601 	/* Check feat_cap, not feat_en, so that we can fall back to INTx */
6602 counts[PCI_INTR_TYPE_INTX] =
6603 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6604
6605 alloc_retry:
6606 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6607 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6608 return ENXIO;
6609 }
6610 adapter->osdep.nintrs = 1;
6611 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6612 intrbuf, sizeof(intrbuf));
6613 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6614 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6615 device_xname(dev));
6616 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6617 if (adapter->osdep.ihs[0] == NULL) {
6618 aprint_error_dev(dev,"unable to establish %s\n",
6619 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6620 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6621 adapter->osdep.intrs = NULL;
6622 switch (intr_type) {
6623 case PCI_INTR_TYPE_MSI:
6624 /* The next try is for INTx: Disable MSI */
6625 max_type = PCI_INTR_TYPE_INTX;
6626 counts[PCI_INTR_TYPE_INTX] = 1;
6627 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6628 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6629 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6630 goto alloc_retry;
6631 } else
6632 break;
6633 case PCI_INTR_TYPE_INTX:
6634 default:
6635 /* See below */
6636 break;
6637 }
6638 }
6639 if (intr_type == PCI_INTR_TYPE_INTX) {
6640 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6641 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6642 }
6643 if (adapter->osdep.ihs[0] == NULL) {
6644 aprint_error_dev(dev,
6645 "couldn't establish interrupt%s%s\n",
6646 intrstr ? " at " : "", intrstr ? intrstr : "");
6647 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6648 adapter->osdep.intrs = NULL;
6649 return ENXIO;
6650 }
6651 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6652 /*
6653 	 * Establish the deferred processing contexts (softints and
6654 	 * workqueues) associated with the interrupt.
6655 */
6656 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6657 txr->txr_si =
6658 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6659 ixgbe_deferred_mq_start, txr);
6660
6661 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6662 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6663 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6664 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6665 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6666 }
6667 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6668 ixgbe_handle_que, que);
6669 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6670 error = workqueue_create(&adapter->que_wq, wqname,
6671 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6672 IXGBE_WORKQUEUE_FLAGS);
6673
6674 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6675 && ((txr->txr_si == NULL) || defertx_error != 0))
6676 || (que->que_si == NULL) || error != 0) {
6677 aprint_error_dev(dev,
6678 "could not establish software interrupts\n");
6679
6680 return ENXIO;
6681 }
6682 /* For simplicity in the handlers */
6683 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6684
6685 return (0);
6686 } /* ixgbe_allocate_legacy */
6687
6688 /************************************************************************
6689 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6690 ************************************************************************/
6691 static int
6692 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6693 {
6694 device_t dev = adapter->dev;
6695 struct ix_queue *que = adapter->queues;
6696 struct tx_ring *txr = adapter->tx_rings;
6697 pci_chipset_tag_t pc;
6698 char intrbuf[PCI_INTRSTR_LEN];
6699 char intr_xname[32];
6700 char wqname[MAXCOMLEN];
6701 const char *intrstr = NULL;
6702 int error, vector = 0;
6703 int cpu_id = 0;
6704 kcpuset_t *affinity;
6705 #ifdef RSS
6706 unsigned int rss_buckets = 0;
6707 kcpuset_t cpu_mask;
6708 #endif
6709
6710 pc = adapter->osdep.pc;
6711 #ifdef RSS
6712 /*
6713 * If we're doing RSS, the number of queues needs to
6714 * match the number of RSS buckets that are configured.
6715 *
6716 * + If there's more queues than RSS buckets, we'll end
6717 * up with queues that get no traffic.
6718 *
6719 * + If there's more RSS buckets than queues, we'll end
6720 * up having multiple RSS buckets map to the same queue,
6721 * so there'll be some contention.
6722 */
6723 rss_buckets = rss_getnumbuckets();
6724 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6725 (adapter->num_queues != rss_buckets)) {
6726 device_printf(dev,
6727 "%s: number of queues (%d) != number of RSS buckets (%d)"
6728 "; performance will be impacted.\n",
6729 __func__, adapter->num_queues, rss_buckets);
6730 }
6731 #endif
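/*
 * Editorial worked example for the trade-off described above (numbers
 * hypothetical): with 8 RSS buckets and 4 queues, buckets map onto
 * queues modulo 4, so every queue serves two buckets and may contend;
 * with 4 buckets and 8 queues, queues 4-7 are never selected by RSS
 * and carry no receive traffic.
 */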
6732
6733 adapter->osdep.nintrs = adapter->num_queues + 1;
6734 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6735 adapter->osdep.nintrs) != 0) {
6736 aprint_error_dev(dev,
6737 "failed to allocate MSI-X interrupt\n");
6738 return (ENXIO);
6739 }
6740
6741 kcpuset_create(&affinity, false);
6742 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6743 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6744 device_xname(dev), i);
6745 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6746 sizeof(intrbuf));
6747 #ifdef IXGBE_MPSAFE
6748 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6749 true);
6750 #endif
6751 /* Set the handler function */
6752 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6753 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6754 intr_xname);
6755 if (que->res == NULL) {
6756 aprint_error_dev(dev,
6757 "Failed to register QUE handler\n");
6758 error = ENXIO;
6759 goto err_out;
6760 }
6761 que->msix = vector;
6762 adapter->active_queues |= 1ULL << que->msix;
6763
6764 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6765 #ifdef RSS
6766 /*
6767 * The queue ID is used as the RSS layer bucket ID.
6768 * We look up the queue ID -> RSS CPU ID and select
6769 * that.
6770 */
6771 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6772 CPU_SETOF(cpu_id, &cpu_mask);
6773 #endif
6774 } else {
6775 /*
6776 * Bind the MSI-X vector, and thus the
6777 * rings to the corresponding CPU.
6778 *
6779 * This just happens to match the default RSS
6780 * round-robin bucket -> queue -> CPU allocation.
6781 */
6782 if (adapter->num_queues > 1)
6783 cpu_id = i;
6784 }
6785 /* Round-robin affinity */
6786 kcpuset_zero(affinity);
6787 kcpuset_set(affinity, cpu_id % ncpu);
6788 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6789 NULL);
6790 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6791 intrstr);
6792 if (error == 0) {
6793 #if 1 /* def IXGBE_DEBUG */
6794 #ifdef RSS
6795 				aprint_normal(", bound RSS bucket %d to CPU %d", i,
6796 cpu_id % ncpu);
6797 #else
6798 				aprint_normal(", bound queue %d to CPU %d", i,
6799 cpu_id % ncpu);
6800 #endif
6801 #endif /* IXGBE_DEBUG */
6802 }
6803 aprint_normal("\n");
6804
6805 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6806 txr->txr_si = softint_establish(
6807 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6808 ixgbe_deferred_mq_start, txr);
6809 if (txr->txr_si == NULL) {
6810 aprint_error_dev(dev,
6811 "couldn't establish software interrupt\n");
6812 error = ENXIO;
6813 goto err_out;
6814 }
6815 }
6816 que->que_si
6817 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6818 ixgbe_handle_que, que);
6819 if (que->que_si == NULL) {
6820 aprint_error_dev(dev,
6821 "couldn't establish software interrupt\n");
6822 error = ENXIO;
6823 goto err_out;
6824 }
6825 }
6826 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6827 error = workqueue_create(&adapter->txr_wq, wqname,
6828 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6829 IXGBE_WORKQUEUE_FLAGS);
6830 if (error) {
6831 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6832 goto err_out;
6833 }
6834 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6835
6836 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6837 error = workqueue_create(&adapter->que_wq, wqname,
6838 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6839 IXGBE_WORKQUEUE_FLAGS);
6840 if (error) {
6841 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6842 goto err_out;
6843 }
6844
6845 /* and Link */
6846 cpu_id++;
6847 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6848 adapter->vector = vector;
6849 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6850 sizeof(intrbuf));
6851 #ifdef IXGBE_MPSAFE
6852 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6853 true);
6854 #endif
6855 /* Set the link handler function */
6856 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6857 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6858 intr_xname);
6859 if (adapter->osdep.ihs[vector] == NULL) {
6860 aprint_error_dev(dev, "Failed to register LINK handler\n");
6861 error = ENXIO;
6862 goto err_out;
6863 }
6864 /* Round-robin affinity */
6865 kcpuset_zero(affinity);
6866 kcpuset_set(affinity, cpu_id % ncpu);
6867 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6868 NULL);
6869
6870 aprint_normal_dev(dev,
6871 "for link, interrupting at %s", intrstr);
6872 if (error == 0)
6873 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6874 else
6875 aprint_normal("\n");
6876
6877 kcpuset_destroy(affinity);
6878 aprint_normal_dev(dev,
6879 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6880
6881 return (0);
6882
6883 err_out:
6884 kcpuset_destroy(affinity);
6885 ixgbe_free_workqueue(adapter);
6886 ixgbe_free_pciintr_resources(adapter);
6887 return (error);
6888 } /* ixgbe_allocate_msix */
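/*
 * Editorial sketch (compiled out): the minimal per-vector pattern that the
 * queue loop in ixgbe_allocate_msix() applies, reduced to a single handler
 * with best-effort CPU affinity.  "example_establish_one_msix",
 * "example_handler" and "example_arg" are hypothetical names.
 */
#if 0
static int
example_establish_one_msix(struct adapter *adapter, int vec, int target_cpu,
    int (*example_handler)(void *), void *example_arg)
{
	pci_chipset_tag_t pc = adapter->osdep.pc;
	kcpuset_t *affinity;
	void *ih;

	ih = pci_intr_establish_xname(pc, adapter->osdep.intrs[vec],
	    IPL_NET, example_handler, example_arg, "example");
	if (ih == NULL)
		return ENXIO;
	adapter->osdep.ihs[vec] = ih;

	/* Best effort: failing to bind is not fatal, only suboptimal */
	kcpuset_create(&affinity, true);
	kcpuset_set(affinity, target_cpu % ncpu);
	(void)interrupt_distribute(ih, affinity, NULL);
	kcpuset_destroy(affinity);
	return 0;
}
#endif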
6889
6890 /************************************************************************
6891 * ixgbe_configure_interrupts
6892 *
6893  *   Set up MSI-X, MSI, or legacy (INTx) interrupts, preferring them in
6894  *   that order.  The outcome also depends on user settings.
6895 ************************************************************************/
6896 static int
6897 ixgbe_configure_interrupts(struct adapter *adapter)
6898 {
6899 device_t dev = adapter->dev;
6900 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6901 int want, queues, msgs;
6902
6903 /* Default to 1 queue if MSI-X setup fails */
6904 adapter->num_queues = 1;
6905
6906 	/* The MSI-X capability may have been overridden by a tunable */
6907 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6908 goto msi;
6909
6910 /*
6911 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1
6912 	 * to save an interrupt slot.
6913 */
6914 if (ncpu == 1)
6915 goto msi;
6916
6917 /* First try MSI-X */
6918 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6919 msgs = MIN(msgs, IXG_MAX_NINTR);
6920 if (msgs < 2)
6921 goto msi;
6922
6923 adapter->msix_mem = (void *)1; /* XXX */
6924
6925 /* Figure out a reasonable auto config value */
6926 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6927
6928 #ifdef RSS
6929 /* If we're doing RSS, clamp at the number of RSS buckets */
6930 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6931 queues = uimin(queues, rss_getnumbuckets());
6932 #endif
6933 if (ixgbe_num_queues > queues) {
6934 		aprint_error_dev(adapter->dev,
		    "ixgbe_num_queues (%d) is too large, "
		    "using reduced amount (%d).\n", ixgbe_num_queues, queues);
6935 ixgbe_num_queues = queues;
6936 }
6937
6938 if (ixgbe_num_queues != 0)
6939 queues = ixgbe_num_queues;
6940 else
6941 queues = uimin(queues,
6942 uimin(mac->max_tx_queues, mac->max_rx_queues));
6943
6944 /* reflect correct sysctl value */
6945 ixgbe_num_queues = queues;
6946
6947 /*
6948 * Want one vector (RX/TX pair) per queue
6949 * plus an additional for Link.
6950 */
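	/*
	 * Editorial worked example (hypothetical numbers): ncpu = 8 and a
	 * device advertising 16 MSI-X messages (not clamped further by
	 * IXG_MAX_NINTR or the MAC queue limits) gives queues = 8 and
	 * want = 9, so msgs is trimmed from 16 down to 9 below.
	 */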
6951 want = queues + 1;
6952 if (msgs >= want)
6953 msgs = want;
6954 else {
6955 		aprint_error_dev(dev, "MSI-X configuration problem: "
6956 		    "%d vectors available but %d wanted!\n",
6957 		    msgs, want);
6958 goto msi;
6959 }
6960 adapter->num_queues = queues;
6961 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6962 return (0);
6963
6964 /*
6965 * MSI-X allocation failed or provided us with
6966 	 * fewer vectors than needed.  Free MSI-X resources
6967 	 * and try enabling MSI instead.
6968 */
6969 msi:
6970 /* Without MSI-X, some features are no longer supported */
6971 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6972 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6973 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6974 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6975
6976 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6977 adapter->msix_mem = NULL; /* XXX */
6978 	if (msgs > 0) {
6979 		/* A single MSI vector is all we need */
6980 		msgs = 1;
6981 		adapter->feat_en |= IXGBE_FEATURE_MSI;
6982 		return (0);
6983 	}
6985
6986 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6987 aprint_error_dev(dev,
6988 "Device does not support legacy interrupts.\n");
6989 		return (1);
6990 }
6991
6992 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6993
6994 return (0);
6995 } /* ixgbe_configure_interrupts */
6996
6997
6998 /************************************************************************
6999  * ixgbe_handle_link - Deferred handler for MSI-X link interrupts
7000 *
7001 * Done outside of interrupt context since the driver might sleep
7002 ************************************************************************/
7003 static void
7004 ixgbe_handle_link(void *context)
7005 {
7006 struct adapter *adapter = context;
7007 struct ixgbe_hw *hw = &adapter->hw;
7008
7009 ++adapter->link_workev.ev_count;
7010 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7011 ixgbe_update_link_status(adapter);
7012
7013 /* Re-enable link interrupts */
7014 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7015 } /* ixgbe_handle_link */
7016
7017 #if 0
7018 /************************************************************************
7019 * ixgbe_rearm_queues
7020 ************************************************************************/
7021 static __inline void
7022 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
7023 {
7024 u32 mask;
7025
7026 switch (adapter->hw.mac.type) {
7027 case ixgbe_mac_82598EB:
7028 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7029 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
7030 break;
7031 case ixgbe_mac_82599EB:
7032 case ixgbe_mac_X540:
7033 case ixgbe_mac_X550:
7034 case ixgbe_mac_X550EM_x:
7035 case ixgbe_mac_X550EM_a:
7036 mask = (queues & 0xFFFFFFFF);
7037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
7038 mask = (queues >> 32);
7039 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
7040 break;
7041 default:
7042 break;
7043 }
7044 } /* ixgbe_rearm_queues */
7045 #endif
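/*
 * Editorial usage note: were ixgbe_rearm_queues() enabled, a caller would
 * post a software-initiated interrupt for a single queue by setting that
 * queue's bit, e.g. (hypothetical call site):
 *
 *	ixgbe_rearm_queues(adapter, 1ULL << que->msix);
 *
 * On 82599 and later MACs the 64-bit queue mask is split across the two
 * EICS_EX registers, as the function shows; the 82598 only honors the
 * bits in IXGBE_EIMS_RTX_QUEUE.
 */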
7046