/* $NetBSD: ixgbe.c,v 1.260 2020/11/17 04:50:29 knakahara Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
        "Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int      ixgbe_probe(device_t, cfdata_t, void *);
static void     ixgbe_quirks(struct adapter *);
static void     ixgbe_attach(device_t, device_t, void *);
static int      ixgbe_detach(device_t, int);
#if 0
static int      ixgbe_shutdown(device_t);
#endif
static bool     ixgbe_suspend(device_t, const pmf_qual_t *);
static bool     ixgbe_resume(device_t, const pmf_qual_t *);
static int      ixgbe_ifflags_cb(struct ethercom *);
static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
static int      ixgbe_init(struct ifnet *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_ifstop(struct ifnet *, int);
static void     ixgbe_stop_locked(void *);
static void     ixgbe_init_device_features(struct adapter *);
static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void     ixgbe_add_media_types(struct adapter *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static int      ixgbe_allocate_pci_resources(struct adapter *,
                    const struct pci_attach_args *);
static void     ixgbe_free_deferred_handlers(struct adapter *);
static void     ixgbe_get_slot_info(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *,
                    const struct pci_attach_args *);
static int      ixgbe_allocate_legacy(struct adapter *,
                    const struct pci_attach_args *);
static int      ixgbe_configure_interrupts(struct adapter *);
static void     ixgbe_free_pciintr_resources(struct adapter *);
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static void     ixgbe_handle_timer(struct work *, void *);
static void     ixgbe_recovery_mode_timer(void *);
static void     ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int      ixgbe_setup_interface(device_t, struct adapter *);
static void     ixgbe_config_gpie(struct adapter *);
static void     ixgbe_config_dmac(struct adapter *);
static void     ixgbe_config_delay_values(struct adapter *);
static void     ixgbe_schedule_admin_tasklet(struct adapter *);
static void     ixgbe_config_link(struct adapter *);
static void     ixgbe_check_wol_support(struct adapter *);
static int      ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void     ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_enable_rx_drop(struct adapter *);
static void     ixgbe_disable_rx_drop(struct adapter *);
static void     ixgbe_initialize_rss_mapping(struct adapter *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_set_rxfilter(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void     ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void     ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void     ixgbe_setup_vlan_hw_support(struct adapter *);
static int      ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int      ixgbe_register_vlan(struct adapter *, u16);
static int      ixgbe_unregister_vlan(struct adapter *, u16);

static void     ixgbe_add_device_sysctls(struct adapter *);
static void     ixgbe_add_hw_stats(struct adapter *);
static void     ixgbe_clear_evcnt(struct adapter *);
static int      ixgbe_set_flowcntl(struct adapter *, int);
static int      ixgbe_set_advertise(struct adapter *, int);
static int      ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
                    const char *, int *, int);
static int      ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int      ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int      ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int      ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Legacy (single vector) interrupt handler */
static int      ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int      ixgbe_msix_que(void *);
static int      ixgbe_msix_admin(void *);

/* Event handlers running on workqueue */
static void     ixgbe_handle_que(void *);
static void     ixgbe_handle_link(void *);
static void     ixgbe_handle_msf(void *);
static void     ixgbe_handle_mod(void *);
static void     ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void     ixgbe_handle_admin(struct work *, void *);
static void     ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

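/*
 * Cap on the interrupt rate of each vector.  By our reading (not verified
 * here), the driver relates the EITR interval and this rate through the
 * 4000000 constant, so with the stock IXGBE_LOW_LATENCY of 128 the default
 * below works out to 4000000 / 128 = 31250 interrupts/s per vector.
 */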
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile option right now as it's set during
 * attach; set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE            1
#define IXGBE_CALLOUT_FLAGS     CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS     SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS   WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS  WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS     0
#define IXGBE_SOFTINT_FLAGS     0
#define IXGBE_WORKQUEUE_FLAGS   WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS  0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             reta = 0, mrqc, rss_key[10];
        int             queue_id, table_size, index_mult;
        int             i, j;
        u32             rss_hash_config;

        /* Force the use of the default RSS key. */
#ifdef __NetBSD__
        rss_getkey((uint8_t *) &rss_key);
#else
        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *) &rss_key);
        } else {
                /* set up random bits */
                cprng_fast(&rss_key, sizeof(rss_key));
        }
#endif

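        /*
         * By our reading of the datasheets (not verified here): the 0x11
         * multiplier below matches the 82598's RSS queue-indexing scheme,
         * while the X550 family grows the redirection table from 128 to
         * 512 entries, whose tail is written through ERETA further down.
         */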
        /* Set multiplier for RETA setup and table size based on MAC */
        index_mult = 0x1;
        table_size = 128;
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                index_mult = 0x11;
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                table_size = 512;
                break;
        default:
                break;
        }

        /* Set up the redirection table */
        for (i = 0, j = 0; i < table_size; i++, j++) {
                if (j == adapter->num_queues)
                        j = 0;

                if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_queues.)
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % adapter->num_queues;
                } else
                        queue_id = (j * index_mult);

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 */
                reta = reta >> 8;
                reta = reta | (((uint32_t) queue_id) << 24);
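                /*
                 * Each 32-bit RETA/ERETA register holds four such 8-bit
                 * entries, so flush the accumulated word on every fourth
                 * entry; indices 0-127 go to RETA, the rest (on MACs with
                 * a 512-entry table) to ERETA.
                 */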
                if ((i & 3) == 3) {
                        if (i < 128)
                                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                        else
                                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                                    reta);
                        reta = 0;
                }
        }

        /* Now fill our hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

        /* Perform hash on these packet types */
        if (adapter->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                                | RSS_HASHTYPE_RSS_TCP_IPV4
                                | RSS_HASHTYPE_RSS_IPV6
                                | RSS_HASHTYPE_RSS_TCP_IPV6
                                | RSS_HASHTYPE_RSS_IPV6_EX
                                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
        }

        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
        mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
        struct rx_ring  *rxr = adapter->rx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ifnet    *ifp = adapter->ifp;
        int             i, j;
        u32             bufsz, fctrl, srrctl, rxcsum;
        u32             hlreg;

        /*
         * Make sure receives are disabled while
         * setting up the descriptor ring
         */
        ixgbe_disable_rx(hw);

        /* Enable broadcasts */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                fctrl |= IXGBE_FCTRL_DPF;
                fctrl |= IXGBE_FCTRL_PMCF;
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        /* Set for Jumbo Frames? */
        hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (ifp->if_mtu > ETHERMTU)
                hlreg |= IXGBE_HLREG0_JUMBOEN;
        else
                hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
        /* CRC stripping is conditional in Netmap */
        if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
            (ifp->if_capenable & IFCAP_NETMAP) &&
            !ix_crcstrip)
                hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
        else
#endif /* DEV_NETMAP */
                hlreg |= IXGBE_HLREG0_RXCRCSTRP;

        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

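        /*
         * SRRCTL's BSIZEPKT field is expressed in 1 KB units
         * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so round the receive buffer
         * size up to the next 1 KB boundary before shifting it into place.
         */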
        bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
            IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        for (i = 0; i < adapter->num_queues; i++, rxr++) {
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg;
                int regnum = i / 4;     /* 1 register per 4 queues */
                int regshift = i % 4;   /* one byte per queue in the register */
                j = rxr->me;

                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Set up the SRRCTL register */
                srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
                srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                srrctl |= bufsz;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                /* Set RQSMR (Receive Queue Statistic Mapping) register */
                reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
                reg &= ~(0x000000ffUL << (regshift * 8));
                reg |= i << (regshift * 8);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

                /*
                 * Set DROP_EN iff we have no flow control and >1 queue.
                 * Note that srrctl was cleared shortly before during reset,
                 * so we do not need to clear the bit, but do it just in case
                 * this code is moved elsewhere.
                 */
                if (adapter->num_queues > 1 &&
                    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
                        srrctl |= IXGBE_SRRCTL_DROP_EN;
                } else {
                        srrctl &= ~IXGBE_SRRCTL_DROP_EN;
                }

                IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

                /* Setup the HW Rx Head and Tail Descriptor Pointers */
                IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

                /* Set the driver rx tail address */
                rxr->tail = IXGBE_RDT(rxr->me);
        }

        if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
                u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                            | IXGBE_PSRTYPE_UDPHDR
                            | IXGBE_PSRTYPE_IPV4HDR
                            | IXGBE_PSRTYPE_IPV6HDR;
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        ixgbe_initialize_rss_mapping(adapter);

        if (adapter->num_queues > 1) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        /* This is useful for calculating UDP/IP fragment checksums */
        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
        struct tx_ring  *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        int             i;

        INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

        /* Setup the Base and Length of the Tx Descriptor Ring */
        for (i = 0; i < adapter->num_queues; i++, txr++) {
                u64 tdba = txr->txdma.dma_paddr;
                u32 txctrl = 0;
                u32 tqsmreg, reg;
                int regnum = i / 4;     /* 1 register per 4 queues */
                int regshift = i % 4;   /* one byte per queue in the register */
                int j = txr->me;

                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
                    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

                /*
                 * Set TQSMR (Transmit Queue Statistic Mapping) register.
                 * Register location is different between 82598 and others.
                 */
                if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                        tqsmreg = IXGBE_TQSMR(regnum);
                else
                        tqsmreg = IXGBE_TQSM(regnum);
                reg = IXGBE_READ_REG(hw, tqsmreg);
                reg &= ~(0x000000ffUL << (regshift * 8));
                reg |= i << (regshift * 8);
                IXGBE_WRITE_REG(hw, tqsmreg, reg);

                /* Setup the HW Tx Head and Tail descriptor pointers */
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

                /* Cache the tail address */
                txr->tail = IXGBE_TDT(j);

                txr->txr_no_space = false;

                /* Disable Head Writeback */
                /*
                 * Note: for X550 series devices, these registers are actually
                 * prefixed with TPH_ instead of DCA_, but the addresses and
                 * fields remain the same.
                 */
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
                        break;
                default:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
                        break;
                }
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
                        break;
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
                        break;
                }

        }

        if (hw->mac.type != ixgbe_mac_82598EB) {
                u32 dmatxctl, rttdcs;

                dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                dmatxctl |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
                /* Disable arbiter to set MTQC */
                rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                rttdcs |= IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
                IXGBE_WRITE_REG(hw, IXGBE_MTQC,
                    ixgbe_get_mtqc(adapter->iov_mode));
                rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        }

        return;
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        const char      *vendor, *product;

        if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
                /*
                 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
                 * MA10-ST0.
                 */
                vendor = pmf_get_platform("system-vendor");
                product = pmf_get_platform("system-product");

                if ((vendor == NULL) || (product == NULL))
                        return;

                if ((strcmp(vendor, "GIGABYTE") == 0) &&
                    (strcmp(product, "MA10-ST0") == 0)) {
                        aprint_verbose_dev(dev,
                            "Enable SFP+ MOD_ABS inverse quirk\n");
                        adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
                }
        }
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
        struct adapter  *adapter;
        struct ixgbe_hw *hw;
        int             error = -1;
        u32             ctrl_ext;
        u16             high, low, nvmreg;
        pcireg_t        id, subid;
        const ixgbe_vendor_info_t *ent;
        struct pci_attach_args *pa = aux;
        bool            unsupported_sfp = false;
        const char      *str;
        char            wqname[MAXCOMLEN];
        char            buf[256];

        INIT_DEBUGOUT("ixgbe_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        adapter = device_private(dev);
        adapter->hw.back = adapter;
        adapter->dev = dev;
        hw = &adapter->hw;
        adapter->osdep.pc = pa->pa_pc;
        adapter->osdep.tag = pa->pa_tag;
        if (pci_dma64_available(pa))
                adapter->osdep.dmat = pa->pa_dmat64;
        else
                adapter->osdep.dmat = pa->pa_dmat;
        adapter->osdep.attached = false;
        adapter->osdep.detaching = false;

        ent = ixgbe_lookup(pa);

        KASSERT(ent != NULL);

        aprint_normal(": %s, Version - %s\n",
            ixgbe_strings[ent->index], ixgbe_driver_version);

        /* Core Lock Init */
        IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

        /* Set up the timer callout and workqueue */
        callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
        snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
        error = workqueue_create(&adapter->timer_wq, wqname,
            ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
            IXGBE_TASKLET_WQ_FLAGS);
        if (error) {
                aprint_error_dev(dev,
                    "could not create timer workqueue (%d)\n", error);
                goto err_out;
        }

        /* Determine hardware revision */
        id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
        subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

        hw->vendor_id = PCI_VENDOR(id);
        hw->device_id = PCI_PRODUCT(id);
        hw->revision_id =
            PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
        hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
        hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

        /* Set quirk flags */
        ixgbe_quirks(adapter);

        /*
         * Make sure BUSMASTER is set
         */
        ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(adapter, pa)) {
                aprint_error_dev(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        /*
         * Initialize the shared code
         */
        if (ixgbe_init_shared_code(hw) != 0) {
                aprint_error_dev(dev, "Unable to initialize the shared code\n");
                error = ENXIO;
                goto err_out;
        }

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                str = "82598EB";
                break;
        case ixgbe_mac_82599EB:
                str = "82599EB";
                break;
        case ixgbe_mac_X540:
                str = "X540";
                break;
        case ixgbe_mac_X550:
                str = "X550";
                break;
        case ixgbe_mac_X550EM_x:
                str = "X550EM X";
                break;
        case ixgbe_mac_X550EM_a:
                str = "X550EM A";
                break;
        default:
                str = "Unknown";
                break;
        }
        aprint_normal_dev(dev, "device %s\n", str);

        if (hw->mbx.ops.init_params)
                hw->mbx.ops.init_params(hw);

        hw->allow_unsupported_sfp = allow_unsupported_sfp;

        /* Pick up the 82599 settings */
        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw->phy.smart_speed = ixgbe_smart_speed;
                adapter->num_segs = IXGBE_82599_SCATTER;
        } else
                adapter->num_segs = IXGBE_82598_SCATTER;

        /* Ensure SW/FW semaphore is free */
        ixgbe_init_swfw_semaphore(hw);

        hw->mac.ops.set_lan_id(hw);
        ixgbe_init_device_features(adapter);

        if (ixgbe_configure_interrupts(adapter)) {
                error = ENXIO;
                goto err_out;
        }

        /* Allocate multicast array memory. */
        adapter->mta = malloc(sizeof(*adapter->mta) *
            MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

        /* Enable WoL (if supported) */
        ixgbe_check_wol_support(adapter);

        /* Register for VLAN events */
        ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

        /* Verify adapter fan is still functional (if applicable) */
        if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
                u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
                ixgbe_check_fan_failure(adapter, esdp, FALSE);
        }

        /* Set an initial default flow control value */
        hw->fc.requested_mode = ixgbe_flow_control;

        /* Sysctls for limiting the amount of work done in the taskqueues */
        ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
            "max number of rx packets to process",
            &adapter->rx_process_limit, ixgbe_rx_process_limit);

        ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
            "max number of tx packets to process",
            &adapter->tx_process_limit, ixgbe_tx_process_limit);

        /* Do descriptor calc and sanity checks */
        if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
                aprint_error_dev(dev, "TXD config issue, using default!\n");
                adapter->num_tx_desc = DEFAULT_TXD;
        } else
                adapter->num_tx_desc = ixgbe_txd;

        if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
                aprint_error_dev(dev, "RXD config issue, using default!\n");
                adapter->num_rx_desc = DEFAULT_RXD;
        } else
                adapter->num_rx_desc = ixgbe_rxd;

        /* Allocate our TX/RX Queues */
        if (ixgbe_allocate_queues(adapter)) {
                error = ENOMEM;
                goto err_out;
        }

        hw->phy.reset_if_overtemp = TRUE;
        error = ixgbe_reset_hw(hw);
        hw->phy.reset_if_overtemp = FALSE;
        if (error == IXGBE_ERR_SFP_NOT_PRESENT)
                error = IXGBE_SUCCESS;
        else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
                unsupported_sfp = true;
                error = IXGBE_SUCCESS;
        } else if (error) {
                aprint_error_dev(dev, "Hardware initialization failed\n");
                error = EIO;
                goto err_late;
        }

        /* Make sure we have a good EEPROM before we read from it */
        if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
                aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
                error = EIO;
                goto err_late;
        }

        aprint_normal("%s:", device_xname(dev));
        /* NVM Image Version */
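        /*
         * The NVM version word packs the major revision into bits 15:12
         * and the minor into the bits below it; exactly which low bits
         * hold the minor (and whether an ID nibble follows) differs per
         * MAC generation, as the per-case extraction below shows.
         */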
        high = low = 0;
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550EM_a:
                hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
                if (nvmreg == 0xffff)
                        break;
                high = (nvmreg >> 12) & 0x0f;
                low = (nvmreg >> 4) & 0xff;
                id = nvmreg & 0x0f;
                aprint_normal(" NVM Image Version %u.", high);
                if (hw->mac.type == ixgbe_mac_X540)
                        str = "%x";
                else
                        str = "%02x";
                aprint_normal(str, low);
                aprint_normal(" ID 0x%x,", id);
                break;
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550:
                hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
                if (nvmreg == 0xffff)
                        break;
                high = (nvmreg >> 12) & 0x0f;
                low = nvmreg & 0xff;
                aprint_normal(" NVM Image Version %u.%02x,", high, low);
                break;
        default:
                break;
        }
        hw->eeprom.nvm_image_ver_high = high;
        hw->eeprom.nvm_image_ver_low = low;

        /* PHY firmware revision */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
                hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
                if (nvmreg == 0xffff)
                        break;
                high = (nvmreg >> 12) & 0x0f;
                low = (nvmreg >> 4) & 0xff;
                id = nvmreg & 0x000f;
                aprint_normal(" PHY FW Revision %u.", high);
                if (hw->mac.type == ixgbe_mac_X540)
                        str = "%x";
                else
                        str = "%02x";
                aprint_normal(str, low);
                aprint_normal(" ID 0x%x,", id);
                break;
        default:
                break;
        }

        /* NVM Map version & OEM NVM Image version */
        switch (hw->mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
                if (nvmreg != 0xffff) {
                        high = (nvmreg >> 12) & 0x0f;
                        low = nvmreg & 0x00ff;
                        aprint_normal(" NVM Map version %u.%02x,", high, low);
                }
                hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
                if (nvmreg != 0xffff) {
                        high = (nvmreg >> 12) & 0x0f;
                        low = nvmreg & 0x00ff;
                        aprint_verbose(" OEM NVM Image version %u.%02x,", high,
                            low);
                }
                break;
        default:
                break;
        }

        /* Print the ETrackID */
        hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
        hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
        aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

        if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
                error = ixgbe_allocate_msix(adapter, pa);
                if (error) {
                        /* Free allocated queue structures first */
                        ixgbe_free_queues(adapter);

                        /* Fallback to legacy interrupt */
                        adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
                        if (adapter->feat_cap & IXGBE_FEATURE_MSI)
                                adapter->feat_en |= IXGBE_FEATURE_MSI;
                        adapter->num_queues = 1;

                        /* Allocate our TX/RX Queues again */
                        if (ixgbe_allocate_queues(adapter)) {
                                error = ENOMEM;
                                goto err_out;
                        }
                }
        }
        /* Recovery mode */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* >= 2.00 */
                if (hw->eeprom.nvm_image_ver_high >= 2) {
                        adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
                        adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
                }
                break;
        default:
                break;
        }

        if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
                error = ixgbe_allocate_legacy(adapter, pa);
        if (error)
                goto err_late;

        /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
        mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
        snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
        error = workqueue_create(&adapter->admin_wq, wqname,
            ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
            IXGBE_TASKLET_WQ_FLAGS);
        if (error) {
                aprint_error_dev(dev,
                    "could not create admin workqueue (%d)\n", error);
                goto err_out;
        }

        error = ixgbe_start_hw(hw);
        switch (error) {
        case IXGBE_ERR_EEPROM_VERSION:
                aprint_error_dev(dev, "This device is a pre-production adapter/"
                    "LOM. Please be aware there may be issues associated "
                    "with your hardware.\nIf you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.\n");
                break;
        default:
                break;
        }

        /* Setup OS specific network interface */
        if (ixgbe_setup_interface(dev, adapter) != 0)
                goto err_late;

        /*
         * Print the PHY ID only for copper PHYs.  On devices with an SFP(+)
         * cage and a module inserted, phy.id is not an MII PHY id but an
         * SFF-8024 identifier.
         */
        if (hw->phy.media_type == ixgbe_media_type_copper) {
                uint16_t id1, id2;
                int oui, model, rev;
                const char *descr;

                id1 = hw->phy.id >> 16;
                id2 = hw->phy.id & 0xffff;
                oui = MII_OUI(id1, id2);
                model = MII_MODEL(id2);
                rev = MII_REV(id2);
                if ((descr = mii_get_descr(oui, model)) != NULL)
                        aprint_normal_dev(dev,
                            "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
                            descr, oui, model, rev);
                else
                        aprint_normal_dev(dev,
                            "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
                            oui, model, rev);
        }

        /* Enable EEE power saving */
        if (adapter->feat_cap & IXGBE_FEATURE_EEE)
                hw->mac.ops.setup_eee(hw,
                    adapter->feat_en & IXGBE_FEATURE_EEE);

        /* Enable power to the phy. */
        if (!unsupported_sfp) {
                /* Enable the optics for 82599 SFP+ fiber */
                ixgbe_enable_tx_laser(hw);

                /*
                 * XXX Currently, ixgbe_set_phy_power() supports only copper
                 * PHY, so it's not required to test with !unsupported_sfp.
                 */
                ixgbe_set_phy_power(hw, TRUE);
        }

        /* Initialize statistics */
        ixgbe_update_stats_counters(adapter);

        /* Check PCIE slot type/speed/width */
        ixgbe_get_slot_info(adapter);

        /*
         * Do time init and sysctl init here, but
         * only on the first port of a bypass adapter.
         */
        ixgbe_bypass_init(adapter);

        /* Set an initial dmac value */
        adapter->dmac = 0;
        /* Set initial advertised speeds (if applicable) */
        adapter->advertise = ixgbe_get_advertise(adapter);

        if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
                ixgbe_define_iov_schemas(dev, &error);

        /* Add sysctls */
        ixgbe_add_device_sysctls(adapter);
        ixgbe_add_hw_stats(adapter);

        /* For Netmap */
        adapter->init_locked = ixgbe_init_locked;
        adapter->stop_locked = ixgbe_stop_locked;

        if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
                ixgbe_netmap_attach(adapter);

        snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
        aprint_verbose_dev(dev, "feature cap %s\n", buf);
        snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
        aprint_verbose_dev(dev, "feature ena %s\n", buf);

        if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
                pmf_class_network_register(dev, adapter->ifp);
        else
                aprint_error_dev(dev, "couldn't establish power handler\n");

        /* Init recovery mode timer and state variable */
        if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
                adapter->recovery_mode = 0;

                /* Set up the timer callout */
                callout_init(&adapter->recovery_mode_timer,
                    IXGBE_CALLOUT_FLAGS);
                snprintf(wqname, sizeof(wqname), "%s-recovery",
                    device_xname(dev));
                error = workqueue_create(&adapter->recovery_mode_timer_wq,
                    wqname, ixgbe_handle_recovery_mode_timer, adapter,
                    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
                if (error) {
                        aprint_error_dev(dev, "could not create "
                            "recovery_mode_timer workqueue (%d)\n", error);
                        goto err_out;
                }

                /* Start the task */
                callout_reset(&adapter->recovery_mode_timer, hz,
                    ixgbe_recovery_mode_timer, adapter);
        }

        INIT_DEBUGOUT("ixgbe_attach: end");
        adapter->osdep.attached = true;

        return;

err_late:
        ixgbe_free_queues(adapter);
err_out:
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
        ixgbe_free_deferred_handlers(adapter);
        ixgbe_free_pci_resources(adapter);
        if (adapter->mta != NULL)
                free(adapter->mta, M_DEVBUF);
        mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
        IXGBE_CORE_LOCK_DESTROY(adapter);

        return;
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u16             dev_caps = 0;

        /* Find out WoL support for port */
        adapter->wol_support = hw->wol_enabled = 0;
        ixgbe_get_device_caps(hw, &dev_caps);
        if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
            ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
             hw->bus.func == 0))
                adapter->wol_support = hw->wol_enabled = 1;

        /* Save initial wake up filter configuration */
        adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

        return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ethercom *ec = &adapter->osdep.ec;
        struct ifnet    *ifp;
        int             rv;

        INIT_DEBUGOUT("ixgbe_setup_interface: begin");

        ifp = adapter->ifp = &ec->ec_if;
        strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_init = ixgbe_init;
        ifp->if_stop = ixgbe_ifstop;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
        ifp->if_extflags = IFEF_MPSAFE;
#endif
        ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
        /* TSO parameters */
        ifp->if_hw_tsomax = 65518;
        ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
        ifp->if_hw_tsomaxsegsize = 2048;
#endif
        if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
                ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
        } else {
                ifp->if_transmit = ixgbe_mq_start;
#if 0
                ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
        }
        ifp->if_start = ixgbe_legacy_start;
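        /*
         * Leave two descriptors of slack when sizing the software send
         * queue, presumably so the hardware ring is never filled
         * completely; a guess from the ring handling, not something the
         * sources spell out.
         */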
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
        IFQ_SET_READY(&ifp->if_snd);

        rv = if_initialize(ifp);
        if (rv != 0) {
                aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
                return rv;
        }
        adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
        ether_ifattach(ifp, adapter->hw.mac.addr);
        aprint_normal_dev(dev, "Ethernet address %s\n",
            ether_sprintf(adapter->hw.mac.addr));
        /*
         * We use per TX queue softint, so if_deferred_start_init() isn't
         * used.
         */
        ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

        adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        /* Set capability flags */
        ifp->if_capabilities |= IFCAP_RXCSUM
                             | IFCAP_TXCSUM
                             | IFCAP_TSOv4
                             | IFCAP_TSOv6;
        ifp->if_capenable = 0;

        ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
                            | ETHERCAP_VLAN_HWCSUM
                            | ETHERCAP_JUMBO_MTU
                            | ETHERCAP_VLAN_MTU;

        /* Enable the above capabilities by default */
        ec->ec_capenable = ec->ec_capabilities;

        /*
         * Don't turn this on by default: if vlans are created on another
         * pseudo device (e.g. lagg), vlan events are not passed through,
         * breaking operation, while with HW FILTER off it works.  If you
         * use vlans directly on the ixgbe driver you can enable this and
         * get full hardware tag filtering.
         */
        ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ec->ec_ifmedia = &adapter->media;
        ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
            ixgbe_media_status, &adapter->core_mtx);

        adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
        ixgbe_add_media_types(adapter);

        /* Set autoselect media by default */
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

        if_register(ifp);

        return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64             layer;

        layer = adapter->phy_layer;

#define ADD(mm, dd)                                                     \
        ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);

        ADD(IFM_NONE, 0);

        /* Media types with matching NetBSD media defines */
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
                ADD(IFM_10G_T | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
                ADD(IFM_1000_T | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
                ADD(IFM_100_TX | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
                ADD(IFM_10_T | IFM_FDX, 0);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
            layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
                ADD(IFM_10G_TWINAX | IFM_FDX, 0);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
                ADD(IFM_10G_LR | IFM_FDX, 0);
                if (hw->phy.multispeed_fiber) {
                        ADD(IFM_1000_LX | IFM_FDX, 0);
                }
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
                ADD(IFM_10G_SR | IFM_FDX, 0);
                if (hw->phy.multispeed_fiber) {
                        ADD(IFM_1000_SX | IFM_FDX, 0);
                }
        } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
                ADD(IFM_1000_SX | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
                ADD(IFM_10G_CX4 | IFM_FDX, 0);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
                ADD(IFM_10G_KR | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
                ADD(IFM_10G_KX4 | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
                ADD(IFM_1000_KX | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
                ADD(IFM_2500_KX | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
                ADD(IFM_2500_T | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
                ADD(IFM_5000_T | IFM_FDX, 0);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
                ADD(IFM_1000_BX10 | IFM_FDX, 0);
        /* XXX no ifmedia_set? */

        ADD(IFM_AUTO, 0);

#undef ADD
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                if (hw->phy.type == ixgbe_phy_nl)
                        return (TRUE);
                return (FALSE);
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                switch (hw->mac.ops.get_media_type(hw)) {
                case ixgbe_media_type_fiber:
                case ixgbe_media_type_fiber_qsfp:
                        return (TRUE);
                default:
                        return (FALSE);
                }
        default:
                return (FALSE);
        }
} /* ixgbe_is_sfp */

static void
ixgbe_schedule_admin_tasklet(struct adapter *adapter)
{

        KASSERT(mutex_owned(&adapter->admin_mtx));

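        /*
         * admin_pending coalesces requests: only the first caller after
         * the flag has been cleared enqueues work; later callers simply
         * leave the flag set (the admin handler is expected to clear it
         * before processing).
         */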
1545 if (__predict_true(adapter->osdep.detaching == false)) {
1546 if (adapter->admin_pending == 0)
1547 workqueue_enqueue(adapter->admin_wq,
1548 &adapter->admin_wc, NULL);
1549 adapter->admin_pending = 1;
1550 }
1551 }
1552
1553 /************************************************************************
1554 * ixgbe_config_link
1555 ************************************************************************/
1556 static void
1557 ixgbe_config_link(struct adapter *adapter)
1558 {
1559 struct ixgbe_hw *hw = &adapter->hw;
1560 u32 autoneg, err = 0;
1561 u32 task_requests = 0;
1562 bool sfp, negotiate = false;
1563
1564 sfp = ixgbe_is_sfp(hw);
1565
1566 if (sfp) {
1567 if (hw->phy.multispeed_fiber) {
1568 ixgbe_enable_tx_laser(hw);
1569 task_requests |= IXGBE_REQUEST_TASK_MSF;
1570 }
1571 task_requests |= IXGBE_REQUEST_TASK_MOD;
1572
1573 mutex_enter(&adapter->admin_mtx);
1574 adapter->task_requests |= task_requests;
1575 ixgbe_schedule_admin_tasklet(adapter);
1576 mutex_exit(&adapter->admin_mtx);
1577 } else {
1578 struct ifmedia *ifm = &adapter->media;
1579
1580 if (hw->mac.ops.check_link)
1581 err = ixgbe_check_link(hw, &adapter->link_speed,
1582 &adapter->link_up, FALSE);
1583 if (err)
1584 return;
1585
1586 /*
1587 * Check if it's the first call. If it's the first call,
1588 * get value for auto negotiation.
1589 */
1590 autoneg = hw->phy.autoneg_advertised;
1591 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1592 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1593 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1594 &negotiate);
1595 if (err)
1596 return;
1597 if (hw->mac.ops.setup_link)
1598 err = hw->mac.ops.setup_link(hw, autoneg,
1599 adapter->link_up);
1600 }
1601
1602 } /* ixgbe_config_link */
1603
1604 /************************************************************************
1605 * ixgbe_update_stats_counters - Update board statistics counters.
1606 ************************************************************************/
1607 static void
1608 ixgbe_update_stats_counters(struct adapter *adapter)
1609 {
1610 struct ifnet *ifp = adapter->ifp;
1611 struct ixgbe_hw *hw = &adapter->hw;
1612 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1613 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1614 u64 total_missed_rx = 0;
1615 uint64_t crcerrs, rlec;
1616 unsigned int queue_counters;
1617 int i;
1618
1619 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1620 stats->crcerrs.ev_count += crcerrs;
1621 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1622 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1623 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1624 if (hw->mac.type >= ixgbe_mac_X550)
1625 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1626
1627 	/* Only 16 per-queue counter registers exist in hardware */
1628 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1629 for (i = 0; i < queue_counters; i++) {
1630 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1631 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1632 if (hw->mac.type >= ixgbe_mac_82599EB) {
1633 stats->qprdc[i].ev_count
1634 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1635 }
1636 }
1637
1638 	/* 8 registers exist, one per traffic class */
1639 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1640 uint32_t mp;
1641
1642 /* MPC */
1643 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1644 /* global total per queue */
1645 stats->mpc[i].ev_count += mp;
1646 /* running comprehensive total for stats display */
1647 		total_missed_rx += mp;
		missed_rx += mp;	/* tallies misses for the gprc workaround below */
1648
1649 if (hw->mac.type == ixgbe_mac_82598EB)
1650 stats->rnbc[i].ev_count
1651 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1652
1653 stats->pxontxc[i].ev_count
1654 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1655 stats->pxofftxc[i].ev_count
1656 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1657 if (hw->mac.type >= ixgbe_mac_82599EB) {
1658 stats->pxonrxc[i].ev_count
1659 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1660 stats->pxoffrxc[i].ev_count
1661 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1662 stats->pxon2offc[i].ev_count
1663 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1664 } else {
1665 stats->pxonrxc[i].ev_count
1666 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1667 stats->pxoffrxc[i].ev_count
1668 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1669 }
1670 }
1671 stats->mpctotal.ev_count += total_missed_rx;
1672
1673 	/* The datasheet says M[LR]FC are valid only when link is up at 10Gb/s */
1674 if ((adapter->link_active == LINK_STATE_UP)
1675 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1676 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1677 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1678 }
1679 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1680 stats->rlec.ev_count += rlec;
1681
1682 	/* Hardware workaround: gprc erroneously includes missed packets */
1683 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1684
1685 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1686 stats->lxontxc.ev_count += lxon;
1687 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1688 stats->lxofftxc.ev_count += lxoff;
1689 total = lxon + lxoff;
1690
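	/*
	 * On 82599 and newer the octet counters are split across a
	 * low/high register pair; combine the halves into one 64-bit
	 * value.  On 82598 the high register alone holds the counter,
	 * which the else branch below handles.
	 */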
1691 if (hw->mac.type != ixgbe_mac_82598EB) {
1692 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1693 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1694 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1695 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1696 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1697 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1698 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1699 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1700 } else {
1701 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1702 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1703 /* 82598 only has a counter in the high register */
1704 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1705 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1706 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1707 }
1708
1709 /*
1710 * Workaround: mprc hardware is incorrectly counting
1711 * broadcasts, so for now we subtract those.
1712 */
1713 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1714 stats->bprc.ev_count += bprc;
1715 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1716 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1717
1718 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1719 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1720 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1721 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1722 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1723 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1724
1725 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1726 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1727 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1728
1729 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1730 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1731 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1732 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1733 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1734 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1735 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1736 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1737 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1738 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1739 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1740 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1741 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1742 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1743 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1744 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1745 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1746 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1747 	/* The FCoE counters exist only on 82599 and newer */
1748 if (hw->mac.type != ixgbe_mac_82598EB) {
1749 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1750 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1751 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1752 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1753 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1754 }
1755
1756 /*
1757 * Fill out the OS statistics structure. Only RX errors are required
1758 * here because all TX counters are incremented in the TX path and
1759 * normal RX counters are prepared in ether_input().
1760 */
1761 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1762 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1763 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1764 IF_STAT_PUTREF(ifp);
1765 } /* ixgbe_update_stats_counters */
1766
1767 /************************************************************************
1768 * ixgbe_add_hw_stats
1769 *
1770 * Add sysctl variables, one per statistic, to the system.
1771 ************************************************************************/
1772 static void
1773 ixgbe_add_hw_stats(struct adapter *adapter)
1774 {
1775 device_t dev = adapter->dev;
1776 const struct sysctlnode *rnode, *cnode;
1777 struct sysctllog **log = &adapter->sysctllog;
1778 struct tx_ring *txr = adapter->tx_rings;
1779 struct rx_ring *rxr = adapter->rx_rings;
1780 struct ixgbe_hw *hw = &adapter->hw;
1781 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1782 const char *xname = device_xname(dev);
1783 int i;
1784
1785 /* Driver Statistics */
1786 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1787 NULL, xname, "Driver tx dma soft fail EFBIG");
1788 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1789 NULL, xname, "m_defrag() failed");
1790 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1791 NULL, xname, "Driver tx dma hard fail EFBIG");
1792 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1793 NULL, xname, "Driver tx dma hard fail EINVAL");
1794 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1795 NULL, xname, "Driver tx dma hard fail other");
1796 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1797 NULL, xname, "Driver tx dma soft fail EAGAIN");
1798 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1799 NULL, xname, "Driver tx dma soft fail ENOMEM");
1800 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1801 NULL, xname, "Watchdog timeouts");
1802 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1803 NULL, xname, "TSO errors");
1804 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1805 NULL, xname, "Admin MSI-X IRQ Handled");
1806 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1807 NULL, xname, "Link event");
1808 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1809 NULL, xname, "SFP+ module event");
1810 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1811 NULL, xname, "Multispeed event");
1812 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1813 NULL, xname, "External PHY event");
1814
1815 	/* The maximum number of traffic classes is 8 */
1816 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1817 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1818 snprintf(adapter->tcs[i].evnamebuf,
1819 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1820 xname, i);
1821 if (i < __arraycount(stats->mpc)) {
1822 evcnt_attach_dynamic(&stats->mpc[i],
1823 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1824 "RX Missed Packet Count");
1825 if (hw->mac.type == ixgbe_mac_82598EB)
1826 evcnt_attach_dynamic(&stats->rnbc[i],
1827 EVCNT_TYPE_MISC, NULL,
1828 adapter->tcs[i].evnamebuf,
1829 "Receive No Buffers");
1830 }
1831 if (i < __arraycount(stats->pxontxc)) {
1832 evcnt_attach_dynamic(&stats->pxontxc[i],
1833 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1834 "pxontxc");
1835 evcnt_attach_dynamic(&stats->pxonrxc[i],
1836 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1837 "pxonrxc");
1838 evcnt_attach_dynamic(&stats->pxofftxc[i],
1839 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1840 "pxofftxc");
1841 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1842 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1843 "pxoffrxc");
1844 if (hw->mac.type >= ixgbe_mac_82599EB)
1845 evcnt_attach_dynamic(&stats->pxon2offc[i],
1846 EVCNT_TYPE_MISC, NULL,
1847 adapter->tcs[i].evnamebuf,
1848 "pxon2offc");
1849 }
1850 }
1851
1852 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1853 #ifdef LRO
1854 struct lro_ctrl *lro = &rxr->lro;
1855 #endif /* LRO */
1856
1857 snprintf(adapter->queues[i].evnamebuf,
1858 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1859 xname, i);
1860 snprintf(adapter->queues[i].namebuf,
1861 sizeof(adapter->queues[i].namebuf), "q%d", i);
1862
1863 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1864 aprint_error_dev(dev, "could not create sysctl root\n");
1865 break;
1866 }
1867
1868 if (sysctl_createv(log, 0, &rnode, &rnode,
1869 0, CTLTYPE_NODE,
1870 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1871 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1872 break;
1873
1874 if (sysctl_createv(log, 0, &rnode, &cnode,
1875 CTLFLAG_READWRITE, CTLTYPE_INT,
1876 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1877 ixgbe_sysctl_interrupt_rate_handler, 0,
1878 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1879 break;
1880
1881 if (sysctl_createv(log, 0, &rnode, &cnode,
1882 CTLFLAG_READONLY, CTLTYPE_INT,
1883 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1884 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1885 0, CTL_CREATE, CTL_EOL) != 0)
1886 break;
1887
1888 if (sysctl_createv(log, 0, &rnode, &cnode,
1889 CTLFLAG_READONLY, CTLTYPE_INT,
1890 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1891 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1892 0, CTL_CREATE, CTL_EOL) != 0)
1893 break;
1894
1895 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1896 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1897 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1898 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1899 "Handled queue in softint");
1900 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1901 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1902 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1903 NULL, adapter->queues[i].evnamebuf, "TSO");
1904 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1905 NULL, adapter->queues[i].evnamebuf,
1906 "Queue No Descriptor Available");
1907 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1908 NULL, adapter->queues[i].evnamebuf,
1909 "Queue Packets Transmitted");
1910 #ifndef IXGBE_LEGACY_TX
1911 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1912 NULL, adapter->queues[i].evnamebuf,
1913 "Packets dropped in pcq");
1914 #endif
1915
1916 if (sysctl_createv(log, 0, &rnode, &cnode,
1917 CTLFLAG_READONLY,
1918 CTLTYPE_INT,
1919 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1920 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1921 CTL_CREATE, CTL_EOL) != 0)
1922 break;
1923
1924 if (sysctl_createv(log, 0, &rnode, &cnode,
1925 CTLFLAG_READONLY,
1926 CTLTYPE_INT,
1927 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1928 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1929 CTL_CREATE, CTL_EOL) != 0)
1930 break;
1931
1932 if (sysctl_createv(log, 0, &rnode, &cnode,
1933 CTLFLAG_READONLY,
1934 CTLTYPE_INT,
1935 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1936 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1937 CTL_CREATE, CTL_EOL) != 0)
1938 break;
1939
1940 if (i < __arraycount(stats->qprc)) {
1941 evcnt_attach_dynamic(&stats->qprc[i],
1942 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1943 "qprc");
1944 evcnt_attach_dynamic(&stats->qptc[i],
1945 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1946 "qptc");
1947 evcnt_attach_dynamic(&stats->qbrc[i],
1948 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1949 "qbrc");
1950 evcnt_attach_dynamic(&stats->qbtc[i],
1951 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1952 "qbtc");
1953 if (hw->mac.type >= ixgbe_mac_82599EB)
1954 evcnt_attach_dynamic(&stats->qprdc[i],
1955 EVCNT_TYPE_MISC, NULL,
1956 adapter->queues[i].evnamebuf, "qprdc");
1957 }
1958
1959 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1960 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1961 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1962 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1963 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1964 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1965 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1966 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1967 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1968 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1969 #ifdef LRO
1970 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1971 CTLFLAG_RD, &lro->lro_queued, 0,
1972 "LRO Queued");
1973 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1974 CTLFLAG_RD, &lro->lro_flushed, 0,
1975 "LRO Flushed");
1976 #endif /* LRO */
1977 }
1978
1979 /* MAC stats get their own sub node */
1980
1981 snprintf(stats->namebuf,
1982 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1983
1984 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "rx csum offload - IP");
1986 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "rx csum offload - L4");
1988 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "rx csum offload - IP bad");
1990 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "rx csum offload - L4 bad");
1992 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Interrupt conditions zero");
1994 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Legacy interrupts");
1996
1997 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "CRC Errors");
1999 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "Illegal Byte Errors");
2001 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "Byte Errors");
2003 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "MAC Short Packets Discarded");
2005 if (hw->mac.type >= ixgbe_mac_X550)
2006 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Bad SFD");
2008 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Total Packets Missed");
2010 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "MAC Local Faults");
2012 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "MAC Remote Faults");
2014 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Receive Length Errors");
2016 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Link XON Transmitted");
2018 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Link XON Received");
2020 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Link XOFF Transmitted");
2022 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Link XOFF Received");
2024
2025 /* Packet Reception Stats */
2026 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Total Octets Received");
2028 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Good Octets Received");
2030 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Total Packets Received");
2032 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Good Packets Received");
2034 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Multicast Packets Received");
2036 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Broadcast Packets Received");
2038 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "64 byte frames received ");
2040 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "65-127 byte frames received");
2042 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "128-255 byte frames received");
2044 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "256-511 byte frames received");
2046 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "512-1023 byte frames received");
2048 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "1023-1522 byte frames received");
2050 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Receive Undersized");
2052 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2053 	    stats->namebuf, "Fragmented Packets Received");
2054 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Oversized Packets Received");
2056 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Received Jabber");
2058 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Management Packets Received");
2060 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Management Packets Dropped");
2062 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Checksum Errors");
2064
2065 /* Packet Transmission Stats */
2066 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "Good Octets Transmitted");
2068 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "Total Packets Transmitted");
2070 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "Good Packets Transmitted");
2072 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "Broadcast Packets Transmitted");
2074 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "Multicast Packets Transmitted");
2076 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "Management Packets Transmitted");
2078 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2079 	    stats->namebuf, "64 byte frames transmitted");
2080 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2081 stats->namebuf, "65-127 byte frames transmitted");
2082 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2083 stats->namebuf, "128-255 byte frames transmitted");
2084 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2085 stats->namebuf, "256-511 byte frames transmitted");
2086 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2087 stats->namebuf, "512-1023 byte frames transmitted");
2088 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2089 stats->namebuf, "1024-1522 byte frames transmitted");
2090 } /* ixgbe_add_hw_stats */
2091
2092 static void
2093 ixgbe_clear_evcnt(struct adapter *adapter)
2094 {
2095 struct tx_ring *txr = adapter->tx_rings;
2096 struct rx_ring *rxr = adapter->rx_rings;
2097 struct ixgbe_hw *hw = &adapter->hw;
2098 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2099 int i;
2100
2101 adapter->efbig_tx_dma_setup.ev_count = 0;
2102 adapter->mbuf_defrag_failed.ev_count = 0;
2103 adapter->efbig2_tx_dma_setup.ev_count = 0;
2104 adapter->einval_tx_dma_setup.ev_count = 0;
2105 adapter->other_tx_dma_setup.ev_count = 0;
2106 adapter->eagain_tx_dma_setup.ev_count = 0;
2107 adapter->enomem_tx_dma_setup.ev_count = 0;
2108 adapter->tso_err.ev_count = 0;
2109 adapter->watchdog_events.ev_count = 0;
2110 adapter->admin_irqev.ev_count = 0;
2111 adapter->link_workev.ev_count = 0;
2112 adapter->mod_workev.ev_count = 0;
2113 adapter->msf_workev.ev_count = 0;
2114 adapter->phy_workev.ev_count = 0;
2115
2116 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2117 if (i < __arraycount(stats->mpc)) {
2118 stats->mpc[i].ev_count = 0;
2119 if (hw->mac.type == ixgbe_mac_82598EB)
2120 stats->rnbc[i].ev_count = 0;
2121 }
2122 if (i < __arraycount(stats->pxontxc)) {
2123 stats->pxontxc[i].ev_count = 0;
2124 stats->pxonrxc[i].ev_count = 0;
2125 stats->pxofftxc[i].ev_count = 0;
2126 stats->pxoffrxc[i].ev_count = 0;
2127 if (hw->mac.type >= ixgbe_mac_82599EB)
2128 stats->pxon2offc[i].ev_count = 0;
2129 }
2130 }
2131
2132 txr = adapter->tx_rings;
2133 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2134 adapter->queues[i].irqs.ev_count = 0;
2135 adapter->queues[i].handleq.ev_count = 0;
2136 adapter->queues[i].req.ev_count = 0;
2137 txr->no_desc_avail.ev_count = 0;
2138 txr->total_packets.ev_count = 0;
2139 txr->tso_tx.ev_count = 0;
2140 #ifndef IXGBE_LEGACY_TX
2141 txr->pcq_drops.ev_count = 0;
2142 #endif
2143 txr->q_efbig_tx_dma_setup = 0;
2144 txr->q_mbuf_defrag_failed = 0;
2145 txr->q_efbig2_tx_dma_setup = 0;
2146 txr->q_einval_tx_dma_setup = 0;
2147 txr->q_other_tx_dma_setup = 0;
2148 txr->q_eagain_tx_dma_setup = 0;
2149 txr->q_enomem_tx_dma_setup = 0;
2150 txr->q_tso_err = 0;
2151
2152 if (i < __arraycount(stats->qprc)) {
2153 stats->qprc[i].ev_count = 0;
2154 stats->qptc[i].ev_count = 0;
2155 stats->qbrc[i].ev_count = 0;
2156 stats->qbtc[i].ev_count = 0;
2157 if (hw->mac.type >= ixgbe_mac_82599EB)
2158 stats->qprdc[i].ev_count = 0;
2159 }
2160
2161 rxr->rx_packets.ev_count = 0;
2162 rxr->rx_bytes.ev_count = 0;
2163 rxr->rx_copies.ev_count = 0;
2164 rxr->no_jmbuf.ev_count = 0;
2165 rxr->rx_discarded.ev_count = 0;
2166 }
2167 stats->ipcs.ev_count = 0;
2168 stats->l4cs.ev_count = 0;
2169 stats->ipcs_bad.ev_count = 0;
2170 stats->l4cs_bad.ev_count = 0;
2171 stats->intzero.ev_count = 0;
2172 stats->legint.ev_count = 0;
2173 stats->crcerrs.ev_count = 0;
2174 stats->illerrc.ev_count = 0;
2175 stats->errbc.ev_count = 0;
2176 stats->mspdc.ev_count = 0;
2177 if (hw->mac.type >= ixgbe_mac_X550)
2178 stats->mbsdc.ev_count = 0;
2179 stats->mpctotal.ev_count = 0;
2180 stats->mlfc.ev_count = 0;
2181 stats->mrfc.ev_count = 0;
2182 stats->rlec.ev_count = 0;
2183 stats->lxontxc.ev_count = 0;
2184 stats->lxonrxc.ev_count = 0;
2185 stats->lxofftxc.ev_count = 0;
2186 stats->lxoffrxc.ev_count = 0;
2187
2188 /* Packet Reception Stats */
2189 stats->tor.ev_count = 0;
2190 stats->gorc.ev_count = 0;
2191 stats->tpr.ev_count = 0;
2192 stats->gprc.ev_count = 0;
2193 stats->mprc.ev_count = 0;
2194 stats->bprc.ev_count = 0;
2195 stats->prc64.ev_count = 0;
2196 stats->prc127.ev_count = 0;
2197 stats->prc255.ev_count = 0;
2198 stats->prc511.ev_count = 0;
2199 stats->prc1023.ev_count = 0;
2200 stats->prc1522.ev_count = 0;
2201 stats->ruc.ev_count = 0;
2202 stats->rfc.ev_count = 0;
2203 stats->roc.ev_count = 0;
2204 stats->rjc.ev_count = 0;
2205 stats->mngprc.ev_count = 0;
2206 stats->mngpdc.ev_count = 0;
2207 stats->xec.ev_count = 0;
2208
2209 /* Packet Transmission Stats */
2210 stats->gotc.ev_count = 0;
2211 stats->tpt.ev_count = 0;
2212 stats->gptc.ev_count = 0;
2213 stats->bptc.ev_count = 0;
2214 stats->mptc.ev_count = 0;
2215 stats->mngptc.ev_count = 0;
2216 stats->ptc64.ev_count = 0;
2217 stats->ptc127.ev_count = 0;
2218 stats->ptc255.ev_count = 0;
2219 stats->ptc511.ev_count = 0;
2220 stats->ptc1023.ev_count = 0;
2221 stats->ptc1522.ev_count = 0;
2222 }
2223
2224 /************************************************************************
2225 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2226 *
2227 * Retrieves the TDH value from the hardware
2228 ************************************************************************/
2229 static int
2230 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2231 {
2232 struct sysctlnode node = *rnode;
2233 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2234 struct adapter *adapter;
2235 uint32_t val;
2236
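	/*
	 * Standard read-only sysctl pattern: copy the node, repoint
	 * sysctl_data at a stack variable holding the freshly read
	 * register value, and let sysctl_lookup() copy it out.
	 */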
2237 if (!txr)
2238 return (0);
2239
2240 adapter = txr->adapter;
2241 if (ixgbe_fw_recovery_mode_swflag(adapter))
2242 return (EPERM);
2243
2244 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2245 node.sysctl_data = &val;
2246 return sysctl_lookup(SYSCTLFN_CALL(&node));
2247 } /* ixgbe_sysctl_tdh_handler */
2248
2249 /************************************************************************
2250 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2251 *
2252 * Retrieves the TDT value from the hardware
2253 ************************************************************************/
2254 static int
2255 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2256 {
2257 struct sysctlnode node = *rnode;
2258 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2259 struct adapter *adapter;
2260 uint32_t val;
2261
2262 if (!txr)
2263 return (0);
2264
2265 adapter = txr->adapter;
2266 if (ixgbe_fw_recovery_mode_swflag(adapter))
2267 return (EPERM);
2268
2269 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2270 node.sysctl_data = &val;
2271 return sysctl_lookup(SYSCTLFN_CALL(&node));
2272 } /* ixgbe_sysctl_tdt_handler */
2273
2274 /************************************************************************
2275 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2276 * handler function
2277 *
2278 * Retrieves the next_to_check value
2279 ************************************************************************/
2280 static int
2281 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2282 {
2283 struct sysctlnode node = *rnode;
2284 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2285 struct adapter *adapter;
2286 uint32_t val;
2287
2288 if (!rxr)
2289 return (0);
2290
2291 adapter = rxr->adapter;
2292 if (ixgbe_fw_recovery_mode_swflag(adapter))
2293 return (EPERM);
2294
2295 val = rxr->next_to_check;
2296 node.sysctl_data = &val;
2297 return sysctl_lookup(SYSCTLFN_CALL(&node));
2298 } /* ixgbe_sysctl_next_to_check_handler */
2299
2300 /************************************************************************
2301 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2302 *
2303 * Retrieves the RDH value from the hardware
2304 ************************************************************************/
2305 static int
2306 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2307 {
2308 struct sysctlnode node = *rnode;
2309 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2310 struct adapter *adapter;
2311 uint32_t val;
2312
2313 if (!rxr)
2314 return (0);
2315
2316 adapter = rxr->adapter;
2317 if (ixgbe_fw_recovery_mode_swflag(adapter))
2318 return (EPERM);
2319
2320 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2321 node.sysctl_data = &val;
2322 return sysctl_lookup(SYSCTLFN_CALL(&node));
2323 } /* ixgbe_sysctl_rdh_handler */
2324
2325 /************************************************************************
2326 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2327 *
2328 * Retrieves the RDT value from the hardware
2329 ************************************************************************/
2330 static int
2331 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2332 {
2333 struct sysctlnode node = *rnode;
2334 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2335 struct adapter *adapter;
2336 uint32_t val;
2337
2338 if (!rxr)
2339 return (0);
2340
2341 adapter = rxr->adapter;
2342 if (ixgbe_fw_recovery_mode_swflag(adapter))
2343 return (EPERM);
2344
2345 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2346 node.sysctl_data = &val;
2347 return sysctl_lookup(SYSCTLFN_CALL(&node));
2348 } /* ixgbe_sysctl_rdt_handler */
2349
2350 static int
2351 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2352 {
2353 struct ifnet *ifp = &ec->ec_if;
2354 struct adapter *adapter = ifp->if_softc;
2355 int rv;
2356
2357 if (set)
2358 rv = ixgbe_register_vlan(adapter, vid);
2359 else
2360 rv = ixgbe_unregister_vlan(adapter, vid);
2361
2362 if (rv != 0)
2363 return rv;
2364
2365 /*
2366 	 * Reconfigure VLAN HW tagging when ec_nvlans changes from 1 to 0
2367 	 * or from 0 to 1.
2368 */
2369 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2370 ixgbe_setup_vlan_hw_tagging(adapter);
2371
2372 return rv;
2373 }
2374
2375 /************************************************************************
2376 * ixgbe_register_vlan
2377 *
2378 * Run via vlan config EVENT, it enables us to use the
2379 * HW Filter table since we can get the vlan id. This
2380 * just creates the entry in the soft version of the
2381 * VFTA, init will repopulate the real table.
2382 ************************************************************************/
2383 static int
2384 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2385 {
2386 u16 index, bit;
2387 int error;
2388
2389 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2390 return EINVAL;
2391
2392 IXGBE_CORE_LOCK(adapter);
2393 index = (vtag >> 5) & 0x7F;
2394 bit = vtag & 0x1F;
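	/*
	 * The VFTA is an array of 32-bit registers, each covering 32
	 * VLAN IDs: bits 5-11 of the tag select the register (index)
	 * and bits 0-4 select the bit within it.  For example, vtag
	 * 1234 maps to index 38, bit 18.
	 */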
2395 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2396 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2397 true);
2398 IXGBE_CORE_UNLOCK(adapter);
2399 if (error != 0)
2400 error = EACCES;
2401
2402 return error;
2403 } /* ixgbe_register_vlan */
2404
2405 /************************************************************************
2406 * ixgbe_unregister_vlan
2407 *
2408 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2409 ************************************************************************/
2410 static int
2411 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2412 {
2413 u16 index, bit;
2414 int error;
2415
2416 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2417 return EINVAL;
2418
2419 IXGBE_CORE_LOCK(adapter);
2420 index = (vtag >> 5) & 0x7F;
2421 bit = vtag & 0x1F;
2422 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2423 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2424 true);
2425 IXGBE_CORE_UNLOCK(adapter);
2426 if (error != 0)
2427 error = EACCES;
2428
2429 return error;
2430 } /* ixgbe_unregister_vlan */
2431
2432 static void
2433 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2434 {
2435 struct ethercom *ec = &adapter->osdep.ec;
2436 struct ixgbe_hw *hw = &adapter->hw;
2437 struct rx_ring *rxr;
2438 u32 ctrl;
2439 int i;
2440 bool hwtagging;
2441
2442 /* Enable HW tagging only if any vlan is attached */
2443 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2444 && VLAN_ATTACHED(ec);
2445
2446 /* Setup the queues for vlans */
2447 for (i = 0; i < adapter->num_queues; i++) {
2448 rxr = &adapter->rx_rings[i];
2449 /*
2450 		 * On 82599 and later, the VLAN enable is per-queue in RXDCTL.
2451 */
2452 if (hw->mac.type != ixgbe_mac_82598EB) {
2453 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2454 if (hwtagging)
2455 ctrl |= IXGBE_RXDCTL_VME;
2456 else
2457 ctrl &= ~IXGBE_RXDCTL_VME;
2458 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2459 }
2460 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2461 }
2462
2463 /* VLAN hw tagging for 82598 */
2464 if (hw->mac.type == ixgbe_mac_82598EB) {
2465 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2466 if (hwtagging)
2467 ctrl |= IXGBE_VLNCTRL_VME;
2468 else
2469 ctrl &= ~IXGBE_VLNCTRL_VME;
2470 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2471 }
2472 } /* ixgbe_setup_vlan_hw_tagging */
2473
2474 static void
2475 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2476 {
2477 struct ethercom *ec = &adapter->osdep.ec;
2478 struct ixgbe_hw *hw = &adapter->hw;
2479 int i;
2480 u32 ctrl;
2481 struct vlanid_list *vlanidp;
2482
2483 /*
2484 * This function is called from both if_init and ifflags_cb()
2485 * on NetBSD.
2486 */
2487
2488 /*
2489 * Part 1:
2490 * Setup VLAN HW tagging
2491 */
2492 ixgbe_setup_vlan_hw_tagging(adapter);
2493
2494 /*
2495 * Part 2:
2496 * Setup VLAN HW filter
2497 */
2498 /* Cleanup shadow_vfta */
2499 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2500 adapter->shadow_vfta[i] = 0;
2501 /* Generate shadow_vfta from ec_vids */
2502 ETHER_LOCK(ec);
2503 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2504 uint32_t idx;
2505
2506 idx = vlanidp->vid / 32;
2507 KASSERT(idx < IXGBE_VFTA_SIZE);
2508 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2509 }
2510 ETHER_UNLOCK(ec);
2511 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2512 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2513
2514 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2515 /* Enable the Filter Table if enabled */
2516 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2517 ctrl |= IXGBE_VLNCTRL_VFE;
2518 else
2519 ctrl &= ~IXGBE_VLNCTRL_VFE;
2520 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2521 } /* ixgbe_setup_vlan_hw_support */
2522
2523 /************************************************************************
2524 * ixgbe_get_slot_info
2525 *
2526 * Get the width and transaction speed of
2527 * the slot this adapter is plugged into.
2528 ************************************************************************/
2529 static void
2530 ixgbe_get_slot_info(struct adapter *adapter)
2531 {
2532 device_t dev = adapter->dev;
2533 struct ixgbe_hw *hw = &adapter->hw;
2534 u32 offset;
2535 u16 link;
2536 int bus_info_valid = TRUE;
2537
2538 /* Some devices are behind an internal bridge */
2539 switch (hw->device_id) {
2540 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2541 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2542 goto get_parent_info;
2543 default:
2544 break;
2545 }
2546
2547 ixgbe_get_bus_info(hw);
2548
2549 /*
2550 	 * Some devices don't use PCIe at all; for those there is no point
2551 	 * in displaying "Unknown" for the bus speed and width, so return.
2552 */
2553 switch (hw->mac.type) {
2554 case ixgbe_mac_X550EM_x:
2555 case ixgbe_mac_X550EM_a:
2556 return;
2557 default:
2558 goto display;
2559 }
2560
2561 get_parent_info:
2562 /*
2563 * For the Quad port adapter we need to parse back
2564 * up the PCI tree to find the speed of the expansion
2565 * slot into which this adapter is plugged. A bit more work.
2566 */
2567 dev = device_parent(device_parent(dev));
2568 #if 0
2569 #ifdef IXGBE_DEBUG
2570 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2571 pci_get_slot(dev), pci_get_function(dev));
2572 #endif
2573 dev = device_parent(device_parent(dev));
2574 #ifdef IXGBE_DEBUG
2575 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2576 pci_get_slot(dev), pci_get_function(dev));
2577 #endif
2578 #endif
2579 /* Now get the PCI Express Capabilities offset */
2580 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2581 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2582 /*
2583 		 * Can't get the PCI Express capabilities;
2584 		 * fall back to the default method.
2585 */
2586 bus_info_valid = FALSE;
2587 ixgbe_get_bus_info(hw);
2588 goto display;
2589 }
2590 /* ...and read the Link Status Register */
2591 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2592 offset + PCIE_LCSR) >> 16;
2593 ixgbe_set_pci_config_data_generic(hw, link);
2594
2595 display:
2596 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2597 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2598 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2599 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2600 "Unknown"),
2601 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2602 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2603 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2604 "Unknown"));
2605
2606 if (bus_info_valid) {
2607 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2608 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2609 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2610 device_printf(dev, "PCI-Express bandwidth available"
2611 " for this card\n is not sufficient for"
2612 " optimal performance.\n");
2613 device_printf(dev, "For optimal performance a x8 "
2614 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2615 }
2616 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2617 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2618 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2619 device_printf(dev, "PCI-Express bandwidth available"
2620 " for this card\n is not sufficient for"
2621 " optimal performance.\n");
2622 device_printf(dev, "For optimal performance a x8 "
2623 "PCIE Gen3 slot is required.\n");
2624 }
2625 } else
2626 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2627
2628 return;
2629 } /* ixgbe_get_slot_info */
2630
2631 /************************************************************************
2632 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2633 ************************************************************************/
2634 static inline void
2635 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2636 {
2637 struct ixgbe_hw *hw = &adapter->hw;
2638 struct ix_queue *que = &adapter->queues[vector];
2639 u64 queue = 1ULL << vector;
2640 u32 mask;
2641
2642 mutex_enter(&que->dc_mtx);
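	/*
	 * disabled_count implements nested disable/enable: every
	 * disable increments it and every enable decrements it, so
	 * the interrupt is actually unmasked only when the count
	 * drops back to zero.
	 */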
2643 if (que->disabled_count > 0 && --que->disabled_count > 0)
2644 goto out;
2645
2646 if (hw->mac.type == ixgbe_mac_82598EB) {
2647 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2648 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2649 } else {
2650 mask = (queue & 0xFFFFFFFF);
2651 if (mask)
2652 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2653 mask = (queue >> 32);
2654 if (mask)
2655 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2656 }
2657 out:
2658 mutex_exit(&que->dc_mtx);
2659 } /* ixgbe_enable_queue */
2660
2661 /************************************************************************
2662 * ixgbe_disable_queue_internal
2663 ************************************************************************/
2664 static inline void
2665 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2666 {
2667 struct ixgbe_hw *hw = &adapter->hw;
2668 struct ix_queue *que = &adapter->queues[vector];
2669 u64 queue = 1ULL << vector;
2670 u32 mask;
2671
2672 mutex_enter(&que->dc_mtx);
2673
2674 if (que->disabled_count > 0) {
2675 if (nestok)
2676 que->disabled_count++;
2677 goto out;
2678 }
2679 que->disabled_count++;
2680
2681 if (hw->mac.type == ixgbe_mac_82598EB) {
2682 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2683 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2684 } else {
2685 mask = (queue & 0xFFFFFFFF);
2686 if (mask)
2687 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2688 mask = (queue >> 32);
2689 if (mask)
2690 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2691 }
2692 out:
2693 mutex_exit(&que->dc_mtx);
2694 } /* ixgbe_disable_queue_internal */
2695
2696 /************************************************************************
2697 * ixgbe_disable_queue
2698 ************************************************************************/
2699 static inline void
2700 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2701 {
2702
2703 ixgbe_disable_queue_internal(adapter, vector, true);
2704 } /* ixgbe_disable_queue */
2705
2706 /************************************************************************
2707 * ixgbe_sched_handle_que - schedule deferred packet processing
2708 ************************************************************************/
2709 static inline void
2710 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2711 {
2712
2713 if (que->txrx_use_workqueue) {
2714 /*
2715 		 * adapter->que_wq is bound to each CPU instead of each
2716 		 * NIC queue to reduce the number of workqueue kthreads.
2717 		 * Because interrupt affinity must be considered in this
2718 		 * function, the workqueue kthreads must be WQ_PERCPU.
2719 		 * If a WQ_PERCPU workqueue were created for each NIC
2720 		 * queue instead, the number of kthreads would be
2721 		 * (number of used NIC queues) * (number of CPUs),
2722 		 * which is (number of CPUs) ^ 2 most often.
2723 		 *
2724 		 * Re-entry for the same NIC queue is prevented by
2725 		 * masking the queue's interrupt, and different NIC
2726 		 * queues use different struct work instances
2727 		 * (que->wq_cookie), so an "enqueued" flag to avoid
2728 		 * calling workqueue_enqueue() twice is not required.
2729 */
2730 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2731 } else {
2732 softint_schedule(que->que_si);
2733 }
2734 }
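
/*
 * For reference, a minimal sketch (not the attach-path code itself) of
 * how a WQ_PERCPU workqueue like adapter->que_wq can be created; the
 * handler name and priority used here are illustrative assumptions:
 */
#if 0
	int error;

	error = workqueue_create(&adapter->que_wq, "ixgbe_que",
	    ixgbe_handle_que_work /* assumed handler */, adapter,
	    PRI_SOFTNET, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
	if (error)
		aprint_error_dev(adapter->dev,
		    "couldn't create workqueue (%d)\n", error);
#endif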
2735
2736 /************************************************************************
2737 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2738 ************************************************************************/
2739 static int
2740 ixgbe_msix_que(void *arg)
2741 {
2742 struct ix_queue *que = arg;
2743 struct adapter *adapter = que->adapter;
2744 struct ifnet *ifp = adapter->ifp;
2745 struct tx_ring *txr = que->txr;
2746 struct rx_ring *rxr = que->rxr;
2747 bool more;
2748 u32 newitr = 0;
2749
2750 /* Protect against spurious interrupts */
2751 if ((ifp->if_flags & IFF_RUNNING) == 0)
2752 return 0;
2753
2754 ixgbe_disable_queue(adapter, que->msix);
2755 ++que->irqs.ev_count;
2756
2757 /*
2758 * Don't change "que->txrx_use_workqueue" from this point to avoid
2759 * flip-flopping softint/workqueue mode in one deferred processing.
2760 */
2761 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2762
2763 #ifdef __NetBSD__
2764 /* Don't run ixgbe_rxeof in interrupt context */
2765 more = true;
2766 #else
2767 more = ixgbe_rxeof(que);
2768 #endif
2769
2770 IXGBE_TX_LOCK(txr);
2771 ixgbe_txeof(txr);
2772 IXGBE_TX_UNLOCK(txr);
2773
2774 /* Do AIM now? */
2775
2776 if (adapter->enable_aim == false)
2777 goto no_calc;
2778 /*
2779 * Do Adaptive Interrupt Moderation:
2780 * - Write out last calculated setting
2781 * - Calculate based on average size over
2782 * the last interval.
2783 */
2784 if (que->eitr_setting)
2785 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2786
2787 que->eitr_setting = 0;
2788
2789 /* Idle, do nothing */
2790 if ((txr->bytes == 0) && (rxr->bytes == 0))
2791 goto no_calc;
2792
2793 if ((txr->bytes) && (txr->packets))
2794 newitr = txr->bytes/txr->packets;
2795 if ((rxr->bytes) && (rxr->packets))
2796 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2797 newitr += 24; /* account for hardware frame, crc */
2798
2799 /* set an upper boundary */
2800 newitr = uimin(newitr, 3000);
2801
2802 /* Be nice to the mid range */
2803 if ((newitr > 300) && (newitr < 1200))
2804 newitr = (newitr / 3);
2805 else
2806 newitr = (newitr / 2);
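
	/*
	 * Worked example: full-sized frames averaging 1500 bytes give
	 * newitr = 1500 + 24 = 1524, outside the (300, 1200) mid
	 * range, hence 1524 / 2 = 762; minimum-sized 64-byte frames
	 * give (64 + 24) / 2 = 44.
	 */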
2807
2808 /*
2809 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
2810 	 * Currently we use 2us for RSC_DELAY.  The minimum value is always
2811 	 * greater than 2us on 100M (and presumably 10M, though that case is
2812 	 * not documented), but it is not on 1G and higher.
2813 */
2814 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2815 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2816 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2817 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2818 }
2819
2820 /* save for next interrupt */
2821 que->eitr_setting = newitr;
2822
2823 /* Reset state */
2824 txr->bytes = 0;
2825 txr->packets = 0;
2826 rxr->bytes = 0;
2827 rxr->packets = 0;
2828
2829 no_calc:
2830 if (more)
2831 ixgbe_sched_handle_que(adapter, que);
2832 else
2833 ixgbe_enable_queue(adapter, que->msix);
2834
2835 return 1;
2836 } /* ixgbe_msix_que */
2837
2838 /************************************************************************
2839 * ixgbe_media_status - Media Ioctl callback
2840 *
2841 * Called whenever the user queries the status of
2842 * the interface using ifconfig.
2843 ************************************************************************/
2844 static void
2845 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2846 {
2847 struct adapter *adapter = ifp->if_softc;
2848 struct ixgbe_hw *hw = &adapter->hw;
2849 int layer;
2850
2851 INIT_DEBUGOUT("ixgbe_media_status: begin");
2852 ixgbe_update_link_status(adapter);
2853
2854 ifmr->ifm_status = IFM_AVALID;
2855 ifmr->ifm_active = IFM_ETHER;
2856
2857 if (adapter->link_active != LINK_STATE_UP) {
2858 ifmr->ifm_active |= IFM_NONE;
2859 return;
2860 }
2861
2862 ifmr->ifm_status |= IFM_ACTIVE;
2863 layer = adapter->phy_layer;
2864
2865 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2866 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2867 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2868 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2869 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2870 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2871 switch (adapter->link_speed) {
2872 case IXGBE_LINK_SPEED_10GB_FULL:
2873 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2874 break;
2875 case IXGBE_LINK_SPEED_5GB_FULL:
2876 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2877 break;
2878 case IXGBE_LINK_SPEED_2_5GB_FULL:
2879 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2880 break;
2881 case IXGBE_LINK_SPEED_1GB_FULL:
2882 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2883 break;
2884 case IXGBE_LINK_SPEED_100_FULL:
2885 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2886 break;
2887 case IXGBE_LINK_SPEED_10_FULL:
2888 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2889 break;
2890 }
2891 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2892 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2893 switch (adapter->link_speed) {
2894 case IXGBE_LINK_SPEED_10GB_FULL:
2895 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2896 break;
2897 }
2898 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2899 switch (adapter->link_speed) {
2900 case IXGBE_LINK_SPEED_10GB_FULL:
2901 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2902 break;
2903 case IXGBE_LINK_SPEED_1GB_FULL:
2904 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2905 break;
2906 }
2907 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2908 switch (adapter->link_speed) {
2909 case IXGBE_LINK_SPEED_10GB_FULL:
2910 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2911 break;
2912 case IXGBE_LINK_SPEED_1GB_FULL:
2913 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2914 break;
2915 }
2916 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2917 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2918 switch (adapter->link_speed) {
2919 case IXGBE_LINK_SPEED_10GB_FULL:
2920 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2921 break;
2922 case IXGBE_LINK_SPEED_1GB_FULL:
2923 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2924 break;
2925 }
2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2927 switch (adapter->link_speed) {
2928 case IXGBE_LINK_SPEED_10GB_FULL:
2929 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2930 break;
2931 }
2932 /*
2933 * XXX: These need to use the proper media types once
2934 * they're added.
2935 */
2936 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2937 switch (adapter->link_speed) {
2938 case IXGBE_LINK_SPEED_10GB_FULL:
2939 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2940 break;
2941 case IXGBE_LINK_SPEED_2_5GB_FULL:
2942 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2943 break;
2944 case IXGBE_LINK_SPEED_1GB_FULL:
2945 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2946 break;
2947 }
2948 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2949 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2950 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2951 switch (adapter->link_speed) {
2952 case IXGBE_LINK_SPEED_10GB_FULL:
2953 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2954 break;
2955 case IXGBE_LINK_SPEED_2_5GB_FULL:
2956 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2957 break;
2958 case IXGBE_LINK_SPEED_1GB_FULL:
2959 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2960 break;
2961 }
2962
2963 /* If nothing is recognized... */
2964 #if 0
2965 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2966 ifmr->ifm_active |= IFM_UNKNOWN;
2967 #endif
2968
2969 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2970
2971 /* Display current flow control setting used on link */
2972 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2973 hw->fc.current_mode == ixgbe_fc_full)
2974 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2975 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2976 hw->fc.current_mode == ixgbe_fc_full)
2977 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2978
2979 return;
2980 } /* ixgbe_media_status */
2981
2982 /************************************************************************
2983 * ixgbe_media_change - Media Ioctl callback
2984 *
2985 * Called when the user changes speed/duplex using
2986  *   media/mediaopt options with ifconfig.
2987 ************************************************************************/
2988 static int
2989 ixgbe_media_change(struct ifnet *ifp)
2990 {
2991 struct adapter *adapter = ifp->if_softc;
2992 struct ifmedia *ifm = &adapter->media;
2993 struct ixgbe_hw *hw = &adapter->hw;
2994 ixgbe_link_speed speed = 0;
2995 ixgbe_link_speed link_caps = 0;
2996 bool negotiate = false;
2997 s32 err = IXGBE_NOT_IMPLEMENTED;
2998
2999 INIT_DEBUGOUT("ixgbe_media_change: begin");
3000
3001 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3002 return (EINVAL);
3003
3004 if (hw->phy.media_type == ixgbe_media_type_backplane)
3005 return (EPERM);
3006
3007 /*
3008 * We don't actually need to check against the supported
3009 * media types of the adapter; ifmedia will take care of
3010 * that for us.
3011 */
3012 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3013 case IFM_AUTO:
3014 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3015 &negotiate);
3016 if (err != IXGBE_SUCCESS) {
3017 device_printf(adapter->dev, "Unable to determine "
3018 "supported advertise speeds\n");
3019 return (ENODEV);
3020 }
3021 speed |= link_caps;
3022 break;
3023 case IFM_10G_T:
3024 case IFM_10G_LRM:
3025 case IFM_10G_LR:
3026 case IFM_10G_TWINAX:
3027 case IFM_10G_SR:
3028 case IFM_10G_CX4:
3029 case IFM_10G_KR:
3030 case IFM_10G_KX4:
3031 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3032 break;
3033 case IFM_5000_T:
3034 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3035 break;
3036 case IFM_2500_T:
3037 case IFM_2500_KX:
3038 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3039 break;
3040 case IFM_1000_T:
3041 case IFM_1000_LX:
3042 case IFM_1000_SX:
3043 case IFM_1000_KX:
3044 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3045 break;
3046 case IFM_100_TX:
3047 speed |= IXGBE_LINK_SPEED_100_FULL;
3048 break;
3049 case IFM_10_T:
3050 speed |= IXGBE_LINK_SPEED_10_FULL;
3051 break;
3052 case IFM_NONE:
3053 break;
3054 default:
3055 goto invalid;
3056 }
3057
3058 hw->mac.autotry_restart = TRUE;
3059 hw->mac.ops.setup_link(hw, speed, TRUE);
3060 adapter->advertise = 0;
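	/*
	 * adapter->advertise records the requested speeds as a bitmap
	 * using the same encoding as the advertise_speed sysctl:
	 * 0x01 = 100M, 0x02 = 1G, 0x04 = 10G, 0x08 = 10M,
	 * 0x10 = 2.5G, 0x20 = 5G.
	 */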
3061 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3062 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3063 adapter->advertise |= 1 << 2;
3064 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3065 adapter->advertise |= 1 << 1;
3066 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3067 adapter->advertise |= 1 << 0;
3068 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3069 adapter->advertise |= 1 << 3;
3070 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3071 adapter->advertise |= 1 << 4;
3072 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3073 adapter->advertise |= 1 << 5;
3074 }
3075
3076 return (0);
3077
3078 invalid:
3079 device_printf(adapter->dev, "Invalid media type!\n");
3080
3081 return (EINVAL);
3082 } /* ixgbe_media_change */
3083
3084 /************************************************************************
3085 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3086 ************************************************************************/
3087 static int
3088 ixgbe_msix_admin(void *arg)
3089 {
3090 struct adapter *adapter = arg;
3091 struct ixgbe_hw *hw = &adapter->hw;
3092 u32 eicr, eicr_mask;
3093 u32 task_requests = 0;
3094 s32 retval;
3095
3096 ++adapter->admin_irqev.ev_count;
3097
3098 /* Pause other interrupts */
3099 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3100
3101 /* First get the cause */
3102 /*
3103 	 * The specifications of the 82598, 82599, X540 and X550 say the EICS
3104 	 * register is write only.  However, Linux reads EICS instead of EICR
3105 	 * as a workaround for silicon errata; there appears to be a problem
3106 	 * with the read-clear mechanism of the EICR register.
3107 */
3108 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3109 /* Be sure the queue bits are not cleared */
3110 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3111 /* Clear interrupt with write */
3112 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3113
3114 if (ixgbe_is_sfp(hw)) {
3115 /* Pluggable optics-related interrupt */
3116 if (hw->mac.type >= ixgbe_mac_X540)
3117 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3118 else
3119 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3120
3121 /*
3122 		 * An interrupt might not arrive when a module is inserted.
3123 		 * When a link status change interrupt occurs while the driver
3124 		 * still regards the SFP as unplugged, issue the module softint
3125 		 * first and then the LSC interrupt.
3126 */
3127 if ((eicr & eicr_mask)
3128 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3129 && (eicr & IXGBE_EICR_LSC))) {
3130 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3131 task_requests |= IXGBE_REQUEST_TASK_MOD;
3132 }
3133
3134 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3135 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3136 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3137 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3138 task_requests |= IXGBE_REQUEST_TASK_MSF;
3139 }
3140 }
3141
3142 /* Link status change */
3143 if (eicr & IXGBE_EICR_LSC) {
3144 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3145 task_requests |= IXGBE_REQUEST_TASK_LSC;
3146 }
3147
3148 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3149 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3150 (eicr & IXGBE_EICR_FLOW_DIR)) {
3151 /* This is probably overkill :) */
3152 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3153 return 1;
3154 /* Disable the interrupt */
3155 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3156 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3157 }
3158
3159 if (eicr & IXGBE_EICR_ECC) {
3160 device_printf(adapter->dev,
3161 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3162 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3163 }
3164
3165 /* Check for over temp condition */
3166 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3167 switch (adapter->hw.mac.type) {
3168 case ixgbe_mac_X550EM_a:
3169 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3170 break;
3171 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3172 IXGBE_EICR_GPI_SDP0_X550EM_a);
3173 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3174 IXGBE_EICR_GPI_SDP0_X550EM_a);
3175 retval = hw->phy.ops.check_overtemp(hw);
3176 if (retval != IXGBE_ERR_OVERTEMP)
3177 break;
3178 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3179 device_printf(adapter->dev, "System shutdown required!\n");
3180 break;
3181 default:
3182 if (!(eicr & IXGBE_EICR_TS))
3183 break;
3184 retval = hw->phy.ops.check_overtemp(hw);
3185 if (retval != IXGBE_ERR_OVERTEMP)
3186 break;
3187 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3188 device_printf(adapter->dev, "System shutdown required!\n");
3189 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3190 break;
3191 }
3192 }
3193
3194 /* Check for VF message */
3195 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3196 (eicr & IXGBE_EICR_MAILBOX)) {
3197 task_requests |= IXGBE_REQUEST_TASK_MBX;
3198 }
3199 }
3200
3201 /* Check for fan failure */
3202 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3203 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3204 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3205 }
3206
3207 /* External PHY interrupt */
3208 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3209 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3210 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3211 task_requests |= IXGBE_REQUEST_TASK_PHY;
3212 }
3213
3214 if (task_requests != 0) {
3215 /* Re-enabling other interrupts is done in the admin task */
3216 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
3217
3218 mutex_enter(&adapter->admin_mtx);
3219 adapter->task_requests |= task_requests;
3220 ixgbe_schedule_admin_tasklet(adapter);
3221 mutex_exit(&adapter->admin_mtx);
3222 } else {
3223 /* Re-enable other interrupts */
3224 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3225 }
3226
3227 return 1;
3228 } /* ixgbe_msix_admin */
3229
3230 static void
3231 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3232 {
3233
3234 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3235 itr |= itr << 16;
3236 else
3237 itr |= IXGBE_EITR_CNT_WDIS;
3238
3239 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3240 }
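
/*
 * Worked example (informational): with the 2us EITR granularity implied
 * by the sysctl handler below, a 100us interval is passed in as
 * (100 / 2) << 3 = 0x190.  On 82598 the value is mirrored into the
 * upper half, so 0x01900190 is written; on 82599 and newer the write is
 * 0x190 | IXGBE_EITR_CNT_WDIS (bit 31) so that the internal ITR counter
 * is not reset by the write.
 */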
3241
3242
3243 /************************************************************************
3244 * ixgbe_sysctl_interrupt_rate_handler
3245 ************************************************************************/
3246 static int
3247 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3248 {
3249 struct sysctlnode node = *rnode;
3250 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3251 struct adapter *adapter;
3252 uint32_t reg, usec, rate;
3253 int error;
3254
3255 if (que == NULL)
3256 return 0;
3257
3258 adapter = que->adapter;
3259 if (ixgbe_fw_recovery_mode_swflag(adapter))
3260 return (EPERM);
3261
3262 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3263 usec = ((reg & 0x0FF8) >> 3);
3264 if (usec > 0)
3265 rate = 500000 / usec;
3266 else
3267 rate = 0;
3268 node.sysctl_data = &rate;
3269 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3270 if (error || newp == NULL)
3271 return error;
3272 reg &= ~0xfff; /* default, no limitation */
3273 if (rate > 0 && rate < 500000) {
3274 if (rate < 1000)
3275 rate = 1000;
3276 reg |= ((4000000 / rate) & 0xff8);
3277 /*
3278 		 * When RSC is used, the ITR interval must be larger than
3279 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3280 		 * The minimum value is always greater than 2us on 100M
3281 		 * (and presumably 10M, though undocumented), but not on 1G and higher.
3282 */
3283 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3284 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3285 if ((adapter->num_queues > 1)
3286 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3287 return EINVAL;
3288 }
3289 ixgbe_max_interrupt_rate = rate;
3290 } else
3291 ixgbe_max_interrupt_rate = 0;
3292 ixgbe_eitr_write(adapter, que->msix, reg);
3293
3294 return (0);
3295 } /* ixgbe_sysctl_interrupt_rate_handler */
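
/*
 * Worked example for the handler above: writing rate = 10000 gives
 * 4000000 / 10000 = 400 = 0x190, so bits 11:3 of EITR hold
 * 0x190 >> 3 = 50, i.e. an interval of 50 * 2us = 100us and thus
 * roughly 10000 interrupts/s.  Reading back,
 * usec = (0x190 & 0x0FF8) >> 3 = 50 and the reported rate is
 * 500000 / 50 = 10000.
 */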
3296
3297 const struct sysctlnode *
3298 ixgbe_sysctl_instance(struct adapter *adapter)
3299 {
3300 const char *dvname;
3301 struct sysctllog **log;
3302 int rc;
3303 const struct sysctlnode *rnode;
3304
3305 if (adapter->sysctltop != NULL)
3306 return adapter->sysctltop;
3307
3308 log = &adapter->sysctllog;
3309 dvname = device_xname(adapter->dev);
3310
3311 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3312 0, CTLTYPE_NODE, dvname,
3313 SYSCTL_DESCR("ixgbe information and settings"),
3314 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3315 goto err;
3316
3317 return rnode;
3318 err:
3319 device_printf(adapter->dev,
3320 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3321 return NULL;
3322 }
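
/*
 * The node is created with CTL_HW as the parent and is named after the
 * device instance, so for the first adapter the subtree is typically
 * "hw.ixg0".
 */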
3323
3324 /************************************************************************
3325 * ixgbe_add_device_sysctls
3326 ************************************************************************/
3327 static void
3328 ixgbe_add_device_sysctls(struct adapter *adapter)
3329 {
3330 device_t dev = adapter->dev;
3331 struct ixgbe_hw *hw = &adapter->hw;
3332 struct sysctllog **log;
3333 const struct sysctlnode *rnode, *cnode;
3334
3335 log = &adapter->sysctllog;
3336
3337 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3338 aprint_error_dev(dev, "could not create sysctl root\n");
3339 return;
3340 }
3341
3342 if (sysctl_createv(log, 0, &rnode, &cnode,
3343 CTLFLAG_READWRITE, CTLTYPE_INT,
3344 "debug", SYSCTL_DESCR("Debug Info"),
3345 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3346 aprint_error_dev(dev, "could not create sysctl\n");
3347
3348 if (sysctl_createv(log, 0, &rnode, &cnode,
3349 CTLFLAG_READONLY, CTLTYPE_INT,
3350 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3351 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3352 aprint_error_dev(dev, "could not create sysctl\n");
3353
3354 if (sysctl_createv(log, 0, &rnode, &cnode,
3355 CTLFLAG_READONLY, CTLTYPE_INT,
3356 "num_queues", SYSCTL_DESCR("Number of queues"),
3357 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3358 aprint_error_dev(dev, "could not create sysctl\n");
3359
3360 /* Sysctls for all devices */
3361 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3362 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3363 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3364 CTL_EOL) != 0)
3365 aprint_error_dev(dev, "could not create sysctl\n");
3366
3367 adapter->enable_aim = ixgbe_enable_aim;
3368 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3369 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3370 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3371 aprint_error_dev(dev, "could not create sysctl\n");
3372
3373 if (sysctl_createv(log, 0, &rnode, &cnode,
3374 CTLFLAG_READWRITE, CTLTYPE_INT,
3375 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3376 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3377 CTL_EOL) != 0)
3378 aprint_error_dev(dev, "could not create sysctl\n");
3379
3380 /*
3381 	 * If each "que->txrx_use_workqueue" is changed in the sysctl
3382 	 * handler, it causes flip-flopping between softint and workqueue
3383 	 * mode in one deferred processing run. Therefore,
3384 	 * preempt_disable()/preempt_enable() would be required in
3385 	 * ixgbe_sched_handle_que() to avoid hitting a KASSERT in
3386 	 * softint_schedule(). Changing "que->txrx_use_workqueue" in the
3387 	 * interrupt handler instead is lighter than doing preempt_disable()/
3388 	 * preempt_enable() in every ixgbe_sched_handle_que().
3389 */
3390 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3391 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3392 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3393 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3394 aprint_error_dev(dev, "could not create sysctl\n");
3395
3396 #ifdef IXGBE_DEBUG
3397 /* testing sysctls (for all devices) */
3398 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3399 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3400 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3401 CTL_EOL) != 0)
3402 aprint_error_dev(dev, "could not create sysctl\n");
3403
3404 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3405 CTLTYPE_STRING, "print_rss_config",
3406 SYSCTL_DESCR("Prints RSS Configuration"),
3407 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3408 CTL_EOL) != 0)
3409 aprint_error_dev(dev, "could not create sysctl\n");
3410 #endif
3411 /* for X550 series devices */
3412 if (hw->mac.type >= ixgbe_mac_X550)
3413 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3414 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3415 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3416 CTL_EOL) != 0)
3417 aprint_error_dev(dev, "could not create sysctl\n");
3418
3419 /* for WoL-capable devices */
3420 if (adapter->wol_support) {
3421 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3422 CTLTYPE_BOOL, "wol_enable",
3423 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3424 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3425 CTL_EOL) != 0)
3426 aprint_error_dev(dev, "could not create sysctl\n");
3427
3428 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3429 CTLTYPE_INT, "wufc",
3430 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3431 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3432 CTL_EOL) != 0)
3433 aprint_error_dev(dev, "could not create sysctl\n");
3434 }
3435
3436 /* for X552/X557-AT devices */
3437 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3438 const struct sysctlnode *phy_node;
3439
3440 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3441 "phy", SYSCTL_DESCR("External PHY sysctls"),
3442 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3443 aprint_error_dev(dev, "could not create sysctl\n");
3444 return;
3445 }
3446
3447 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3448 CTLTYPE_INT, "temp",
3449 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3450 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3451 CTL_EOL) != 0)
3452 aprint_error_dev(dev, "could not create sysctl\n");
3453
3454 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3455 CTLTYPE_INT, "overtemp_occurred",
3456 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3457 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3458 CTL_CREATE, CTL_EOL) != 0)
3459 aprint_error_dev(dev, "could not create sysctl\n");
3460 }
3461
3462 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3463 && (hw->phy.type == ixgbe_phy_fw))
3464 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3465 CTLTYPE_BOOL, "force_10_100_autonego",
3466 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3467 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3468 CTL_CREATE, CTL_EOL) != 0)
3469 aprint_error_dev(dev, "could not create sysctl\n");
3470
3471 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3472 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3473 CTLTYPE_INT, "eee_state",
3474 SYSCTL_DESCR("EEE Power Save State"),
3475 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3476 CTL_EOL) != 0)
3477 aprint_error_dev(dev, "could not create sysctl\n");
3478 }
3479 } /* ixgbe_add_device_sysctls */
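
/*
 * Example userland usage of the nodes created above (assuming the
 * adapter attached as ixg0):
 *
 *	sysctl hw.ixg0.num_queues
 *	sysctl -w hw.ixg0.enable_aim=1
 *	sysctl -w hw.ixg0.fc=3	(3 = full, per the shared-code ixgbe_fc enum)
 */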
3480
3481 /************************************************************************
3482 * ixgbe_allocate_pci_resources
3483 ************************************************************************/
3484 static int
3485 ixgbe_allocate_pci_resources(struct adapter *adapter,
3486 const struct pci_attach_args *pa)
3487 {
3488 pcireg_t memtype, csr;
3489 device_t dev = adapter->dev;
3490 bus_addr_t addr;
3491 int flags;
3492
3493 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3494 switch (memtype) {
3495 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3496 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3497 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3498 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3499 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3500 goto map_err;
3501 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3502 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3503 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3504 }
3505 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3506 adapter->osdep.mem_size, flags,
3507 &adapter->osdep.mem_bus_space_handle) != 0) {
3508 map_err:
3509 adapter->osdep.mem_size = 0;
3510 aprint_error_dev(dev, "unable to map BAR0\n");
3511 return ENXIO;
3512 }
3513 /*
3514 		 * Enable address decoding for the memory range in case the
3515 		 * BIOS or UEFI didn't set it.
3516 */
3517 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3518 PCI_COMMAND_STATUS_REG);
3519 csr |= PCI_COMMAND_MEM_ENABLE;
3520 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3521 csr);
3522 break;
3523 default:
3524 aprint_error_dev(dev, "unexpected type on BAR0\n");
3525 return ENXIO;
3526 }
3527
3528 return (0);
3529 } /* ixgbe_allocate_pci_resources */
3530
3531 static void
3532 ixgbe_free_deferred_handlers(struct adapter *adapter)
3533 {
3534 struct ix_queue *que = adapter->queues;
3535 struct tx_ring *txr = adapter->tx_rings;
3536 int i;
3537
3538 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3539 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3540 if (txr->txr_si != NULL)
3541 softint_disestablish(txr->txr_si);
3542 }
3543 if (que->que_si != NULL)
3544 softint_disestablish(que->que_si);
3545 }
3546 if (adapter->txr_wq != NULL)
3547 workqueue_destroy(adapter->txr_wq);
3548 if (adapter->txr_wq_enqueued != NULL)
3549 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3550 if (adapter->que_wq != NULL)
3551 workqueue_destroy(adapter->que_wq);
3552
3553 if (adapter->admin_wq != NULL) {
3554 workqueue_destroy(adapter->admin_wq);
3555 adapter->admin_wq = NULL;
3556 }
3557 if (adapter->timer_wq != NULL) {
3558 workqueue_destroy(adapter->timer_wq);
3559 adapter->timer_wq = NULL;
3560 }
3561 if (adapter->recovery_mode_timer_wq != NULL) {
3562 /*
3563 		 * ixgbe_ifstop() doesn't call workqueue_wait() for
3564 * the recovery_mode_timer workqueue, so call it here.
3565 */
3566 workqueue_wait(adapter->recovery_mode_timer_wq,
3567 &adapter->recovery_mode_timer_wc);
3568 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3569 workqueue_destroy(adapter->recovery_mode_timer_wq);
3570 adapter->recovery_mode_timer_wq = NULL;
3571 }
3572 } /* ixgbe_free_deferred_handlers */
3573
3574 /************************************************************************
3575 * ixgbe_detach - Device removal routine
3576 *
3577 * Called when the driver is being removed.
3578 * Stops the adapter and deallocates all the resources
3579 * that were allocated for driver operation.
3580 *
3581 * return 0 on success, positive on failure
3582 ************************************************************************/
3583 static int
3584 ixgbe_detach(device_t dev, int flags)
3585 {
3586 struct adapter *adapter = device_private(dev);
3587 struct rx_ring *rxr = adapter->rx_rings;
3588 struct tx_ring *txr = adapter->tx_rings;
3589 struct ixgbe_hw *hw = &adapter->hw;
3590 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3591 u32 ctrl_ext;
3592 int i;
3593
3594 INIT_DEBUGOUT("ixgbe_detach: begin");
3595 if (adapter->osdep.attached == false)
3596 return 0;
3597
3598 if (ixgbe_pci_iov_detach(dev) != 0) {
3599 device_printf(dev, "SR-IOV in use; detach first.\n");
3600 return (EBUSY);
3601 }
3602
3603 #if NVLAN > 0
3604 /* Make sure VLANs are not using driver */
3605 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3606 ; /* nothing to do: no VLANs */
3607 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3608 vlan_ifdetach(adapter->ifp);
3609 else {
3610 aprint_error_dev(dev, "VLANs in use, detach first\n");
3611 return (EBUSY);
3612 }
3613 #endif
3614
3615 adapter->osdep.detaching = true;
3616 /*
3617 * Stop the interface. ixgbe_setup_low_power_mode() calls
3618 	 * ixgbe_ifstop(), so there is no need to call ixgbe_ifstop()
3619 * directly.
3620 */
3621 ixgbe_setup_low_power_mode(adapter);
3622
3623 callout_halt(&adapter->timer, NULL);
3624 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3625 callout_halt(&adapter->recovery_mode_timer, NULL);
3626
3627 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3628 atomic_store_relaxed(&adapter->admin_pending, 0);
3629 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3630 atomic_store_relaxed(&adapter->timer_pending, 0);
3631
3632 pmf_device_deregister(dev);
3633
3634 ether_ifdetach(adapter->ifp);
3635
3636 ixgbe_free_deferred_handlers(adapter);
3637
3638 /* let hardware know driver is unloading */
3639 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3640 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3641 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3642
3643 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3644 netmap_detach(adapter->ifp);
3645
3646 ixgbe_free_pci_resources(adapter);
3647 #if 0 /* XXX the NetBSD port is probably missing something here */
3648 bus_generic_detach(dev);
3649 #endif
3650 if_detach(adapter->ifp);
3651 ifmedia_fini(&adapter->media);
3652 if_percpuq_destroy(adapter->ipq);
3653
3654 sysctl_teardown(&adapter->sysctllog);
3655 evcnt_detach(&adapter->efbig_tx_dma_setup);
3656 evcnt_detach(&adapter->mbuf_defrag_failed);
3657 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3658 evcnt_detach(&adapter->einval_tx_dma_setup);
3659 evcnt_detach(&adapter->other_tx_dma_setup);
3660 evcnt_detach(&adapter->eagain_tx_dma_setup);
3661 evcnt_detach(&adapter->enomem_tx_dma_setup);
3662 evcnt_detach(&adapter->watchdog_events);
3663 evcnt_detach(&adapter->tso_err);
3664 evcnt_detach(&adapter->admin_irqev);
3665 evcnt_detach(&adapter->link_workev);
3666 evcnt_detach(&adapter->mod_workev);
3667 evcnt_detach(&adapter->msf_workev);
3668 evcnt_detach(&adapter->phy_workev);
3669
3670 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3671 if (i < __arraycount(stats->mpc)) {
3672 evcnt_detach(&stats->mpc[i]);
3673 if (hw->mac.type == ixgbe_mac_82598EB)
3674 evcnt_detach(&stats->rnbc[i]);
3675 }
3676 if (i < __arraycount(stats->pxontxc)) {
3677 evcnt_detach(&stats->pxontxc[i]);
3678 evcnt_detach(&stats->pxonrxc[i]);
3679 evcnt_detach(&stats->pxofftxc[i]);
3680 evcnt_detach(&stats->pxoffrxc[i]);
3681 if (hw->mac.type >= ixgbe_mac_82599EB)
3682 evcnt_detach(&stats->pxon2offc[i]);
3683 }
3684 }
3685
3686 txr = adapter->tx_rings;
3687 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3688 evcnt_detach(&adapter->queues[i].irqs);
3689 evcnt_detach(&adapter->queues[i].handleq);
3690 evcnt_detach(&adapter->queues[i].req);
3691 evcnt_detach(&txr->no_desc_avail);
3692 evcnt_detach(&txr->total_packets);
3693 evcnt_detach(&txr->tso_tx);
3694 #ifndef IXGBE_LEGACY_TX
3695 evcnt_detach(&txr->pcq_drops);
3696 #endif
3697
3698 if (i < __arraycount(stats->qprc)) {
3699 evcnt_detach(&stats->qprc[i]);
3700 evcnt_detach(&stats->qptc[i]);
3701 evcnt_detach(&stats->qbrc[i]);
3702 evcnt_detach(&stats->qbtc[i]);
3703 if (hw->mac.type >= ixgbe_mac_82599EB)
3704 evcnt_detach(&stats->qprdc[i]);
3705 }
3706
3707 evcnt_detach(&rxr->rx_packets);
3708 evcnt_detach(&rxr->rx_bytes);
3709 evcnt_detach(&rxr->rx_copies);
3710 evcnt_detach(&rxr->no_jmbuf);
3711 evcnt_detach(&rxr->rx_discarded);
3712 }
3713 evcnt_detach(&stats->ipcs);
3714 evcnt_detach(&stats->l4cs);
3715 evcnt_detach(&stats->ipcs_bad);
3716 evcnt_detach(&stats->l4cs_bad);
3717 evcnt_detach(&stats->intzero);
3718 evcnt_detach(&stats->legint);
3719 evcnt_detach(&stats->crcerrs);
3720 evcnt_detach(&stats->illerrc);
3721 evcnt_detach(&stats->errbc);
3722 evcnt_detach(&stats->mspdc);
3723 if (hw->mac.type >= ixgbe_mac_X550)
3724 evcnt_detach(&stats->mbsdc);
3725 evcnt_detach(&stats->mpctotal);
3726 evcnt_detach(&stats->mlfc);
3727 evcnt_detach(&stats->mrfc);
3728 evcnt_detach(&stats->rlec);
3729 evcnt_detach(&stats->lxontxc);
3730 evcnt_detach(&stats->lxonrxc);
3731 evcnt_detach(&stats->lxofftxc);
3732 evcnt_detach(&stats->lxoffrxc);
3733
3734 /* Packet Reception Stats */
3735 evcnt_detach(&stats->tor);
3736 evcnt_detach(&stats->gorc);
3737 evcnt_detach(&stats->tpr);
3738 evcnt_detach(&stats->gprc);
3739 evcnt_detach(&stats->mprc);
3740 evcnt_detach(&stats->bprc);
3741 evcnt_detach(&stats->prc64);
3742 evcnt_detach(&stats->prc127);
3743 evcnt_detach(&stats->prc255);
3744 evcnt_detach(&stats->prc511);
3745 evcnt_detach(&stats->prc1023);
3746 evcnt_detach(&stats->prc1522);
3747 evcnt_detach(&stats->ruc);
3748 evcnt_detach(&stats->rfc);
3749 evcnt_detach(&stats->roc);
3750 evcnt_detach(&stats->rjc);
3751 evcnt_detach(&stats->mngprc);
3752 evcnt_detach(&stats->mngpdc);
3753 evcnt_detach(&stats->xec);
3754
3755 /* Packet Transmission Stats */
3756 evcnt_detach(&stats->gotc);
3757 evcnt_detach(&stats->tpt);
3758 evcnt_detach(&stats->gptc);
3759 evcnt_detach(&stats->bptc);
3760 evcnt_detach(&stats->mptc);
3761 evcnt_detach(&stats->mngptc);
3762 evcnt_detach(&stats->ptc64);
3763 evcnt_detach(&stats->ptc127);
3764 evcnt_detach(&stats->ptc255);
3765 evcnt_detach(&stats->ptc511);
3766 evcnt_detach(&stats->ptc1023);
3767 evcnt_detach(&stats->ptc1522);
3768
3769 ixgbe_free_queues(adapter);
3770 free(adapter->mta, M_DEVBUF);
3771
3772 mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
3773 IXGBE_CORE_LOCK_DESTROY(adapter);
3774
3775 return (0);
3776 } /* ixgbe_detach */
3777
3778 /************************************************************************
3779 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3780 *
3781 * Prepare the adapter/port for LPLU and/or WoL
3782 ************************************************************************/
3783 static int
3784 ixgbe_setup_low_power_mode(struct adapter *adapter)
3785 {
3786 struct ixgbe_hw *hw = &adapter->hw;
3787 device_t dev = adapter->dev;
3788 struct ifnet *ifp = adapter->ifp;
3789 s32 error = 0;
3790
3791 /* Limit power management flow to X550EM baseT */
3792 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3793 hw->phy.ops.enter_lplu) {
3794 /* X550EM baseT adapters need a special LPLU flow */
3795 hw->phy.reset_disable = true;
3796 ixgbe_ifstop(ifp, 1);
3797 error = hw->phy.ops.enter_lplu(hw);
3798 if (error)
3799 device_printf(dev,
3800 "Error entering LPLU: %d\n", error);
3801 hw->phy.reset_disable = false;
3802 } else {
3803 /* Just stop for other adapters */
3804 ixgbe_ifstop(ifp, 1);
3805 }
3806
3807 IXGBE_CORE_LOCK(adapter);
3808
3809 if (!hw->wol_enabled) {
3810 ixgbe_set_phy_power(hw, FALSE);
3811 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3812 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3813 } else {
3814 /* Turn off support for APM wakeup. (Using ACPI instead) */
3815 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3816 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3817
3818 /*
3819 * Clear Wake Up Status register to prevent any previous wakeup
3820 * events from waking us up immediately after we suspend.
3821 */
3822 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3823
3824 /*
3825 * Program the Wakeup Filter Control register with user filter
3826 * settings
3827 */
3828 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3829
3830 /* Enable wakeups and power management in Wakeup Control */
3831 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3832 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3833
3834 }
3835
3836 IXGBE_CORE_UNLOCK(adapter);
3837
3838 return error;
3839 } /* ixgbe_setup_low_power_mode */
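
/*
 * For reference, a common WoL configuration (not the only one):
 * setting IXGBE_WUFC_MAG in adapter->wufc arms magic-packet wakeup,
 * and IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN lets the wake event assert
 * PME# while the device sits in a low-power state.
 */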
3840
3841 /************************************************************************
3842 * ixgbe_shutdown - Shutdown entry point
3843 ************************************************************************/
3844 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3845 static int
3846 ixgbe_shutdown(device_t dev)
3847 {
3848 struct adapter *adapter = device_private(dev);
3849 int error = 0;
3850
3851 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3852
3853 error = ixgbe_setup_low_power_mode(adapter);
3854
3855 return (error);
3856 } /* ixgbe_shutdown */
3857 #endif
3858
3859 /************************************************************************
3860 * ixgbe_suspend
3861 *
3862 * From D0 to D3
3863 ************************************************************************/
3864 static bool
3865 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3866 {
3867 struct adapter *adapter = device_private(dev);
3868 int error = 0;
3869
3870 INIT_DEBUGOUT("ixgbe_suspend: begin");
3871
3872 error = ixgbe_setup_low_power_mode(adapter);
3873
3874 	return (error == 0);	/* pmf expects true on success */
3875 } /* ixgbe_suspend */
3876
3877 /************************************************************************
3878 * ixgbe_resume
3879 *
3880 * From D3 to D0
3881 ************************************************************************/
3882 static bool
3883 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3884 {
3885 struct adapter *adapter = device_private(dev);
3886 struct ifnet *ifp = adapter->ifp;
3887 struct ixgbe_hw *hw = &adapter->hw;
3888 u32 wus;
3889
3890 INIT_DEBUGOUT("ixgbe_resume: begin");
3891
3892 IXGBE_CORE_LOCK(adapter);
3893
3894 /* Read & clear WUS register */
3895 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3896 if (wus)
3897 device_printf(dev, "Woken up by (WUS): %#010x\n",
3898 IXGBE_READ_REG(hw, IXGBE_WUS));
3899 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3900 /* And clear WUFC until next low-power transition */
3901 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3902
3903 /*
3904 * Required after D3->D0 transition;
3905 	 * will re-advertise all previously advertised speeds
3906 */
3907 if (ifp->if_flags & IFF_UP)
3908 ixgbe_init_locked(adapter);
3909
3910 IXGBE_CORE_UNLOCK(adapter);
3911
3912 return true;
3913 } /* ixgbe_resume */
3914
3915 /*
3916 * Set the various hardware offload abilities.
3917 *
3918 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3919 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3920 * mbuf offload flags the driver will understand.
3921 */
3922 static void
3923 ixgbe_set_if_hwassist(struct adapter *adapter)
3924 {
3925 /* XXX */
3926 }
3927
3928 /************************************************************************
3929 * ixgbe_init_locked - Init entry point
3930 *
3931  * Used in two ways: it is used by the stack as an init
3932  * entry point in the network interface structure, and it is
3933  * used by the driver as a hw/sw initialization routine to
3934  * get to a consistent state.
3935  *
3936  * Returns nothing; on failure the adapter is simply stopped.
3937 ************************************************************************/
3938 static void
3939 ixgbe_init_locked(struct adapter *adapter)
3940 {
3941 struct ifnet *ifp = adapter->ifp;
3942 device_t dev = adapter->dev;
3943 struct ixgbe_hw *hw = &adapter->hw;
3944 struct ix_queue *que;
3945 struct tx_ring *txr;
3946 struct rx_ring *rxr;
3947 u32 txdctl, mhadd;
3948 u32 rxdctl, rxctrl;
3949 u32 ctrl_ext;
3950 bool unsupported_sfp = false;
3951 int i, j, err;
3952
3953 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3954
3955 KASSERT(mutex_owned(&adapter->core_mtx));
3956 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3957
3958 hw->need_unsupported_sfp_recovery = false;
3959 hw->adapter_stopped = FALSE;
3960 ixgbe_stop_adapter(hw);
3961 callout_stop(&adapter->timer);
3962 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3963 callout_stop(&adapter->recovery_mode_timer);
3964 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3965 que->disabled_count = 0;
3966
3967 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3968 adapter->max_frame_size =
3969 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3970
3971 /* Queue indices may change with IOV mode */
3972 ixgbe_align_all_queue_indices(adapter);
3973
3974 /* reprogram the RAR[0] in case user changed it. */
3975 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3976
3977 /* Get the latest mac address, User can use a LAA */
3978 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3979 IXGBE_ETH_LENGTH_OF_ADDRESS);
3980 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3981 hw->addr_ctrl.rar_used_count = 1;
3982
3983 /* Set hardware offload abilities from ifnet flags */
3984 ixgbe_set_if_hwassist(adapter);
3985
3986 /* Prepare transmit descriptors and buffers */
3987 if (ixgbe_setup_transmit_structures(adapter)) {
3988 device_printf(dev, "Could not setup transmit structures\n");
3989 ixgbe_stop_locked(adapter);
3990 return;
3991 }
3992
3993 ixgbe_init_hw(hw);
3994
3995 ixgbe_initialize_iov(adapter);
3996
3997 ixgbe_initialize_transmit_units(adapter);
3998
3999 /* Setup Multicast table */
4000 ixgbe_set_rxfilter(adapter);
4001
4002 /* Determine the correct mbuf pool, based on frame size */
4003 if (adapter->max_frame_size <= MCLBYTES)
4004 adapter->rx_mbuf_sz = MCLBYTES;
4005 else
4006 adapter->rx_mbuf_sz = MJUMPAGESIZE;
4007
4008 /* Prepare receive descriptors and buffers */
4009 if (ixgbe_setup_receive_structures(adapter)) {
4010 device_printf(dev, "Could not setup receive structures\n");
4011 ixgbe_stop_locked(adapter);
4012 return;
4013 }
4014
4015 /* Configure RX settings */
4016 ixgbe_initialize_receive_units(adapter);
4017
4018 	/* Initialize the variable holding task enqueue requests */
4019 adapter->task_requests = 0;
4020
4021 /* Enable SDP & MSI-X interrupts based on adapter */
4022 ixgbe_config_gpie(adapter);
4023
4024 /* Set MTU size */
4025 if (ifp->if_mtu > ETHERMTU) {
4026 /* aka IXGBE_MAXFRS on 82599 and newer */
4027 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4028 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4029 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4030 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4031 }
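
	/*
	 * Worked example: with an MTU of 9000, max_frame_size is
	 * 9000 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 9018 bytes,
	 * which lands in the MFS field in the upper 16 bits of MHADD.
	 */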
4032
4033 /* Now enable all the queues */
4034 for (i = 0; i < adapter->num_queues; i++) {
4035 txr = &adapter->tx_rings[i];
4036 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4037 txdctl |= IXGBE_TXDCTL_ENABLE;
4038 /* Set WTHRESH to 8, burst writeback */
4039 txdctl |= (8 << 16);
4040 /*
4041 * When the internal queue falls below PTHRESH (32),
4042 * start prefetching as long as there are at least
4043 * HTHRESH (1) buffers ready. The values are taken
4044 * from the Intel linux driver 3.8.21.
4045 * Prefetching enables tx line rate even with 1 queue.
4046 */
4047 txdctl |= (32 << 0) | (1 << 8);
4048 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4049 }
4050
4051 for (i = 0; i < adapter->num_queues; i++) {
4052 rxr = &adapter->rx_rings[i];
4053 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4054 if (hw->mac.type == ixgbe_mac_82598EB) {
4055 /*
4056 * PTHRESH = 21
4057 * HTHRESH = 4
4058 * WTHRESH = 8
4059 */
4060 rxdctl &= ~0x3FFFFF;
4061 rxdctl |= 0x080420;
4062 }
4063 rxdctl |= IXGBE_RXDCTL_ENABLE;
4064 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4065 for (j = 0; j < 10; j++) {
4066 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4067 IXGBE_RXDCTL_ENABLE)
4068 break;
4069 else
4070 msec_delay(1);
4071 }
4072 IXGBE_WRITE_BARRIER(hw);
4073
4074 /*
4075 * In netmap mode, we must preserve the buffers made
4076 * available to userspace before the if_init()
4077 * (this is true by default on the TX side, because
4078 * init makes all buffers available to userspace).
4079 *
4080 * netmap_reset() and the device specific routines
4081 * (e.g. ixgbe_setup_receive_rings()) map these
4082 * buffers at the end of the NIC ring, so here we
4083 * must set the RDT (tail) register to make sure
4084 * they are not overwritten.
4085 *
4086 * In this driver the NIC ring starts at RDH = 0,
4087 * RDT points to the last slot available for reception (?),
4088 * so RDT = num_rx_desc - 1 means the whole ring is available.
4089 */
4090 #ifdef DEV_NETMAP
4091 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4092 (ifp->if_capenable & IFCAP_NETMAP)) {
4093 struct netmap_adapter *na = NA(adapter->ifp);
4094 struct netmap_kring *kring = na->rx_rings[i];
4095 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4096
4097 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4098 } else
4099 #endif /* DEV_NETMAP */
4100 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4101 adapter->num_rx_desc - 1);
4102 }
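
	/*
	 * Example of the RDT math above: with num_rx_desc = 1024 and an
	 * empty netmap kring (nm_kr_rxspace() == 0), t = 1023, so the
	 * whole ring is offered to the NIC, matching the non-netmap case.
	 */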
4103
4104 /* Enable Receive engine */
4105 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4106 if (hw->mac.type == ixgbe_mac_82598EB)
4107 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4108 rxctrl |= IXGBE_RXCTRL_RXEN;
4109 ixgbe_enable_rx_dma(hw, rxctrl);
4110
4111 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4112 atomic_store_relaxed(&adapter->timer_pending, 0);
4113 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4114 callout_reset(&adapter->recovery_mode_timer, hz,
4115 ixgbe_recovery_mode_timer, adapter);
4116
4117 /* Set up MSI/MSI-X routing */
4118 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4119 ixgbe_configure_ivars(adapter);
4120 /* Set up auto-mask */
4121 if (hw->mac.type == ixgbe_mac_82598EB)
4122 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4123 else {
4124 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4125 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4126 }
4127 } else { /* Simple settings for Legacy/MSI */
4128 ixgbe_set_ivar(adapter, 0, 0, 0);
4129 ixgbe_set_ivar(adapter, 0, 0, 1);
4130 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4131 }
4132
4133 ixgbe_init_fdir(adapter);
4134
4135 /*
4136 * Check on any SFP devices that
4137 * need to be kick-started
4138 */
4139 if (hw->phy.type == ixgbe_phy_none) {
4140 err = hw->phy.ops.identify(hw);
4141 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4142 unsupported_sfp = true;
4143 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4144 unsupported_sfp = true;
4145
4146 if (unsupported_sfp)
4147 device_printf(dev,
4148 "Unsupported SFP+ module type was detected.\n");
4149
4150 /* Set moderation on the Link interrupt */
4151 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4152
4153 /* Enable EEE power saving */
4154 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4155 hw->mac.ops.setup_eee(hw,
4156 adapter->feat_en & IXGBE_FEATURE_EEE);
4157
4158 /* Enable power to the phy. */
4159 if (!unsupported_sfp) {
4160 ixgbe_set_phy_power(hw, TRUE);
4161
4162 /* Config/Enable Link */
4163 ixgbe_config_link(adapter);
4164 }
4165
4166 /* Hardware Packet Buffer & Flow Control setup */
4167 ixgbe_config_delay_values(adapter);
4168
4169 /* Initialize the FC settings */
4170 ixgbe_start_hw(hw);
4171
4172 /* Set up VLAN support and filter */
4173 ixgbe_setup_vlan_hw_support(adapter);
4174
4175 /* Setup DMA Coalescing */
4176 ixgbe_config_dmac(adapter);
4177
4178 /* OK to schedule workqueues. */
4179 adapter->schedule_wqs_ok = true;
4180
4181 /* And now turn on interrupts */
4182 ixgbe_enable_intr(adapter);
4183
4184 /* Enable the use of the MBX by the VF's */
4185 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4186 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4187 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4188 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4189 }
4190
4191 /* Update saved flags. See ixgbe_ifflags_cb() */
4192 adapter->if_flags = ifp->if_flags;
4193 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4194
4195 /* Now inform the stack we're ready */
4196 ifp->if_flags |= IFF_RUNNING;
4197
4198 return;
4199 } /* ixgbe_init_locked */
4200
4201 /************************************************************************
4202 * ixgbe_init
4203 ************************************************************************/
4204 static int
4205 ixgbe_init(struct ifnet *ifp)
4206 {
4207 struct adapter *adapter = ifp->if_softc;
4208
4209 IXGBE_CORE_LOCK(adapter);
4210 ixgbe_init_locked(adapter);
4211 IXGBE_CORE_UNLOCK(adapter);
4212
4213 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4214 } /* ixgbe_init */
4215
4216 /************************************************************************
4217 * ixgbe_set_ivar
4218 *
4219 * Setup the correct IVAR register for a particular MSI-X interrupt
4220 * (yes this is all very magic and confusing :)
4221 * - entry is the register array entry
4222 * - vector is the MSI-X vector for this queue
4223 * - type is RX/TX/MISC
4224 ************************************************************************/
4225 static void
4226 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4227 {
4228 struct ixgbe_hw *hw = &adapter->hw;
4229 u32 ivar, index;
4230
4231 vector |= IXGBE_IVAR_ALLOC_VAL;
4232
4233 switch (hw->mac.type) {
4234 case ixgbe_mac_82598EB:
4235 if (type == -1)
4236 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4237 else
4238 entry += (type * 64);
4239 index = (entry >> 2) & 0x1F;
4240 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4241 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4242 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4243 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4244 break;
4245 case ixgbe_mac_82599EB:
4246 case ixgbe_mac_X540:
4247 case ixgbe_mac_X550:
4248 case ixgbe_mac_X550EM_x:
4249 case ixgbe_mac_X550EM_a:
4250 if (type == -1) { /* MISC IVAR */
4251 index = (entry & 1) * 8;
4252 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4253 ivar &= ~(0xffUL << index);
4254 ivar |= ((u32)vector << index);
4255 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4256 } else { /* RX/TX IVARS */
4257 index = (16 * (entry & 1)) + (8 * type);
4258 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4259 ivar &= ~(0xffUL << index);
4260 ivar |= ((u32)vector << index);
4261 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4262 }
4263 break;
4264 default:
4265 break;
4266 }
4267 } /* ixgbe_set_ivar */
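
/*
 * Worked example for the 82599-and-newer branch above: mapping RX queue
 * 5 (entry = 5, type = 0) to MSI-X vector 2 gives
 * index = 16 * (5 & 1) + 8 * 0 = 16, so byte 2 of IVAR(5 >> 1) =
 * IVAR(2) is replaced with 0x82 (vector 2 with IXGBE_IVAR_ALLOC_VAL,
 * bit 7, set).
 */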
4268
4269 /************************************************************************
4270 * ixgbe_configure_ivars
4271 ************************************************************************/
4272 static void
4273 ixgbe_configure_ivars(struct adapter *adapter)
4274 {
4275 struct ix_queue *que = adapter->queues;
4276 u32 newitr;
4277
4278 if (ixgbe_max_interrupt_rate > 0)
4279 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4280 else {
4281 /*
4282 * Disable DMA coalescing if interrupt moderation is
4283 * disabled.
4284 */
4285 adapter->dmac = 0;
4286 newitr = 0;
4287 }
4288
4289 for (int i = 0; i < adapter->num_queues; i++, que++) {
4290 struct rx_ring *rxr = &adapter->rx_rings[i];
4291 struct tx_ring *txr = &adapter->tx_rings[i];
4292 /* First the RX queue entry */
4293 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4294 /* ... and the TX */
4295 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4296 /* Set an Initial EITR value */
4297 ixgbe_eitr_write(adapter, que->msix, newitr);
4298 /*
4299 		 * Clear the setting to eliminate the influence of the
4300 		 * previous state. At this point the Tx/Rx interrupt
4301 		 * handler (ixgbe_msix_que()) cannot be called, so neither
4302 		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4303 */
4304 que->eitr_setting = 0;
4305 }
4306
4307 /* For the Link interrupt */
4308 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4309 } /* ixgbe_configure_ivars */
4310
4311 /************************************************************************
4312 * ixgbe_config_gpie
4313 ************************************************************************/
4314 static void
4315 ixgbe_config_gpie(struct adapter *adapter)
4316 {
4317 struct ixgbe_hw *hw = &adapter->hw;
4318 u32 gpie;
4319
4320 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4321
4322 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4323 /* Enable Enhanced MSI-X mode */
4324 gpie |= IXGBE_GPIE_MSIX_MODE
4325 | IXGBE_GPIE_EIAME
4326 | IXGBE_GPIE_PBA_SUPPORT
4327 | IXGBE_GPIE_OCD;
4328 }
4329
4330 /* Fan Failure Interrupt */
4331 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4332 gpie |= IXGBE_SDP1_GPIEN;
4333
4334 /* Thermal Sensor Interrupt */
4335 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4336 gpie |= IXGBE_SDP0_GPIEN_X540;
4337
4338 /* Link detection */
4339 switch (hw->mac.type) {
4340 case ixgbe_mac_82599EB:
4341 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4342 break;
4343 case ixgbe_mac_X550EM_x:
4344 case ixgbe_mac_X550EM_a:
4345 gpie |= IXGBE_SDP0_GPIEN_X540;
4346 break;
4347 default:
4348 break;
4349 }
4350
4351 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4352
4353 } /* ixgbe_config_gpie */
4354
4355 /************************************************************************
4356 * ixgbe_config_delay_values
4357 *
4358 * Requires adapter->max_frame_size to be set.
4359 ************************************************************************/
4360 static void
4361 ixgbe_config_delay_values(struct adapter *adapter)
4362 {
4363 struct ixgbe_hw *hw = &adapter->hw;
4364 u32 rxpb, frame, size, tmp;
4365
4366 frame = adapter->max_frame_size;
4367
4368 /* Calculate High Water */
4369 switch (hw->mac.type) {
4370 case ixgbe_mac_X540:
4371 case ixgbe_mac_X550:
4372 case ixgbe_mac_X550EM_x:
4373 case ixgbe_mac_X550EM_a:
4374 tmp = IXGBE_DV_X540(frame, frame);
4375 break;
4376 default:
4377 tmp = IXGBE_DV(frame, frame);
4378 break;
4379 }
4380 size = IXGBE_BT2KB(tmp);
4381 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4382 hw->fc.high_water[0] = rxpb - size;
4383
4384 /* Now calculate Low Water */
4385 switch (hw->mac.type) {
4386 case ixgbe_mac_X540:
4387 case ixgbe_mac_X550:
4388 case ixgbe_mac_X550EM_x:
4389 case ixgbe_mac_X550EM_a:
4390 tmp = IXGBE_LOW_DV_X540(frame);
4391 break;
4392 default:
4393 tmp = IXGBE_LOW_DV(frame);
4394 break;
4395 }
4396 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4397
4398 hw->fc.pause_time = IXGBE_FC_PAUSE;
4399 hw->fc.send_xon = TRUE;
4400 } /* ixgbe_config_delay_values */
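
/*
 * Note on units (informational): the RXPBSIZE value shifted right by 10
 * yields the packet buffer size in KB, and IXGBE_BT2KB() converts the
 * delay value, which the shared-code DV macros compute in bits, to KB.
 * high_water is therefore "packet buffer minus required headroom" in KB.
 */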
4401
4402 /************************************************************************
4403 * ixgbe_set_rxfilter - Multicast Update
4404 *
4405  * Called whenever the multicast address list is updated.
4406 ************************************************************************/
4407 static void
4408 ixgbe_set_rxfilter(struct adapter *adapter)
4409 {
4410 struct ixgbe_mc_addr *mta;
4411 struct ifnet *ifp = adapter->ifp;
4412 u8 *update_ptr;
4413 int mcnt = 0;
4414 u32 fctrl;
4415 struct ethercom *ec = &adapter->osdep.ec;
4416 struct ether_multi *enm;
4417 struct ether_multistep step;
4418
4419 KASSERT(mutex_owned(&adapter->core_mtx));
4420 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4421
4422 mta = adapter->mta;
4423 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4424
4425 ETHER_LOCK(ec);
4426 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4427 ETHER_FIRST_MULTI(step, ec, enm);
4428 while (enm != NULL) {
4429 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4430 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4431 ETHER_ADDR_LEN) != 0)) {
4432 ec->ec_flags |= ETHER_F_ALLMULTI;
4433 break;
4434 }
4435 bcopy(enm->enm_addrlo,
4436 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4437 mta[mcnt].vmdq = adapter->pool;
4438 mcnt++;
4439 ETHER_NEXT_MULTI(step, enm);
4440 }
4441
4442 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4443 if (ifp->if_flags & IFF_PROMISC)
4444 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4445 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4446 fctrl |= IXGBE_FCTRL_MPE;
4447 fctrl &= ~IXGBE_FCTRL_UPE;
4448 } else
4449 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4450
4451 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4452
4453 /* Update multicast filter entries only when it's not ALLMULTI */
4454 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4455 ETHER_UNLOCK(ec);
4456 update_ptr = (u8 *)mta;
4457 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4458 ixgbe_mc_array_itr, TRUE);
4459 } else
4460 ETHER_UNLOCK(ec);
4461 } /* ixgbe_set_rxfilter */
4462
4463 /************************************************************************
4464 * ixgbe_mc_array_itr
4465 *
4466 * An iterator function needed by the multicast shared code.
4467  * It feeds the shared code routine the addresses in the
4468  * array built by ixgbe_set_rxfilter(), one at a time.
4469 ************************************************************************/
4470 static u8 *
4471 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4472 {
4473 struct ixgbe_mc_addr *mta;
4474
4475 mta = (struct ixgbe_mc_addr *)*update_ptr;
4476 *vmdq = mta->vmdq;
4477
4478 *update_ptr = (u8*)(mta + 1);
4479
4480 return (mta->addr);
4481 } /* ixgbe_mc_array_itr */
4482
4483 /************************************************************************
4484 * ixgbe_local_timer - Timer routine
4485 *
4486 * Checks for link status, updates statistics,
4487 * and runs the watchdog check.
4488 ************************************************************************/
4489 static void
4490 ixgbe_local_timer(void *arg)
4491 {
4492 struct adapter *adapter = arg;
4493
4494 if (adapter->schedule_wqs_ok) {
4495 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4496 workqueue_enqueue(adapter->timer_wq,
4497 &adapter->timer_wc, NULL);
4498 }
4499 }
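
/*
 * The atomic_cas_uint() guard above makes the enqueue idempotent:
 * atomic_cas_uint() returns the previous value, so only the caller that
 * flips timer_pending from 0 to 1 sees 0 and enqueues the work; the
 * handler clears the flag when it has finished.
 */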
4500
4501 static void
4502 ixgbe_handle_timer(struct work *wk, void *context)
4503 {
4504 struct adapter *adapter = context;
4505 struct ixgbe_hw *hw = &adapter->hw;
4506 device_t dev = adapter->dev;
4507 struct ix_queue *que = adapter->queues;
4508 u64 queues = 0;
4509 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4510 int hung = 0;
4511 int i;
4512
4513 IXGBE_CORE_LOCK(adapter);
4514
4515 /* Check for pluggable optics */
4516 if (ixgbe_is_sfp(hw)) {
4517 bool sched_mod_task = false;
4518
4519 if (hw->mac.type == ixgbe_mac_82598EB) {
4520 /*
4521 			 * On the 82598EB, the SFP+ MOD_ABS pin is not connected
4522 			 * to any GPIO (SDP), so just schedule TASK_MOD.
4523 */
4524 sched_mod_task = true;
4525 } else {
4526 bool was_full, is_full;
4527
4528 was_full =
4529 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4530 is_full = ixgbe_sfp_cage_full(hw);
4531
4532 /* Do probe if cage state changed */
4533 if (was_full ^ is_full)
4534 sched_mod_task = true;
4535 }
4536 if (sched_mod_task) {
4537 mutex_enter(&adapter->admin_mtx);
4538 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
4539 ixgbe_schedule_admin_tasklet(adapter);
4540 mutex_exit(&adapter->admin_mtx);
4541 }
4542 }
4543
4544 ixgbe_update_link_status(adapter);
4545 ixgbe_update_stats_counters(adapter);
4546
4547 /* Update some event counters */
4548 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4549 que = adapter->queues;
4550 for (i = 0; i < adapter->num_queues; i++, que++) {
4551 struct tx_ring *txr = que->txr;
4552
4553 v0 += txr->q_efbig_tx_dma_setup;
4554 v1 += txr->q_mbuf_defrag_failed;
4555 v2 += txr->q_efbig2_tx_dma_setup;
4556 v3 += txr->q_einval_tx_dma_setup;
4557 v4 += txr->q_other_tx_dma_setup;
4558 v5 += txr->q_eagain_tx_dma_setup;
4559 v6 += txr->q_enomem_tx_dma_setup;
4560 v7 += txr->q_tso_err;
4561 }
4562 adapter->efbig_tx_dma_setup.ev_count = v0;
4563 adapter->mbuf_defrag_failed.ev_count = v1;
4564 adapter->efbig2_tx_dma_setup.ev_count = v2;
4565 adapter->einval_tx_dma_setup.ev_count = v3;
4566 adapter->other_tx_dma_setup.ev_count = v4;
4567 adapter->eagain_tx_dma_setup.ev_count = v5;
4568 adapter->enomem_tx_dma_setup.ev_count = v6;
4569 adapter->tso_err.ev_count = v7;
4570
4571 /*
4572 * Check the TX queues status
4573 * - mark hung queues so we don't schedule on them
4574 * - watchdog only if all queues show hung
4575 */
4576 que = adapter->queues;
4577 for (i = 0; i < adapter->num_queues; i++, que++) {
4578 /* Keep track of queues with work for soft irq */
4579 if (que->txr->busy)
4580 queues |= 1ULL << que->me;
4581 /*
4582 		 * Each time txeof runs without cleaning while there
4583 		 * are uncleaned descriptors, it increments busy. If
4584 		 * we reach the MAX we declare the queue hung.
4585 */
4586 if (que->busy == IXGBE_QUEUE_HUNG) {
4587 ++hung;
4588 /* Mark the queue as inactive */
4589 adapter->active_queues &= ~(1ULL << que->me);
4590 continue;
4591 } else {
4592 /* Check if we've come back from hung */
4593 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4594 adapter->active_queues |= 1ULL << que->me;
4595 }
4596 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4597 device_printf(dev,
4598 "Warning queue %d appears to be hung!\n", i);
4599 que->txr->busy = IXGBE_QUEUE_HUNG;
4600 ++hung;
4601 }
4602 }
4603
4604 /* Only truly watchdog if all queues show hung */
4605 if (hung == adapter->num_queues)
4606 goto watchdog;
4607 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4608 else if (queues != 0) { /* Force an IRQ on queues with work */
4609 que = adapter->queues;
4610 for (i = 0; i < adapter->num_queues; i++, que++) {
4611 mutex_enter(&que->dc_mtx);
4612 if (que->disabled_count == 0)
4613 ixgbe_rearm_queues(adapter,
4614 queues & ((u64)1 << i));
4615 mutex_exit(&que->dc_mtx);
4616 }
4617 }
4618 #endif
4619
4620 atomic_store_relaxed(&adapter->timer_pending, 0);
4621 IXGBE_CORE_UNLOCK(adapter);
4622 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4623 return;
4624
4625 watchdog:
4626 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4627 adapter->ifp->if_flags &= ~IFF_RUNNING;
4628 adapter->watchdog_events.ev_count++;
4629 ixgbe_init_locked(adapter);
4630 IXGBE_CORE_UNLOCK(adapter);
4631 } /* ixgbe_handle_timer */
4632
4633 /************************************************************************
4634 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4635 ************************************************************************/
4636 static void
4637 ixgbe_recovery_mode_timer(void *arg)
4638 {
4639 struct adapter *adapter = arg;
4640
4641 if (__predict_true(adapter->osdep.detaching == false)) {
4642 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4643 0, 1) == 0) {
4644 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4645 &adapter->recovery_mode_timer_wc, NULL);
4646 }
4647 }
4648 }
4649
4650 static void
4651 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4652 {
4653 struct adapter *adapter = context;
4654 struct ixgbe_hw *hw = &adapter->hw;
4655
4656 IXGBE_CORE_LOCK(adapter);
4657 if (ixgbe_fw_recovery_mode(hw)) {
4658 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4659 /* Firmware error detected, entering recovery mode */
4660 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4661
4662 if (hw->adapter_stopped == FALSE)
4663 ixgbe_stop_locked(adapter);
4664 }
4665 } else
4666 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4667
4668 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4669 callout_reset(&adapter->recovery_mode_timer, hz,
4670 ixgbe_recovery_mode_timer, adapter);
4671 IXGBE_CORE_UNLOCK(adapter);
4672 } /* ixgbe_handle_recovery_mode_timer */
4673
4674 /************************************************************************
4675 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4676 ************************************************************************/
4677 static void
4678 ixgbe_handle_mod(void *context)
4679 {
4680 struct adapter *adapter = context;
4681 struct ixgbe_hw *hw = &adapter->hw;
4682 device_t dev = adapter->dev;
4683 enum ixgbe_sfp_type last_sfp_type;
4684 u32 err;
4685 bool last_unsupported_sfp_recovery;
4686
4687 KASSERT(mutex_owned(&adapter->core_mtx));
4688
4689 last_sfp_type = hw->phy.sfp_type;
4690 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4691 ++adapter->mod_workev.ev_count;
4692 if (adapter->hw.need_crosstalk_fix) {
4693 if ((hw->mac.type != ixgbe_mac_82598EB) &&
4694 !ixgbe_sfp_cage_full(hw))
4695 goto out;
4696 }
4697
4698 err = hw->phy.ops.identify_sfp(hw);
4699 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4700 if (last_unsupported_sfp_recovery == false)
4701 device_printf(dev,
4702 "Unsupported SFP+ module type was detected.\n");
4703 goto out;
4704 }
4705
4706 if (hw->need_unsupported_sfp_recovery) {
4707 device_printf(dev, "Recovering from unsupported SFP\n");
4708 /*
4709 		 * We could recover the status by calling setup_sfp(),
4710 		 * setup_link() and some others, but that is complex and
4711 		 * might not work correctly in some unknown cases. To avoid
4712 		 * that kind of problem, call ixgbe_init_locked(). It's a
4713 		 * simple and safe approach.
4714 */
4715 ixgbe_init_locked(adapter);
4716 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4717 (hw->phy.sfp_type != last_sfp_type)) {
4718 /* A module is inserted and changed. */
4719
4720 if (hw->mac.type == ixgbe_mac_82598EB)
4721 err = hw->phy.ops.reset(hw);
4722 else {
4723 err = hw->mac.ops.setup_sfp(hw);
4724 hw->phy.sfp_setup_needed = FALSE;
4725 }
4726 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4727 device_printf(dev,
4728 "Setup failure - unsupported SFP+ module type.\n");
4729 goto out;
4730 }
4731 }
4732
4733 out:
4734 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4735 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4736
4737 /* Adjust media types shown in ifconfig */
4738 IXGBE_CORE_UNLOCK(adapter);
4739 ifmedia_removeall(&adapter->media);
4740 ixgbe_add_media_types(adapter);
4741 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4742 IXGBE_CORE_LOCK(adapter);
4743
4744 /*
4745 	 * Don't schedule an MSF event if the chip is an 82598; it doesn't
4746 	 * support MSF. At the very least, calling ixgbe_handle_msf on an
4747 	 * 82598 DA makes the link flap because the function calls setup_link().
4748 */
4749 if (hw->mac.type != ixgbe_mac_82598EB) {
4750 mutex_enter(&adapter->admin_mtx);
4751 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
4752 mutex_exit(&adapter->admin_mtx);
4753 }
4754
4755 /*
4756 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4757 * the workqueue now.
4758 */
4759 } /* ixgbe_handle_mod */
4760
4761
4762 /************************************************************************
4763 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4764 ************************************************************************/
4765 static void
4766 ixgbe_handle_msf(void *context)
4767 {
4768 struct adapter *adapter = context;
4769 struct ixgbe_hw *hw = &adapter->hw;
4770 u32 autoneg;
4771 bool negotiate;
4772
4773 KASSERT(mutex_owned(&adapter->core_mtx));
4774
4775 ++adapter->msf_workev.ev_count;
4776
4777 autoneg = hw->phy.autoneg_advertised;
4778 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4779 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4780 if (hw->mac.ops.setup_link)
4781 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4782 } /* ixgbe_handle_msf */
4783
4784 /************************************************************************
4785 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4786 ************************************************************************/
4787 static void
4788 ixgbe_handle_phy(void *context)
4789 {
4790 struct adapter *adapter = context;
4791 struct ixgbe_hw *hw = &adapter->hw;
4792 int error;
4793
4794 KASSERT(mutex_owned(&adapter->core_mtx));
4795
4796 ++adapter->phy_workev.ev_count;
4797 error = hw->phy.ops.handle_lasi(hw);
4798 if (error == IXGBE_ERR_OVERTEMP)
4799 device_printf(adapter->dev,
4800 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4801 		    "PHY will downshift to lower power state!\n");
4802 else if (error)
4803 device_printf(adapter->dev,
4804 "Error handling LASI interrupt: %d\n", error);
4805 } /* ixgbe_handle_phy */
4806
4807 static void
4808 ixgbe_handle_admin(struct work *wk, void *context)
4809 {
4810 struct adapter *adapter = context;
4811 struct ifnet *ifp = adapter->ifp;
4812 struct ixgbe_hw *hw = &adapter->hw;
4813 u32 task_requests;
4814
4815 mutex_enter(&adapter->admin_mtx);
4816 adapter->admin_pending = 0;
4817 task_requests = adapter->task_requests;
4818 adapter->task_requests = 0;
4819 mutex_exit(&adapter->admin_mtx);
4820
4821 /*
4822 * Hold the IFNET_LOCK across this entire call. This will
4823 * prevent additional changes to adapter->phy_layer
4824 * and serialize calls to this tasklet. We cannot hold the
4825 * CORE_LOCK while calling into the ifmedia functions as
4826 * they call ifmedia_lock() and the lock is CORE_LOCK.
4827 */
4828 IFNET_LOCK(ifp);
4829 IXGBE_CORE_LOCK(adapter);
4830 if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4831 ixgbe_handle_link(adapter);
4832 }
4833 if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4834 ixgbe_handle_mod(adapter);
4835 }
4836 if ((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) {
4837 ixgbe_handle_msf(adapter);
4838 }
4839 if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4840 ixgbe_handle_phy(adapter);
4841 }
4842 if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4843 ixgbe_reinit_fdir(adapter);
4844 }
4845 #if 0 /* notyet */
4846 if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4847 ixgbe_handle_mbx(adapter);
4848 }
4849 #endif
4850 if ((task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
4851 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
4852 /* Re-enable other interrupts */
4853 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4854 } else
4855 ixgbe_enable_intr(adapter);
4856 }
4857
4858 IXGBE_CORE_UNLOCK(adapter);
4859 IFNET_UNLOCK(ifp);
4860 } /* ixgbe_handle_admin */
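
/*
 * task_requests is an OR of IXGBE_REQUEST_TASK_* bits, so one pass of
 * the handler above can service several events at once, e.g. a module
 * insertion that also needs an MSF retune and an interrupt re-enable
 * (MOD | MSF | NEED_ACKINTR).
 */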
4861
4862 static void
4863 ixgbe_ifstop(struct ifnet *ifp, int disable)
4864 {
4865 struct adapter *adapter = ifp->if_softc;
4866
4867 IXGBE_CORE_LOCK(adapter);
4868 ixgbe_stop_locked(adapter);
4869 IXGBE_CORE_UNLOCK(adapter);
4870
4871 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4872 atomic_store_relaxed(&adapter->timer_pending, 0);
4873 }
4874
4875 /************************************************************************
4876 * ixgbe_stop_locked - Stop the hardware
4877 *
4878 * Disables all traffic on the adapter by issuing a
4879 * global reset on the MAC and deallocates TX/RX buffers.
4880 ************************************************************************/
4881 static void
4882 ixgbe_stop_locked(void *arg)
4883 {
4884 struct ifnet *ifp;
4885 struct adapter *adapter = arg;
4886 struct ixgbe_hw *hw = &adapter->hw;
4887
4888 ifp = adapter->ifp;
4889
4890 KASSERT(mutex_owned(&adapter->core_mtx));
4891
4892 INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
4893 ixgbe_disable_intr(adapter);
4894 callout_stop(&adapter->timer);
4895
4896 /* Don't schedule workqueues. */
4897 adapter->schedule_wqs_ok = false;
4898
4899 /* Let the stack know...*/
4900 ifp->if_flags &= ~IFF_RUNNING;
4901
4902 ixgbe_reset_hw(hw);
4903 hw->adapter_stopped = FALSE;
4904 ixgbe_stop_adapter(hw);
4905 if (hw->mac.type == ixgbe_mac_82599EB)
4906 ixgbe_stop_mac_link_on_d3_82599(hw);
4907 /* Turn off the laser - noop with no optics */
4908 ixgbe_disable_tx_laser(hw);
4909
4910 /* Update the stack */
4911 adapter->link_up = FALSE;
4912 ixgbe_update_link_status(adapter);
4913
4914 /* reprogram the RAR[0] in case user changed it. */
4915 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4916
4917 return;
4918 } /* ixgbe_stop_locked */
4919
4920 /************************************************************************
4921 * ixgbe_update_link_status - Update OS on link state
4922 *
4923 * Note: Only updates the OS on the cached link state.
4924 * The real check of the hardware only happens with
4925 * a link interrupt.
4926 ************************************************************************/
4927 static void
4928 ixgbe_update_link_status(struct adapter *adapter)
4929 {
4930 struct ifnet *ifp = adapter->ifp;
4931 device_t dev = adapter->dev;
4932 struct ixgbe_hw *hw = &adapter->hw;
4933
4934 KASSERT(mutex_owned(&adapter->core_mtx));
4935
4936 if (adapter->link_up) {
4937 if (adapter->link_active != LINK_STATE_UP) {
4938 /*
4939 * Clear the EITR settings to eliminate any influence of
4940 * the previous state, as ixgbe_init_locked() does.
4941 */
4942 struct ix_queue *que = adapter->queues;
4943 for (int i = 0; i < adapter->num_queues; i++, que++)
4944 que->eitr_setting = 0;
4945
4946 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
4947 /*
4948 * Discard the counts of both MAC Local Fault and
4949 * Remote Fault, because those registers are
4950 * valid only while the link is up and running
4951 * at 10Gbps.
4952 */
4953 IXGBE_READ_REG(hw, IXGBE_MLFC);
4954 IXGBE_READ_REG(hw, IXGBE_MRFC);
4955 }
4956
4957 if (bootverbose) {
4958 const char *bpsmsg;
4959
4960 switch (adapter->link_speed) {
4961 case IXGBE_LINK_SPEED_10GB_FULL:
4962 bpsmsg = "10 Gbps";
4963 break;
4964 case IXGBE_LINK_SPEED_5GB_FULL:
4965 bpsmsg = "5 Gbps";
4966 break;
4967 case IXGBE_LINK_SPEED_2_5GB_FULL:
4968 bpsmsg = "2.5 Gbps";
4969 break;
4970 case IXGBE_LINK_SPEED_1GB_FULL:
4971 bpsmsg = "1 Gbps";
4972 break;
4973 case IXGBE_LINK_SPEED_100_FULL:
4974 bpsmsg = "100 Mbps";
4975 break;
4976 case IXGBE_LINK_SPEED_10_FULL:
4977 bpsmsg = "10 Mbps";
4978 break;
4979 default:
4980 bpsmsg = "unknown speed";
4981 break;
4982 }
4983 device_printf(dev, "Link is up %s %s\n",
4984 bpsmsg, "Full Duplex");
4985 }
4986 adapter->link_active = LINK_STATE_UP;
4987 /* Update any Flow Control changes */
4988 ixgbe_fc_enable(&adapter->hw);
4989 /* Update DMA coalescing config */
4990 ixgbe_config_dmac(adapter);
4991 if_link_state_change(ifp, LINK_STATE_UP);
4992
4993 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4994 ixgbe_ping_all_vfs(adapter);
4995 }
4996 } else {
4997 /*
4998 * Do this only when link_active changes to DOWN, i.e.:
4999 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5000 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5001 */
5002 if (adapter->link_active != LINK_STATE_DOWN) {
5003 if (bootverbose)
5004 device_printf(dev, "Link is Down\n");
5005 if_link_state_change(ifp, LINK_STATE_DOWN);
5006 adapter->link_active = LINK_STATE_DOWN;
5007 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5008 ixgbe_ping_all_vfs(adapter);
5009 ixgbe_drain_all(adapter);
5010 }
5011 }
5012 } /* ixgbe_update_link_status */
5013
5014 /************************************************************************
5015 * ixgbe_config_dmac - Configure DMA Coalescing
5016 ************************************************************************/
5017 static void
5018 ixgbe_config_dmac(struct adapter *adapter)
5019 {
5020 struct ixgbe_hw *hw = &adapter->hw;
5021 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5022
5023 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5024 return;
5025
5026 if (dcfg->watchdog_timer != adapter->dmac ||
5027 dcfg->link_speed != adapter->link_speed) {
5028 dcfg->watchdog_timer = adapter->dmac;
5029 dcfg->fcoe_en = false;
5030 dcfg->link_speed = adapter->link_speed;
5031 dcfg->num_tcs = 1;
5032
5033 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5034 dcfg->watchdog_timer, dcfg->link_speed);
5035
5036 hw->mac.ops.dmac_config(hw);
5037 }
5038 } /* ixgbe_config_dmac */
5039
5040 /************************************************************************
5041 * ixgbe_enable_intr
5042 ************************************************************************/
5043 static void
5044 ixgbe_enable_intr(struct adapter *adapter)
5045 {
5046 struct ixgbe_hw *hw = &adapter->hw;
5047 struct ix_queue *que = adapter->queues;
5048 u32 mask, fwsm;
5049
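/*
 * Start from all interrupt causes except the per-queue RTX bits;
 * the queue interrupts are enabled individually at the end of this
 * function.
 */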
5050 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5051
5052 switch (adapter->hw.mac.type) {
5053 case ixgbe_mac_82599EB:
5054 mask |= IXGBE_EIMS_ECC;
5055 /* Temperature sensor on some adapters */
5056 mask |= IXGBE_EIMS_GPI_SDP0;
5057 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5058 mask |= IXGBE_EIMS_GPI_SDP1;
5059 mask |= IXGBE_EIMS_GPI_SDP2;
5060 break;
5061 case ixgbe_mac_X540:
5062 /* Detect if Thermal Sensor is enabled */
5063 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5064 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5065 mask |= IXGBE_EIMS_TS;
5066 mask |= IXGBE_EIMS_ECC;
5067 break;
5068 case ixgbe_mac_X550:
5069 /* MAC thermal sensor is automatically enabled */
5070 mask |= IXGBE_EIMS_TS;
5071 mask |= IXGBE_EIMS_ECC;
5072 break;
5073 case ixgbe_mac_X550EM_x:
5074 case ixgbe_mac_X550EM_a:
5075 /* Some devices use SDP0 for important information */
5076 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5077 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5078 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5079 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5080 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5081 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5082 mask |= IXGBE_EICR_GPI_SDP0_X540;
5083 mask |= IXGBE_EIMS_ECC;
5084 break;
5085 default:
5086 break;
5087 }
5088
5089 /* Enable Fan Failure detection */
5090 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5091 mask |= IXGBE_EIMS_GPI_SDP1;
5092 /* Enable SR-IOV */
5093 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5094 mask |= IXGBE_EIMS_MAILBOX;
5095 /* Enable Flow Director */
5096 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5097 mask |= IXGBE_EIMS_FLOW_DIR;
5098
5099 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5100
5101 /* With MSI-X we use auto clear */
5102 if (adapter->msix_mem) {
5103 mask = IXGBE_EIMS_ENABLE_MASK;
5104 /* Don't autoclear Link */
5105 mask &= ~IXGBE_EIMS_OTHER;
5106 mask &= ~IXGBE_EIMS_LSC;
5107 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5108 mask &= ~IXGBE_EIMS_MAILBOX;
5109 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5110 }
5111
5112 /*
5113 * Now enable all queues, this is done separately to
5114 * allow for handling the extended (beyond 32) MSI-X
5115 * vectors that can be used by 82599
5116 */
5117 for (int i = 0; i < adapter->num_queues; i++, que++)
5118 ixgbe_enable_queue(adapter, que->msix);
5119
5120 IXGBE_WRITE_FLUSH(hw);
5121
5122 } /* ixgbe_enable_intr */
5123
5124 /************************************************************************
5125 * ixgbe_disable_intr_internal
5126 ************************************************************************/
5127 static void
5128 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5129 {
5130 struct ix_queue *que = adapter->queues;
5131
5132 /* disable interrupts other than queues */
5133 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5134
5135 if (adapter->msix_mem)
5136 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5137
5138 for (int i = 0; i < adapter->num_queues; i++, que++)
5139 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5140
5141 IXGBE_WRITE_FLUSH(&adapter->hw);
5142
5143 } /* ixgbe_disable_intr_internal */
5144
5145 /************************************************************************
5146 * ixgbe_disable_intr
5147 ************************************************************************/
5148 static void
5149 ixgbe_disable_intr(struct adapter *adapter)
5150 {
5151
5152 ixgbe_disable_intr_internal(adapter, true);
5153 } /* ixgbe_disable_intr */
5154
5155 /************************************************************************
5156 * ixgbe_ensure_disabled_intr
5157 ************************************************************************/
5158 void
5159 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5160 {
5161
5162 ixgbe_disable_intr_internal(adapter, false);
5163 } /* ixgbe_ensure_disabled_intr */
5164
5165 /************************************************************************
5166 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5167 ************************************************************************/
5168 static int
5169 ixgbe_legacy_irq(void *arg)
5170 {
5171 struct ix_queue *que = arg;
5172 struct adapter *adapter = que->adapter;
5173 struct ixgbe_hw *hw = &adapter->hw;
5174 struct ifnet *ifp = adapter->ifp;
5175 struct tx_ring *txr = adapter->tx_rings;
5176 bool more = false;
5177 bool reenable_intr = true;
5178 u32 eicr, eicr_mask;
5179 u32 task_requests = 0;
5180
5181 /* Silicon errata #26 on 82598 */
5182 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5183
5184 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5185
5186 adapter->stats.pf.legint.ev_count++;
5187 ++que->irqs.ev_count;
5188 if (eicr == 0) {
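/* Nothing asserted: most likely a spurious or shared interrupt. */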
5189 adapter->stats.pf.intzero.ev_count++;
5190 if ((ifp->if_flags & IFF_UP) != 0)
5191 ixgbe_enable_intr(adapter);
5192 return 0;
5193 }
5194
5195 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5196 /*
5197 * Cache "que->txrx_use_workqueue" for the same reason as in ixgbe_msix_que().
5198 */
5199 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5200
5201 #ifdef __NetBSD__
5202 /* Don't run ixgbe_rxeof in interrupt context */
5203 more = true;
5204 #else
5205 more = ixgbe_rxeof(que);
5206 #endif
5207
5208 IXGBE_TX_LOCK(txr);
5209 ixgbe_txeof(txr);
5210 #ifdef notyet
5211 if (!ixgbe_ring_empty(ifp, txr->br))
5212 ixgbe_start_locked(ifp, txr);
5213 #endif
5214 IXGBE_TX_UNLOCK(txr);
5215 }
5216
5217 /* Check for fan failure */
5218 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5219 ixgbe_check_fan_failure(adapter, eicr, true);
5220 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5221 }
5222
5223 /* Link status change */
5224 if (eicr & IXGBE_EICR_LSC)
5225 task_requests |= IXGBE_REQUEST_TASK_LSC;
5226
5227 if (ixgbe_is_sfp(hw)) {
5228 /* Pluggable optics-related interrupt */
5229 if (hw->mac.type >= ixgbe_mac_X540)
5230 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5231 else
5232 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5233
5234 if (eicr & eicr_mask) {
5235 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5236 task_requests |= IXGBE_REQUEST_TASK_MOD;
5237 }
5238
5239 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5240 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5241 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5242 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5243 task_requests |= IXGBE_REQUEST_TASK_MSF;
5244 }
5245 }
5246
5247 /* External PHY interrupt */
5248 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5249 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5250 task_requests |= IXGBE_REQUEST_TASK_PHY;
5251
5252 if (more) {
5253 que->req.ev_count++;
5254 ixgbe_sched_handle_que(adapter, que);
5255 reenable_intr = false;
5256 }
5257 if (task_requests != 0) {
5258 /* Re-enabling other interrupts is done in the admin task */
5259 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
5260
5261 mutex_enter(&adapter->admin_mtx);
5262 adapter->task_requests |= task_requests;
5263 ixgbe_schedule_admin_tasklet(adapter);
5264 mutex_exit(&adapter->admin_mtx);
5265
5266 reenable_intr = false;
5267 }
5268
5269 if (reenable_intr)
5270 ixgbe_enable_intr(adapter);
5271
5272 return 1;
5273 } /* ixgbe_legacy_irq */
5274
5275 /************************************************************************
5276 * ixgbe_free_pciintr_resources
5277 ************************************************************************/
5278 static void
5279 ixgbe_free_pciintr_resources(struct adapter *adapter)
5280 {
5281 struct ix_queue *que = adapter->queues;
5282 int rid;
5283
5284 /*
5285 * Release all MSI-X queue resources.
5286 */
5287 for (int i = 0; i < adapter->num_queues; i++, que++) {
5288 if (que->res != NULL) {
5289 pci_intr_disestablish(adapter->osdep.pc,
5290 adapter->osdep.ihs[i]);
5291 adapter->osdep.ihs[i] = NULL;
5292 }
5293 }
5294
5295 /* Clean the Legacy or Link interrupt last */
5296 if (adapter->vector) /* we are doing MSIX */
5297 rid = adapter->vector;
5298 else
5299 rid = 0;
5300
5301 if (adapter->osdep.ihs[rid] != NULL) {
5302 pci_intr_disestablish(adapter->osdep.pc,
5303 adapter->osdep.ihs[rid]);
5304 adapter->osdep.ihs[rid] = NULL;
5305 }
5306
5307 if (adapter->osdep.intrs != NULL) {
5308 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5309 adapter->osdep.nintrs);
5310 adapter->osdep.intrs = NULL;
5311 }
5312 } /* ixgbe_free_pciintr_resources */
5313
5314 /************************************************************************
5315 * ixgbe_free_pci_resources
5316 ************************************************************************/
5317 static void
5318 ixgbe_free_pci_resources(struct adapter *adapter)
5319 {
5320
5321 ixgbe_free_pciintr_resources(adapter);
5322
5323 if (adapter->osdep.mem_size != 0) {
5324 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5325 adapter->osdep.mem_bus_space_handle,
5326 adapter->osdep.mem_size);
5327 }
5328
5329 } /* ixgbe_free_pci_resources */
5330
5331 /************************************************************************
5332 * ixgbe_set_sysctl_value
5333 ************************************************************************/
5334 static void
5335 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5336 const char *description, int *limit, int value)
5337 {
5338 device_t dev = adapter->dev;
5339 struct sysctllog **log;
5340 const struct sysctlnode *rnode, *cnode;
5341
5342 /*
5343 * It's not required to check recovery mode because this function never
5344 * touches hardware.
5345 */
5346
5347 log = &adapter->sysctllog;
5348 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5349 aprint_error_dev(dev, "could not create sysctl root\n");
5350 return;
5351 }
5352 if (sysctl_createv(log, 0, &rnode, &cnode,
5353 CTLFLAG_READWRITE, CTLTYPE_INT,
5354 name, SYSCTL_DESCR(description),
5355 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5356 aprint_error_dev(dev, "could not create sysctl\n");
5357 *limit = value;
5358 } /* ixgbe_set_sysctl_value */
5359
5360 /************************************************************************
5361 * ixgbe_sysctl_flowcntl
5362 *
5363 * SYSCTL wrapper around setting Flow Control
5364 ************************************************************************/
5365 static int
5366 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5367 {
5368 struct sysctlnode node = *rnode;
5369 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5370 int error, fc;
5371
5372 if (ixgbe_fw_recovery_mode_swflag(adapter))
5373 return (EPERM);
5374
5375 fc = adapter->hw.fc.current_mode;
5376 node.sysctl_data = &fc;
5377 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5378 if (error != 0 || newp == NULL)
5379 return error;
5380
5381 /* Don't bother if it's not changed */
5382 if (fc == adapter->hw.fc.current_mode)
5383 return (0);
5384
5385 return ixgbe_set_flowcntl(adapter, fc);
5386 } /* ixgbe_sysctl_flowcntl */
5387
5388 /************************************************************************
5389 * ixgbe_set_flowcntl - Set flow control
5390 *
5391 * Flow control values:
5392 * 0 - off
5393 * 1 - rx pause
5394 * 2 - tx pause
5395 * 3 - full
5396 ************************************************************************/
5397 static int
5398 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5399 {
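	/*
	 * RX drop and flow control are mutually exclusive in multiqueue
	 * configurations; see ixgbe_enable_rx_drop() below.
	 */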
5400 switch (fc) {
5401 case ixgbe_fc_rx_pause:
5402 case ixgbe_fc_tx_pause:
5403 case ixgbe_fc_full:
5404 adapter->hw.fc.requested_mode = fc;
5405 if (adapter->num_queues > 1)
5406 ixgbe_disable_rx_drop(adapter);
5407 break;
5408 case ixgbe_fc_none:
5409 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5410 if (adapter->num_queues > 1)
5411 ixgbe_enable_rx_drop(adapter);
5412 break;
5413 default:
5414 return (EINVAL);
5415 }
5416
5417 #if 0 /* XXX NetBSD */
5418 /* Don't autoneg if forcing a value */
5419 adapter->hw.fc.disable_fc_autoneg = TRUE;
5420 #endif
5421 ixgbe_fc_enable(&adapter->hw);
5422
5423 return (0);
5424 } /* ixgbe_set_flowcntl */
5425
5426 /************************************************************************
5427 * ixgbe_enable_rx_drop
5428 *
5429 * Enable the hardware to drop packets when the buffer is
5430 * full. This is useful with multiqueue, so that no single
5431 * queue being full stalls the entire RX engine. We only
5432 * enable this when Multiqueue is enabled AND Flow Control
5433 * is disabled.
5434 ************************************************************************/
5435 static void
5436 ixgbe_enable_rx_drop(struct adapter *adapter)
5437 {
5438 struct ixgbe_hw *hw = &adapter->hw;
5439 struct rx_ring *rxr;
5440 u32 srrctl;
5441
5442 for (int i = 0; i < adapter->num_queues; i++) {
5443 rxr = &adapter->rx_rings[i];
5444 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5445 srrctl |= IXGBE_SRRCTL_DROP_EN;
5446 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5447 }
5448
5449 /* enable drop for each vf */
5450 for (int i = 0; i < adapter->num_vfs; i++) {
5451 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5452 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5453 IXGBE_QDE_ENABLE));
5454 }
5455 } /* ixgbe_enable_rx_drop */
5456
5457 /************************************************************************
5458 * ixgbe_disable_rx_drop
5459 ************************************************************************/
5460 static void
5461 ixgbe_disable_rx_drop(struct adapter *adapter)
5462 {
5463 struct ixgbe_hw *hw = &adapter->hw;
5464 struct rx_ring *rxr;
5465 u32 srrctl;
5466
5467 for (int i = 0; i < adapter->num_queues; i++) {
5468 rxr = &adapter->rx_rings[i];
5469 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5470 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5471 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5472 }
5473
5474 /* disable drop for each vf */
5475 for (int i = 0; i < adapter->num_vfs; i++) {
5476 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5477 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5478 }
5479 } /* ixgbe_disable_rx_drop */
5480
5481 /************************************************************************
5482 * ixgbe_sysctl_advertise
5483 *
5484 * SYSCTL wrapper around setting advertised speed
5485 ************************************************************************/
5486 static int
5487 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5488 {
5489 struct sysctlnode node = *rnode;
5490 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5491 int error = 0, advertise;
5492
5493 if (ixgbe_fw_recovery_mode_swflag(adapter))
5494 return (EPERM);
5495
5496 advertise = adapter->advertise;
5497 node.sysctl_data = &advertise;
5498 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5499 if (error != 0 || newp == NULL)
5500 return error;
5501
5502 return ixgbe_set_advertise(adapter, advertise);
5503 } /* ixgbe_sysctl_advertise */
5504
5505 /************************************************************************
5506 * ixgbe_set_advertise - Control advertised link speed
5507 *
5508 * Flags:
5509 * 0x00 - Default (all capable link speed)
5510 * 0x01 - advertise 100 Mb
5511 * 0x02 - advertise 1G
5512 * 0x04 - advertise 10G
5513 * 0x08 - advertise 10 Mb
5514 * 0x10 - advertise 2.5G
5515 * 0x20 - advertise 5G
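 * Flags may be combined, e.g. 0x06 (0x02 | 0x04) advertises 1G and 10G.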
5516 ************************************************************************/
5517 static int
5518 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5519 {
5520 device_t dev;
5521 struct ixgbe_hw *hw;
5522 ixgbe_link_speed speed = 0;
5523 ixgbe_link_speed link_caps = 0;
5524 s32 err = IXGBE_NOT_IMPLEMENTED;
5525 bool negotiate = FALSE;
5526
5527 /* Checks to validate new value */
5528 if (adapter->advertise == advertise) /* no change */
5529 return (0);
5530
5531 dev = adapter->dev;
5532 hw = &adapter->hw;
5533
5534 /* No speed changes for backplane media */
5535 if (hw->phy.media_type == ixgbe_media_type_backplane)
5536 return (ENODEV);
5537
5538 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5539 (hw->phy.multispeed_fiber))) {
5540 device_printf(dev,
5541 "Advertised speed can only be set on copper or "
5542 "multispeed fiber media types.\n");
5543 return (EINVAL);
5544 }
5545
5546 if (advertise < 0x0 || advertise > 0x3f) {
5547 device_printf(dev,
5548 "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5549 return (EINVAL);
5550 }
5551
5552 if (hw->mac.ops.get_link_capabilities) {
5553 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5554 &negotiate);
5555 if (err != IXGBE_SUCCESS) {
5556 device_printf(dev, "Unable to determine supported advertise speeds\n");
5557 return (ENODEV);
5558 }
5559 }
5560
5561 /* Set new value and report new advertised mode */
5562 if (advertise & 0x1) {
5563 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5564 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5565 return (EINVAL);
5566 }
5567 speed |= IXGBE_LINK_SPEED_100_FULL;
5568 }
5569 if (advertise & 0x2) {
5570 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5571 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5572 return (EINVAL);
5573 }
5574 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5575 }
5576 if (advertise & 0x4) {
5577 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5578 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5579 return (EINVAL);
5580 }
5581 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5582 }
5583 if (advertise & 0x8) {
5584 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5585 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5586 return (EINVAL);
5587 }
5588 speed |= IXGBE_LINK_SPEED_10_FULL;
5589 }
5590 if (advertise & 0x10) {
5591 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5592 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5593 return (EINVAL);
5594 }
5595 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5596 }
5597 if (advertise & 0x20) {
5598 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5599 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5600 return (EINVAL);
5601 }
5602 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5603 }
5604 if (advertise == 0)
5605 speed = link_caps; /* All capable link speed */
5606
5607 hw->mac.autotry_restart = TRUE;
5608 hw->mac.ops.setup_link(hw, speed, TRUE);
5609 adapter->advertise = advertise;
5610
5611 return (0);
5612 } /* ixgbe_set_advertise */
5613
5614 /************************************************************************
5615 * ixgbe_get_advertise - Get current advertised speed settings
5616 *
5617 * Formatted for sysctl usage.
5618 * Flags:
5619 * 0x01 - advertise 100 Mb
5620 * 0x02 - advertise 1G
5621 * 0x04 - advertise 10G
5622 * 0x08 - advertise 10 Mb (yes, Mb)
5623 * 0x10 - advertise 2.5G
5624 * 0x20 - advertise 5G
5625 ************************************************************************/
5626 static int
5627 ixgbe_get_advertise(struct adapter *adapter)
5628 {
5629 struct ixgbe_hw *hw = &adapter->hw;
5630 int speed;
5631 ixgbe_link_speed link_caps = 0;
5632 s32 err;
5633 bool negotiate = FALSE;
5634
5635 /*
5636 * Advertised speed means nothing unless it's copper or
5637 * multi-speed fiber
5638 */
5639 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5640 !(hw->phy.multispeed_fiber))
5641 return (0);
5642
5643 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5644 if (err != IXGBE_SUCCESS)
5645 return (0);
5646
5647 speed =
5648 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5649 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5650 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5651 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5652 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5653 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5654
5655 return speed;
5656 } /* ixgbe_get_advertise */
5657
5658 /************************************************************************
5659 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5660 *
5661 * Control values:
5662 * 0/1 - off / on (use default value of 1000)
5663 *
5664 * Legal timer values are:
5665 * 50,100,250,500,1000,2000,5000,10000
5666 *
5667 * Turning off interrupt moderation will also turn this off.
5668 ************************************************************************/
5669 static int
5670 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5671 {
5672 struct sysctlnode node = *rnode;
5673 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5674 struct ifnet *ifp = adapter->ifp;
5675 int error;
5676 int newval;
5677
5678 if (ixgbe_fw_recovery_mode_swflag(adapter))
5679 return (EPERM);
5680
5681 newval = adapter->dmac;
5682 node.sysctl_data = &newval;
5683 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5684 if ((error) || (newp == NULL))
5685 return (error);
5686
5687 switch (newval) {
5688 case 0:
5689 /* Disabled */
5690 adapter->dmac = 0;
5691 break;
5692 case 1:
5693 /* Enable and use default */
5694 adapter->dmac = 1000;
5695 break;
5696 case 50:
5697 case 100:
5698 case 250:
5699 case 500:
5700 case 1000:
5701 case 2000:
5702 case 5000:
5703 case 10000:
5704 /* Legal values - allow */
5705 adapter->dmac = newval;
5706 break;
5707 default:
5708 /* Do nothing, illegal value */
5709 return (EINVAL);
5710 }
5711
5712 /* Re-initialize hardware if it's already running */
5713 if (ifp->if_flags & IFF_RUNNING)
5714 ifp->if_init(ifp);
5715
5716 return (0);
5717 } /* ixgbe_sysctl_dmac */
5718
5719 #ifdef IXGBE_DEBUG
5720 /************************************************************************
5721 * ixgbe_sysctl_power_state
5722 *
5723 * Sysctl to test power states
5724 * Values:
5725 * 0 - set device to D0
5726 * 3 - set device to D3
5727 * (none) - get current device power state
5728 ************************************************************************/
5729 static int
5730 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5731 {
5732 #ifdef notyet
5733 struct sysctlnode node = *rnode;
5734 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5735 device_t dev = adapter->dev;
5736 int curr_ps, new_ps, error = 0;
5737
5738 if (ixgbe_fw_recovery_mode_swflag(adapter))
5739 return (EPERM);
5740
5741 curr_ps = new_ps = pci_get_powerstate(dev);
5742
5743 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5744 if ((error) || (req->newp == NULL))
5745 return (error);
5746
5747 if (new_ps == curr_ps)
5748 return (0);
5749
5750 if (new_ps == 3 && curr_ps == 0)
5751 error = DEVICE_SUSPEND(dev);
5752 else if (new_ps == 0 && curr_ps == 3)
5753 error = DEVICE_RESUME(dev);
5754 else
5755 return (EINVAL);
5756
5757 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5758
5759 return (error);
5760 #else
5761 return 0;
5762 #endif
5763 } /* ixgbe_sysctl_power_state */
5764 #endif
5765
5766 /************************************************************************
5767 * ixgbe_sysctl_wol_enable
5768 *
5769 * Sysctl to enable/disable the WoL capability,
5770 * if supported by the adapter.
5771 *
5772 * Values:
5773 * 0 - disabled
5774 * 1 - enabled
5775 ************************************************************************/
5776 static int
5777 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5778 {
5779 struct sysctlnode node = *rnode;
5780 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5781 struct ixgbe_hw *hw = &adapter->hw;
5782 bool new_wol_enabled;
5783 int error = 0;
5784
5785 /*
5786 * It's not required to check recovery mode because this function never
5787 * touches hardware.
5788 */
5789 new_wol_enabled = hw->wol_enabled;
5790 node.sysctl_data = &new_wol_enabled;
5791 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5792 if ((error) || (newp == NULL))
5793 return (error);
5794 if (new_wol_enabled == hw->wol_enabled)
5795 return (0);
5796
5797 if (new_wol_enabled && !adapter->wol_support)
5798 return (ENODEV);
5799 else
5800 hw->wol_enabled = new_wol_enabled;
5801
5802 return (0);
5803 } /* ixgbe_sysctl_wol_enable */
5804
5805 /************************************************************************
5806 * ixgbe_sysctl_wufc - Wake Up Filter Control
5807 *
5808 * Sysctl to enable/disable the types of packets that the
5809 * adapter will wake up on upon receipt.
5810 * Flags:
5811 * 0x1 - Link Status Change
5812 * 0x2 - Magic Packet
5813 * 0x4 - Direct Exact
5814 * 0x8 - Directed Multicast
5815 * 0x10 - Broadcast
5816 * 0x20 - ARP/IPv4 Request Packet
5817 * 0x40 - Direct IPv4 Packet
5818 * 0x80 - Direct IPv6 Packet
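 * Flags may be combined, e.g. 0x12 (0x2 | 0x10) wakes on Magic Packet
 * or broadcast.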
5819 *
5820 * Settings not listed above will cause the sysctl to return an error.
5821 ************************************************************************/
5822 static int
5823 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5824 {
5825 struct sysctlnode node = *rnode;
5826 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5827 int error = 0;
5828 u32 new_wufc;
5829
5830 /*
5831 * It's not required to check recovery mode because this function never
5832 * touches hardware.
5833 */
5834 new_wufc = adapter->wufc;
5835 node.sysctl_data = &new_wufc;
5836 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5837 if ((error) || (newp == NULL))
5838 return (error);
5839 if (new_wufc == adapter->wufc)
5840 return (0);
5841
5842 if (new_wufc & 0xffffff00)
5843 return (EINVAL);
5844
5845 new_wufc &= 0xff;
5846 new_wufc |= (0xffffff00 & adapter->wufc);
5847 adapter->wufc = new_wufc;
5848
5849 return (0);
5850 } /* ixgbe_sysctl_wufc */
5851
5852 #ifdef IXGBE_DEBUG
5853 /************************************************************************
5854 * ixgbe_sysctl_print_rss_config
5855 ************************************************************************/
5856 static int
5857 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5858 {
5859 #ifdef notyet
5860 struct sysctlnode node = *rnode;
5861 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5862 struct ixgbe_hw *hw = &adapter->hw;
5863 device_t dev = adapter->dev;
5864 struct sbuf *buf;
5865 int error = 0, reta_size;
5866 u32 reg;
5867
5868 if (ixgbe_fw_recovery_mode_swflag(adapter))
5869 return (EPERM);
5870
5871 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5872 if (!buf) {
5873 device_printf(dev, "Could not allocate sbuf for output.\n");
5874 return (ENOMEM);
5875 }
5876
5877 // TODO: use sbufs to make a string to print out
5878 /* Set multiplier for RETA setup and table size based on MAC */
5879 switch (adapter->hw.mac.type) {
5880 case ixgbe_mac_X550:
5881 case ixgbe_mac_X550EM_x:
5882 case ixgbe_mac_X550EM_a:
5883 reta_size = 128;
5884 break;
5885 default:
5886 reta_size = 32;
5887 break;
5888 }
5889
5890 /* Print out the redirection table */
5891 sbuf_cat(buf, "\n");
5892 for (int i = 0; i < reta_size; i++) {
5893 if (i < 32) {
5894 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5895 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5896 } else {
5897 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5898 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5899 }
5900 }
5901
5902 // TODO: print more config
5903
5904 error = sbuf_finish(buf);
5905 if (error)
5906 device_printf(dev, "Error finishing sbuf: %d\n", error);
5907
5908 sbuf_delete(buf);
5909 #endif
5910 return (0);
5911 } /* ixgbe_sysctl_print_rss_config */
5912 #endif /* IXGBE_DEBUG */
5913
5914 /************************************************************************
5915 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5916 *
5917 * For X552/X557-AT devices using an external PHY
5918 ************************************************************************/
5919 static int
5920 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5921 {
5922 struct sysctlnode node = *rnode;
5923 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5924 struct ixgbe_hw *hw = &adapter->hw;
5925 int val;
5926 u16 reg;
5927 int error;
5928
5929 if (ixgbe_fw_recovery_mode_swflag(adapter))
5930 return (EPERM);
5931
5932 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5933 device_printf(adapter->dev,
5934 "Device has no supported external thermal sensor.\n");
5935 return (ENODEV);
5936 }
5937
5938 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5939 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5940 device_printf(adapter->dev,
5941 "Error reading from PHY's current temperature register\n");
5942 return (EAGAIN);
5943 }
5944
5945 node.sysctl_data = &val;
5946
5947 /* Shift temp for output */
5948 val = reg >> 8;
5949
5950 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5951 if ((error) || (newp == NULL))
5952 return (error);
5953
5954 return (0);
5955 } /* ixgbe_sysctl_phy_temp */
5956
5957 /************************************************************************
5958 * ixgbe_sysctl_phy_overtemp_occurred
5959 *
5960 * Reports (directly from the PHY) whether the current PHY
5961 * temperature is over the overtemp threshold.
5962 ************************************************************************/
5963 static int
5964 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5965 {
5966 struct sysctlnode node = *rnode;
5967 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5968 struct ixgbe_hw *hw = &adapter->hw;
5969 int val, error;
5970 u16 reg;
5971
5972 if (ixgbe_fw_recovery_mode_swflag(adapter))
5973 return (EPERM);
5974
5975 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5976 device_printf(adapter->dev,
5977 "Device has no supported external thermal sensor.\n");
5978 return (ENODEV);
5979 }
5980
5981 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5982 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5983 device_printf(adapter->dev,
5984 "Error reading from PHY's temperature status register\n");
5985 return (EAGAIN);
5986 }
5987
5988 node.sysctl_data = &val;
5989
5990 /* Get occurrence bit */
5991 val = !!(reg & 0x4000);
5992
5993 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5994 if ((error) || (newp == NULL))
5995 return (error);
5996
5997 return (0);
5998 } /* ixgbe_sysctl_phy_overtemp_occurred */
5999
6000 /************************************************************************
6001 * ixgbe_sysctl_eee_state
6002 *
6003 * Sysctl to set EEE power saving feature
6004 * Values:
6005 * 0 - disable EEE
6006 * 1 - enable EEE
6007 * (none) - get current device EEE state
6008 ************************************************************************/
6009 static int
6010 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6011 {
6012 struct sysctlnode node = *rnode;
6013 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6014 struct ifnet *ifp = adapter->ifp;
6015 device_t dev = adapter->dev;
6016 int curr_eee, new_eee, error = 0;
6017 s32 retval;
6018
6019 if (ixgbe_fw_recovery_mode_swflag(adapter))
6020 return (EPERM);
6021
6022 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6023 node.sysctl_data = &new_eee;
6024 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6025 if ((error) || (newp == NULL))
6026 return (error);
6027
6028 /* Nothing to do */
6029 if (new_eee == curr_eee)
6030 return (0);
6031
6032 /* Not supported */
6033 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6034 return (EINVAL);
6035
6036 /* Bounds checking */
6037 if ((new_eee < 0) || (new_eee > 1))
6038 return (EINVAL);
6039
6040 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6041 if (retval) {
6042 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6043 return (EINVAL);
6044 }
6045
6046 /* Restart auto-neg */
6047 ifp->if_init(ifp);
6048
6049 device_printf(dev, "New EEE state: %d\n", new_eee);
6050
6051 /* Cache new value */
6052 if (new_eee)
6053 adapter->feat_en |= IXGBE_FEATURE_EEE;
6054 else
6055 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6056
6057 return (error);
6058 } /* ixgbe_sysctl_eee_state */
6059
6060 #define PRINTQS(adapter, regname) \
6061 do { \
6062 struct ixgbe_hw *_hw = &(adapter)->hw; \
6063 int _i; \
6064 \
6065 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6066 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6067 printf((_i == 0) ? "\t" : " "); \
6068 printf("%08x", IXGBE_READ_REG(_hw, \
6069 IXGBE_##regname(_i))); \
6070 } \
6071 printf("\n"); \
6072 } while (0)
6073
6074 /************************************************************************
6075 * ixgbe_print_debug_info
6076 *
6077 * Called only when em_display_debug_stats is enabled.
6078 * Provides a way to take a look at important statistics
6079 * maintained by the driver and hardware.
6080 ************************************************************************/
6081 static void
6082 ixgbe_print_debug_info(struct adapter *adapter)
6083 {
6084 device_t dev = adapter->dev;
6085 struct ixgbe_hw *hw = &adapter->hw;
6086 int table_size;
6087 int i;
6088
6089 switch (adapter->hw.mac.type) {
6090 case ixgbe_mac_X550:
6091 case ixgbe_mac_X550EM_x:
6092 case ixgbe_mac_X550EM_a:
6093 table_size = 128;
6094 break;
6095 default:
6096 table_size = 32;
6097 break;
6098 }
6099
6100 device_printf(dev, "[E]RETA:\n");
6101 for (i = 0; i < table_size; i++) {
6102 if (i < 32)
6103 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6104 IXGBE_RETA(i)));
6105 else
6106 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6107 IXGBE_ERETA(i - 32)));
6108 }
6109
6110 device_printf(dev, "queue:");
6111 for (i = 0; i < adapter->num_queues; i++) {
6112 printf((i == 0) ? "\t" : " ");
6113 printf("%8d", i);
6114 }
6115 printf("\n");
6116 PRINTQS(adapter, RDBAL);
6117 PRINTQS(adapter, RDBAH);
6118 PRINTQS(adapter, RDLEN);
6119 PRINTQS(adapter, SRRCTL);
6120 PRINTQS(adapter, RDH);
6121 PRINTQS(adapter, RDT);
6122 PRINTQS(adapter, RXDCTL);
6123
6124 device_printf(dev, "RQSMR:");
6125 for (i = 0; i < adapter->num_queues / 4; i++) {
6126 printf((i == 0) ? "\t" : " ");
6127 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6128 }
6129 printf("\n");
6130
6131 device_printf(dev, "disabled_count:");
6132 for (i = 0; i < adapter->num_queues; i++) {
6133 printf((i == 0) ? "\t" : " ");
6134 printf("%8d", adapter->queues[i].disabled_count);
6135 }
6136 printf("\n");
6137
6138 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6139 if (hw->mac.type != ixgbe_mac_82598EB) {
6140 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6141 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6142 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6143 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6144 }
6145 } /* ixgbe_print_debug_info */
6146
6147 /************************************************************************
6148 * ixgbe_sysctl_debug
6149 ************************************************************************/
6150 static int
6151 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6152 {
6153 struct sysctlnode node = *rnode;
6154 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6155 int error, result = 0;
6156
6157 if (ixgbe_fw_recovery_mode_swflag(adapter))
6158 return (EPERM);
6159
6160 node.sysctl_data = &result;
6161 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6162
6163 if (error || newp == NULL)
6164 return error;
6165
6166 if (result == 1)
6167 ixgbe_print_debug_info(adapter);
6168
6169 return 0;
6170 } /* ixgbe_sysctl_debug */
6171
6172 /************************************************************************
6173 * ixgbe_init_device_features
6174 ************************************************************************/
6175 static void
6176 ixgbe_init_device_features(struct adapter *adapter)
6177 {
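	/*
	 * feat_cap collects everything this device could support;
	 * feat_en is the subset actually enabled further below.
	 */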
6178 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6179 | IXGBE_FEATURE_RSS
6180 | IXGBE_FEATURE_MSI
6181 | IXGBE_FEATURE_MSIX
6182 | IXGBE_FEATURE_LEGACY_IRQ
6183 | IXGBE_FEATURE_LEGACY_TX;
6184
6185 /* Set capabilities first... */
6186 switch (adapter->hw.mac.type) {
6187 case ixgbe_mac_82598EB:
6188 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6189 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6190 break;
6191 case ixgbe_mac_X540:
6192 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6193 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6194 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6195 (adapter->hw.bus.func == 0))
6196 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6197 break;
6198 case ixgbe_mac_X550:
6199 /*
6200 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6201 * NVM Image version.
6202 */
6203 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6204 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6205 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6206 break;
6207 case ixgbe_mac_X550EM_x:
6208 /*
6209 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6210 * NVM Image version.
6211 */
6212 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6213 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6214 break;
6215 case ixgbe_mac_X550EM_a:
6216 /*
6217 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6218 * NVM Image version.
6219 */
6220 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6221 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6222 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6223 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6224 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6225 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6226 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6227 }
6228 break;
6229 case ixgbe_mac_82599EB:
6230 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6231 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6232 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6233 (adapter->hw.bus.func == 0))
6234 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6235 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6236 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6237 break;
6238 default:
6239 break;
6240 }
6241
6242 /* Enabled by default... */
6243 /* Fan failure detection */
6244 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6245 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6246 /* Netmap */
6247 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6248 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6249 /* EEE */
6250 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6251 adapter->feat_en |= IXGBE_FEATURE_EEE;
6252 /* Thermal Sensor */
6253 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6254 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6255 /*
6256 * Recovery mode:
6257 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6258 * NVM Image version.
6259 */
6260
6261 /* Enabled via global sysctl... */
6262 /* Flow Director */
6263 if (ixgbe_enable_fdir) {
6264 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6265 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6266 else
6267 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
6268 }
6269 /* Legacy (single queue) transmit */
6270 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6271 ixgbe_enable_legacy_tx)
6272 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6273 /*
6274 * Message Signal Interrupts - Extended (MSI-X)
6275 * Normal MSI is only enabled if MSI-X calls fail.
6276 */
6277 if (!ixgbe_enable_msix)
6278 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6279 /* Receive-Side Scaling (RSS) */
6280 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6281 adapter->feat_en |= IXGBE_FEATURE_RSS;
6282
6283 /* Disable features with unmet dependencies... */
6284 /* No MSI-X */
6285 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6286 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6287 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6288 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6289 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6290 }
6291 } /* ixgbe_init_device_features */
6292
6293 /************************************************************************
6294 * ixgbe_probe - Device identification routine
6295 *
6296 * Determines if the driver should attach to this
6297 * adapter, based on its PCI vendor/device ID.
6298 *
6299 * return 1 on match, 0 otherwise
6300 ************************************************************************/
6301 static int
6302 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6303 {
6304 const struct pci_attach_args *pa = aux;
6305
6306 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6307 }
6308
6309 static const ixgbe_vendor_info_t *
6310 ixgbe_lookup(const struct pci_attach_args *pa)
6311 {
6312 const ixgbe_vendor_info_t *ent;
6313 pcireg_t subid;
6314
6315 INIT_DEBUGOUT("ixgbe_lookup: begin");
6316
6317 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6318 return NULL;
6319
6320 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6321
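	/* A subvendor/subdevice ID of zero in the table acts as a wildcard. */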
6322 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6323 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6324 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6325 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6326 (ent->subvendor_id == 0)) &&
6327 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6328 (ent->subdevice_id == 0))) {
6329 return ent;
6330 }
6331 }
6332 return NULL;
6333 }
6334
6335 static int
6336 ixgbe_ifflags_cb(struct ethercom *ec)
6337 {
6338 struct ifnet *ifp = &ec->ec_if;
6339 struct adapter *adapter = ifp->if_softc;
6340 u_short change;
6341 int rv = 0;
6342
6343 IXGBE_CORE_LOCK(adapter);
6344
6345 change = ifp->if_flags ^ adapter->if_flags;
6346 if (change != 0)
6347 adapter->if_flags = ifp->if_flags;
6348
6349 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6350 rv = ENETRESET;
6351 goto out;
6352 } else if ((change & IFF_PROMISC) != 0)
6353 ixgbe_set_rxfilter(adapter);
6354
6355 /* Check for ec_capenable. */
6356 change = ec->ec_capenable ^ adapter->ec_capenable;
6357 adapter->ec_capenable = ec->ec_capenable;
6358 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6359 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6360 rv = ENETRESET;
6361 goto out;
6362 }
6363
6364 /*
6365 * Special handling is not required for ETHERCAP_VLAN_MTU.
6366 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6367 */
6368
6369 /* Set up VLAN support and filter */
6370 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6371 ixgbe_setup_vlan_hw_support(adapter);
6372
6373 out:
6374 IXGBE_CORE_UNLOCK(adapter);
6375
6376 return rv;
6377 }
6378
6379 /************************************************************************
6380 * ixgbe_ioctl - Ioctl entry point
6381 *
6382 * Called when the user wants to configure the interface.
6383 *
6384 * return 0 on success, positive on failure
6385 ************************************************************************/
6386 static int
6387 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6388 {
6389 struct adapter *adapter = ifp->if_softc;
6390 struct ixgbe_hw *hw = &adapter->hw;
6391 struct ifcapreq *ifcr = data;
6392 struct ifreq *ifr = data;
6393 int error = 0;
6394 int l4csum_en;
6395 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6396 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6397
6398 if (ixgbe_fw_recovery_mode_swflag(adapter))
6399 return (EPERM);
6400
6401 switch (command) {
6402 case SIOCSIFFLAGS:
6403 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6404 break;
6405 case SIOCADDMULTI:
6406 case SIOCDELMULTI:
6407 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6408 break;
6409 case SIOCSIFMEDIA:
6410 case SIOCGIFMEDIA:
6411 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6412 break;
6413 case SIOCSIFCAP:
6414 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6415 break;
6416 case SIOCSIFMTU:
6417 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6418 break;
6419 #ifdef __NetBSD__
6420 case SIOCINITIFADDR:
6421 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6422 break;
6423 case SIOCGIFFLAGS:
6424 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6425 break;
6426 case SIOCGIFAFLAG_IN:
6427 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6428 break;
6429 case SIOCGIFADDR:
6430 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6431 break;
6432 case SIOCGIFMTU:
6433 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6434 break;
6435 case SIOCGIFCAP:
6436 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6437 break;
6438 case SIOCGETHERCAP:
6439 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6440 break;
6441 case SIOCGLIFADDR:
6442 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6443 break;
6444 case SIOCZIFDATA:
6445 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6446 hw->mac.ops.clear_hw_cntrs(hw);
6447 ixgbe_clear_evcnt(adapter);
6448 break;
6449 case SIOCAIFADDR:
6450 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6451 break;
6452 #endif
6453 default:
6454 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6455 break;
6456 }
6457
6458 switch (command) {
6459 case SIOCGI2C:
6460 {
6461 struct ixgbe_i2c_req i2c;
6462
6463 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6464 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6465 if (error != 0)
6466 break;
6467 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6468 error = EINVAL;
6469 break;
6470 }
6471 if (i2c.len > sizeof(i2c.data)) {
6472 error = EINVAL;
6473 break;
6474 }
6475
6476 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6477 i2c.dev_addr, i2c.data);
6478 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6479 break;
6480 }
6481 case SIOCSIFCAP:
6482 /* Layer-4 Rx checksum offload has to be turned on and
6483 * off as a unit.
6484 */
6485 l4csum_en = ifcr->ifcr_capenable & l4csum;
6486 if (l4csum_en != l4csum && l4csum_en != 0)
6487 return EINVAL;
6488 /*FALLTHROUGH*/
6489 case SIOCADDMULTI:
6490 case SIOCDELMULTI:
6491 case SIOCSIFFLAGS:
6492 case SIOCSIFMTU:
6493 default:
6494 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6495 return error;
6496 if ((ifp->if_flags & IFF_RUNNING) == 0)
6497 ; /* Nothing to do: the interface is not running */
6498 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6499 IXGBE_CORE_LOCK(adapter);
6500 if ((ifp->if_flags & IFF_RUNNING) != 0)
6501 ixgbe_init_locked(adapter);
6502 ixgbe_recalculate_max_frame(adapter);
6503 IXGBE_CORE_UNLOCK(adapter);
6504 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6505 /*
6506 * Multicast list has changed; set the hardware filter
6507 * accordingly.
6508 */
6509 IXGBE_CORE_LOCK(adapter);
6510 ixgbe_disable_intr(adapter);
6511 ixgbe_set_rxfilter(adapter);
6512 ixgbe_enable_intr(adapter);
6513 IXGBE_CORE_UNLOCK(adapter);
6514 }
6515 return 0;
6516 }
6517
6518 return error;
6519 } /* ixgbe_ioctl */
6520
6521 /************************************************************************
6522 * ixgbe_check_fan_failure
6523 ************************************************************************/
6524 static void
6525 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6526 {
6527 u32 mask;
6528
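	/*
	 * The fan-failure indication is read from EICR in interrupt
	 * context and from the ESDP register otherwise, hence the
	 * different masks.
	 */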
6529 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6530 IXGBE_ESDP_SDP1;
6531
6532 if (reg & mask)
6533 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6534 } /* ixgbe_check_fan_failure */
6535
6536 /************************************************************************
6537 * ixgbe_handle_que
6538 ************************************************************************/
6539 static void
6540 ixgbe_handle_que(void *context)
6541 {
6542 struct ix_queue *que = context;
6543 struct adapter *adapter = que->adapter;
6544 struct tx_ring *txr = que->txr;
6545 struct ifnet *ifp = adapter->ifp;
6546 bool more = false;
6547
6548 que->handleq.ev_count++;
6549
6550 if (ifp->if_flags & IFF_RUNNING) {
6551 more = ixgbe_rxeof(que);
6552 IXGBE_TX_LOCK(txr);
6553 more |= ixgbe_txeof(txr);
6554 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6555 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6556 ixgbe_mq_start_locked(ifp, txr);
6557 /* Only for queue 0 */
6558 /* NetBSD still needs this for CBQ */
6559 if ((&adapter->queues[0] == que)
6560 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6561 ixgbe_legacy_start_locked(ifp, txr);
6562 IXGBE_TX_UNLOCK(txr);
6563 }
6564
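	/*
	 * Either reschedule ourselves when more work remains, re-enable
	 * just this queue's MSI-X vector when one is established, or
	 * re-enable all interrupts for the INTx/MSI case.
	 */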
6565 if (more) {
6566 que->req.ev_count++;
6567 ixgbe_sched_handle_que(adapter, que);
6568 } else if (que->res != NULL) {
6569 /* Re-enable this interrupt */
6570 ixgbe_enable_queue(adapter, que->msix);
6571 } else
6572 ixgbe_enable_intr(adapter);
6573
6574 return;
6575 } /* ixgbe_handle_que */
6576
6577 /************************************************************************
6578 * ixgbe_handle_que_work
6579 ************************************************************************/
6580 static void
6581 ixgbe_handle_que_work(struct work *wk, void *context)
6582 {
6583 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6584
6585 /*
6586 * "enqueued flag" is not required here.
6587 * See ixgbe_msix_que().
6588 */
6589 ixgbe_handle_que(que);
6590 }
6591
6592 /************************************************************************
6593 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6594 ************************************************************************/
6595 static int
6596 ixgbe_allocate_legacy(struct adapter *adapter,
6597 const struct pci_attach_args *pa)
6598 {
6599 device_t dev = adapter->dev;
6600 struct ix_queue *que = adapter->queues;
6601 struct tx_ring *txr = adapter->tx_rings;
6602 int counts[PCI_INTR_TYPE_SIZE];
6603 pci_intr_type_t intr_type, max_type;
6604 char intrbuf[PCI_INTRSTR_LEN];
6605 char wqname[MAXCOMLEN];
6606 const char *intrstr = NULL;
6607 int defertx_error = 0, error;
6608
6609 /* We allocate a single interrupt resource */
6610 max_type = PCI_INTR_TYPE_MSI;
6611 counts[PCI_INTR_TYPE_MSIX] = 0;
6612 counts[PCI_INTR_TYPE_MSI] =
6613 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6614 /* Check feat_cap, not feat_en, so that we can fall back to INTx */
6615 counts[PCI_INTR_TYPE_INTX] =
6616 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6617
6618 alloc_retry:
6619 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6620 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6621 return ENXIO;
6622 }
6623 adapter->osdep.nintrs = 1;
6624 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6625 intrbuf, sizeof(intrbuf));
6626 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6627 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6628 device_xname(dev));
6629 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6630 if (adapter->osdep.ihs[0] == NULL) {
6631 aprint_error_dev(dev,"unable to establish %s\n",
6632 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6633 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6634 adapter->osdep.intrs = NULL;
6635 switch (intr_type) {
6636 case PCI_INTR_TYPE_MSI:
6637 /* The next try is for INTx: Disable MSI */
6638 max_type = PCI_INTR_TYPE_INTX;
6639 counts[PCI_INTR_TYPE_INTX] = 1;
6640 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6641 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6642 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6643 goto alloc_retry;
6644 } else
6645 break;
6646 case PCI_INTR_TYPE_INTX:
6647 default:
6648 /* See below */
6649 break;
6650 }
6651 }
6652 if (intr_type == PCI_INTR_TYPE_INTX) {
6653 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6654 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6655 }
6656 if (adapter->osdep.ihs[0] == NULL) {
6657 aprint_error_dev(dev,
6658 "couldn't establish interrupt%s%s\n",
6659 intrstr ? " at " : "", intrstr ? intrstr : "");
6660 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6661 adapter->osdep.intrs = NULL;
6662 return ENXIO;
6663 }
6664 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6665 /*
6666 * Try allocating a fast interrupt and the associated deferred
6667 * processing contexts.
6668 */
6669 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6670 txr->txr_si =
6671 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6672 ixgbe_deferred_mq_start, txr);
6673
6674 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6675 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6676 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6677 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6678 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6679 }
6680 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6681 ixgbe_handle_que, que);
6682 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6683 error = workqueue_create(&adapter->que_wq, wqname,
6684 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6685 IXGBE_WORKQUEUE_FLAGS);
6686
6687 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6688 && ((txr->txr_si == NULL) || defertx_error != 0))
6689 || (que->que_si == NULL) || error != 0) {
6690 aprint_error_dev(dev,
6691 "could not establish software interrupts\n");
6692
6693 return ENXIO;
6694 }
6695 /* For simplicity in the handlers */
6696 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6697
6698 return (0);
6699 } /* ixgbe_allocate_legacy */
6700
6701 /************************************************************************
6702 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6703 ************************************************************************/
6704 static int
6705 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6706 {
6707 device_t dev = adapter->dev;
6708 struct ix_queue *que = adapter->queues;
6709 struct tx_ring *txr = adapter->tx_rings;
6710 pci_chipset_tag_t pc;
6711 char intrbuf[PCI_INTRSTR_LEN];
6712 char intr_xname[32];
6713 char wqname[MAXCOMLEN];
6714 const char *intrstr = NULL;
6715 int error, vector = 0;
6716 int cpu_id = 0;
6717 kcpuset_t *affinity;
6718 #ifdef RSS
6719 unsigned int rss_buckets = 0;
6720 kcpuset_t cpu_mask;
6721 #endif
6722
6723 pc = adapter->osdep.pc;
6724 #ifdef RSS
6725 /*
6726 * If we're doing RSS, the number of queues needs to
6727 * match the number of RSS buckets that are configured.
6728 *
6729 	 * + If there are more queues than RSS buckets, we'll end
6730 	 *   up with queues that get no traffic.
6731 	 *
6732 	 * + If there are more RSS buckets than queues, we'll end
6733 	 *   up having multiple RSS buckets map to the same queue,
6734 	 *   so there'll be some contention.
6735 */
6736 rss_buckets = rss_getnumbuckets();
6737 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6738 (adapter->num_queues != rss_buckets)) {
6739 device_printf(dev,
6740 "%s: number of queues (%d) != number of RSS buckets (%d)"
6741 "; performance will be impacted.\n",
6742 __func__, adapter->num_queues, rss_buckets);
6743 }
6744 #endif
6745
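 	/* One vector for each queue, plus one more for the link (admin) interrupt */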
6746 adapter->osdep.nintrs = adapter->num_queues + 1;
6747 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6748 adapter->osdep.nintrs) != 0) {
6749 aprint_error_dev(dev,
6750 "failed to allocate MSI-X interrupt\n");
6751 return (ENXIO);
6752 }
6753
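 	/* Set up a TX/RX handler for each queue and spread the vectors over the CPUs */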
6754 kcpuset_create(&affinity, false);
6755 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6756 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6757 device_xname(dev), i);
6758 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6759 sizeof(intrbuf));
6760 #ifdef IXGBE_MPSAFE
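 		/* Mark the vector MP-safe so the handler runs without the kernel lock */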
6761 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6762 true);
6763 #endif
6764 /* Set the handler function */
6765 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6766 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6767 intr_xname);
6768 if (que->res == NULL) {
6769 aprint_error_dev(dev,
6770 "Failed to register QUE handler\n");
6771 error = ENXIO;
6772 goto err_out;
6773 }
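 		/* Record the vector in the active-queue bitmask used by the interrupt handlers */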
6774 que->msix = vector;
6775 adapter->active_queues |= 1ULL << que->msix;
6776
6777 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6778 #ifdef RSS
6779 /*
6780 * The queue ID is used as the RSS layer bucket ID.
6781 * We look up the queue ID -> RSS CPU ID and select
6782 * that.
6783 */
6784 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6785 CPU_SETOF(cpu_id, &cpu_mask);
6786 #endif
6787 } else {
6788 /*
6789 			 * Bind the MSI-X vector, and thus the
6790 			 * rings, to the corresponding CPU.
6791 *
6792 * This just happens to match the default RSS
6793 * round-robin bucket -> queue -> CPU allocation.
6794 */
6795 if (adapter->num_queues > 1)
6796 cpu_id = i;
6797 }
6798 /* Round-robin affinity */
6799 kcpuset_zero(affinity);
6800 kcpuset_set(affinity, cpu_id % ncpu);
6801 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6802 NULL);
6803 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6804 intrstr);
6805 if (error == 0) {
6806 #if 1 /* def IXGBE_DEBUG */
6807 #ifdef RSS
6808 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
6809 cpu_id % ncpu);
6810 #else
6811 aprint_normal(", bound queue %d to cpu %d", i,
6812 cpu_id % ncpu);
6813 #endif
6814 #endif /* IXGBE_DEBUG */
6815 }
6816 aprint_normal("\n");
6817
6818 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6819 txr->txr_si = softint_establish(
6820 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6821 ixgbe_deferred_mq_start, txr);
6822 if (txr->txr_si == NULL) {
6823 aprint_error_dev(dev,
6824 "couldn't establish software interrupt\n");
6825 error = ENXIO;
6826 goto err_out;
6827 }
6828 }
6829 		que->que_si = softint_establish(
6830 		    SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6831 		    ixgbe_handle_que, que);
6832 if (que->que_si == NULL) {
6833 aprint_error_dev(dev,
6834 "couldn't establish software interrupt\n");
6835 error = ENXIO;
6836 goto err_out;
6837 }
6838 }
6839 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6840 error = workqueue_create(&adapter->txr_wq, wqname,
6841 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6842 IXGBE_WORKQUEUE_FLAGS);
6843 if (error) {
6844 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6845 goto err_out;
6846 }
6847 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6848
6849 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6850 error = workqueue_create(&adapter->que_wq, wqname,
6851 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6852 IXGBE_WORKQUEUE_FLAGS);
6853 if (error) {
6854 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6855 goto err_out;
6856 }
6857
6858 	/* And the link (admin) interrupt, placed on the next CPU in line */
6859 cpu_id++;
6860 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6861 adapter->vector = vector;
6862 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6863 sizeof(intrbuf));
6864 #ifdef IXGBE_MPSAFE
6865 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6866 true);
6867 #endif
6868 /* Set the link handler function */
6869 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6870 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6871 intr_xname);
6872 if (adapter->osdep.ihs[vector] == NULL) {
6873 aprint_error_dev(dev, "Failed to register LINK handler\n");
6874 error = ENXIO;
6875 goto err_out;
6876 }
6877 /* Round-robin affinity */
6878 kcpuset_zero(affinity);
6879 kcpuset_set(affinity, cpu_id % ncpu);
6880 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6881 NULL);
6882
6883 aprint_normal_dev(dev,
6884 "for link, interrupting at %s", intrstr);
6885 if (error == 0)
6886 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6887 else
6888 aprint_normal("\n");
6889
6890 kcpuset_destroy(affinity);
6891 aprint_normal_dev(dev,
6892 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6893
6894 return (0);
6895
6896 err_out:
6897 kcpuset_destroy(affinity);
6898 ixgbe_free_deferred_handlers(adapter);
6899 ixgbe_free_pciintr_resources(adapter);
6900 return (error);
6901 } /* ixgbe_allocate_msix */
6902
6903 /************************************************************************
6904 * ixgbe_configure_interrupts
6905 *
6906  * Set up MSI-X, MSI, or legacy (INTx) interrupts, in that order.
6907 * This will also depend on user settings.
6908 ************************************************************************/
6909 static int
6910 ixgbe_configure_interrupts(struct adapter *adapter)
6911 {
6912 device_t dev = adapter->dev;
6913 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6914 int want, queues, msgs;
6915
6916 /* Default to 1 queue if MSI-X setup fails */
6917 adapter->num_queues = 1;
6918
6919 	/* Honor tunable override (MSI-X may have been disabled) */
6920 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6921 goto msi;
6922
6923 /*
6924 	 * NetBSD only: use single-vector MSI when the number of CPUs is 1,
6925 	 * to save an interrupt slot.
6926 */
6927 if (ncpu == 1)
6928 goto msi;
6929
6930 /* First try MSI-X */
6931 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6932 msgs = MIN(msgs, IXG_MAX_NINTR);
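 	/* Need at least one vector for a queue pair plus one for the link */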
6933 if (msgs < 2)
6934 goto msi;
6935
6936 	adapter->msix_mem = (void *)1; /* XXX: non-NULL marks MSI-X as in use */
6937
6938 /* Figure out a reasonable auto config value */
6939 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6940
6941 #ifdef RSS
6942 /* If we're doing RSS, clamp at the number of RSS buckets */
6943 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6944 queues = uimin(queues, rss_getnumbuckets());
6945 #endif
6946 if (ixgbe_num_queues > queues) {
6947 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too "
 		    "large, using reduced amount (%d).\n", ixgbe_num_queues,
 		    queues);
6948 ixgbe_num_queues = queues;
6949 }
6950
6951 if (ixgbe_num_queues != 0)
6952 queues = ixgbe_num_queues;
6953 else
6954 queues = uimin(queues,
6955 uimin(mac->max_tx_queues, mac->max_rx_queues));
6956
6957 /* reflect correct sysctl value */
6958 ixgbe_num_queues = queues;
6959
6960 /*
6961 * Want one vector (RX/TX pair) per queue
6962 * plus an additional for Link.
6963 */
6964 want = queues + 1;
6965 if (msgs >= want)
6966 msgs = want;
6967 else {
6968 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
6969 		    "%d vectors available but %d wanted!\n",
6970 		    msgs, want);
6971 goto msi;
6972 }
6973 adapter->num_queues = queues;
6974 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6975 return (0);
6976
6977 /*
6978 * MSI-X allocation failed or provided us with
6979 	 * fewer vectors than needed. Free MSI-X resources
6980 * and we'll try enabling MSI.
6981 */
6982 msi:
6983 /* Without MSI-X, some features are no longer supported */
6984 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6985 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6986 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6987 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6988
6989 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6990 	adapter->msix_mem = NULL; /* XXX: clear the MSI-X-in-use marker */
6993 if (msgs != 0) {
6994 msgs = 1;
6995 adapter->feat_en |= IXGBE_FEATURE_MSI;
6996 return (0);
6997 }
6998
6999 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7000 aprint_error_dev(dev,
7001 "Device does not support legacy interrupts.\n");
7002 		return (ENXIO);
7003 }
7004
7005 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7006
7007 return (0);
7008 } /* ixgbe_configure_interrupts */
7009
7010
7011 /************************************************************************
7012 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7013 *
7014 * Done outside of interrupt context since the driver might sleep
7015 ************************************************************************/
7016 static void
7017 ixgbe_handle_link(void *context)
7018 {
7019 struct adapter *adapter = context;
7020 struct ixgbe_hw *hw = &adapter->hw;
7021
7022 KASSERT(mutex_owned(&adapter->core_mtx));
7023
7024 ++adapter->link_workev.ev_count;
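 	/* Read the current link state from the hardware and push any change to the stack */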
7025 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7026 ixgbe_update_link_status(adapter);
7027
7028 /* Re-enable link interrupts */
7029 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7030 } /* ixgbe_handle_link */
7031
7032 #if 0
7033 /************************************************************************
7034 * ixgbe_rearm_queues
7035 ************************************************************************/
7036 static __inline void
7037 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
7038 {
7039 u32 mask;
7040
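 	/*
 	 * 82598 has a single 32-bit EICS register; later MACs split the
 	 * 64 queue bits across EICS_EX(0) and EICS_EX(1).
 	 */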
7041 switch (adapter->hw.mac.type) {
7042 case ixgbe_mac_82598EB:
7043 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
7045 break;
7046 case ixgbe_mac_82599EB:
7047 case ixgbe_mac_X540:
7048 case ixgbe_mac_X550:
7049 case ixgbe_mac_X550EM_x:
7050 case ixgbe_mac_X550EM_a:
7051 mask = (queues & 0xFFFFFFFF);
7052 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
7053 mask = (queues >> 32);
7054 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
7055 break;
7056 default:
7057 break;
7058 }
7059 } /* ixgbe_rearm_queues */
7060 #endif
7061