/* $NetBSD: ixgbe.c,v 1.258 2020/09/07 09:14:53 knakahara Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_quirks(struct adapter *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_deferred_handlers(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_schedule_admin_tasklet(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile option right now, as it's set during
 * attach; set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use of unsupported SFP+ modules;
 * note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
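
/*
 * With NET_MPSAFE defined, the callout, softint and workqueue flags above
 * request handlers that run without the kernel lock, so the driver relies
 * on its own locking (the core lock and per-queue mutexes) instead.
 */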
423
424 /************************************************************************
425 * ixgbe_initialize_rss_mapping
426 ************************************************************************/
427 static void
428 ixgbe_initialize_rss_mapping(struct adapter *adapter)
429 {
430 struct ixgbe_hw *hw = &adapter->hw;
431 u32 reta = 0, mrqc, rss_key[10];
432 int queue_id, table_size, index_mult;
433 int i, j;
434 u32 rss_hash_config;
435
436 /* force use default RSS key. */
437 #ifdef __NetBSD__
438 rss_getkey((uint8_t *) &rss_key);
439 #else
440 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
441 /* Fetch the configured RSS key */
442 rss_getkey((uint8_t *) &rss_key);
443 } else {
444 /* set up random bits */
445 cprng_fast(&rss_key, sizeof(rss_key));
446 }
447 #endif
448
449 /* Set multiplier for RETA setup and table size based on MAC */
450 index_mult = 0x1;
451 table_size = 128;
452 switch (adapter->hw.mac.type) {
453 case ixgbe_mac_82598EB:
454 index_mult = 0x11;
455 break;
456 case ixgbe_mac_X550:
457 case ixgbe_mac_X550EM_x:
458 case ixgbe_mac_X550EM_a:
459 table_size = 512;
460 break;
461 default:
462 break;
463 }
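
	/*
	 * The X550 family provides a 512-entry redirection table; entries
	 * 0-127 are written through RETA[0..31] and the remainder through
	 * ERETA[] (see the register writes in the loop below).
	 */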
	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
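		/*
		 * Four 8-bit entries are accumulated per 32-bit register:
		 * entry i occupies byte (i & 3). The shift below moves the
		 * previous bytes down, the OR places the new entry in the
		 * top byte, and the register is written every 4th entry.
		 */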
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
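/*
 * The SRRCTL packet buffer size field is specified in units of
 * 1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT bytes (1 KB), so BSIZEPKT_ROUNDUP
 * rounds the Rx buffer size up to the next unit.
 */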
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

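	/*
	 * Convert the Rx mbuf buffer size into SRRCTL's BSIZEPKT units,
	 * rounding up.
	 */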
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	const char	*vendor, *product;

	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
		/*
		 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
		 * MA10-ST0.
		 */
		vendor = pmf_get_platform("system-vendor");
		product = pmf_get_platform("system-product");

		if ((vendor == NULL) || (product == NULL))
			return;

		if ((strcmp(vendor, "GIGABYTE") == 0) &&
		    (strcmp(product, "MA10-ST0") == 0)) {
			aprint_verbose_dev(dev,
			    "Enable SFP+ MOD_ABS inverse quirk\n");
			adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
		}
	}
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 * Called when the driver is being loaded.
 * Identifies the type of hardware, allocates all resources
 * and initializes the hardware.
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter	*adapter;
	struct ixgbe_hw	*hw;
	int		error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool		unsupported_sfp = false;
	const char	*str;
	char		wqname[MAXCOMLEN];
	char		buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;
	adapter->osdep.detaching = false;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* Set quirk flags */
	ixgbe_quirks(adapter);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev,
		    "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print the PHY ID only for copper PHYs. On devices which have an
	 * SFP(+) cage with a module inserted, phy.id is not the MII PHY id
	 * but the SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop_locked;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_deferred_handlers(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 * Checks whether the adapter's ports are capable of
 * Wake On LAN by reading the adapter's NVM.
 *
 * Sets each port's hw->wol_enabled value depending
 * on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u16		dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 * Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet	*ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default. If vlans are created on another
	 * pseudo device (e.g. lagg) then vlan events are not passed
	 * through, breaking operation, but with HW FILTER off it works.
	 * If using vlans directly on the ixgbe driver you can enable this
	 * and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u64		layer;

	layer = adapter->phy_layer;

#define	ADD(mm, dd)							\
	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);

	ADD(IFM_NONE, 0);

	/* Media types with matching NetBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		ADD(IFM_10G_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		ADD(IFM_1000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
		ADD(IFM_100_TX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
		ADD(IFM_10_T | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ADD(IFM_10G_LR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_LX | IFM_FDX, 0);
		}
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ADD(IFM_10G_SR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_SX | IFM_FDX, 0);
		}
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
		ADD(IFM_1000_SX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
		ADD(IFM_10G_CX4 | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		ADD(IFM_10G_KR | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		ADD(IFM_10G_KX4 | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		ADD(IFM_1000_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		ADD(IFM_2500_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
		ADD(IFM_2500_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
		ADD(IFM_5000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		ADD(IFM_1000_BX10 | IFM_FDX, 0);
	/* XXX no ifmedia_set? */

	ADD(IFM_AUTO, 0);

#undef ADD
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (TRUE);
		return (FALSE);
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (TRUE);
		default:
			return (FALSE);
		}
	default:
		return (FALSE);
	}
} /* ixgbe_is_sfp */

static void
ixgbe_schedule_admin_tasklet(struct adapter *adapter)
{

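	/*
	 * admin_pending makes this enqueue idempotent: only the caller
	 * that flips it from 0 to 1 enqueues the work. It is expected to
	 * be cleared again by the admin handler before it processes the
	 * pending requests.
	 */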
1541 if (__predict_true(adapter->osdep.detaching == false)) {
1542 if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
1543 workqueue_enqueue(adapter->admin_wq,
1544 &adapter->admin_wc, NULL);
1545 }
1546 }
1547
1548 /************************************************************************
1549 * ixgbe_config_link
1550 ************************************************************************/
1551 static void
1552 ixgbe_config_link(struct adapter *adapter)
1553 {
1554 struct ixgbe_hw *hw = &adapter->hw;
1555 u32 autoneg, err = 0;
1556 u32 task_requests = 0;
1557 bool sfp, negotiate = false;
1558
1559 sfp = ixgbe_is_sfp(hw);
1560
1561 if (sfp) {
1562 if (hw->phy.multispeed_fiber) {
1563 ixgbe_enable_tx_laser(hw);
1564 task_requests |= IXGBE_REQUEST_TASK_MSF;
1565 }
1566 task_requests |= IXGBE_REQUEST_TASK_MOD;
1567 atomic_or_32(&adapter->task_requests, task_requests);
1568 ixgbe_schedule_admin_tasklet(adapter);
1569 } else {
1570 struct ifmedia *ifm = &adapter->media;
1571
1572 if (hw->mac.ops.check_link)
1573 err = ixgbe_check_link(hw, &adapter->link_speed,
1574 &adapter->link_up, FALSE);
1575 if (err)
1576 return;
1577
1578 /*
1579 * Check if it's the first call. If it's the first call,
1580 * get value for auto negotiation.
1581 */
1582 autoneg = hw->phy.autoneg_advertised;
1583 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1584 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1585 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1586 &negotiate);
1587 if (err)
1588 return;
1589 if (hw->mac.ops.setup_link)
1590 err = hw->mac.ops.setup_link(hw, autoneg,
1591 adapter->link_up);
1592 }
1593
1594 } /* ixgbe_config_link */
1595
1596 /************************************************************************
1597 * ixgbe_update_stats_counters - Update board statistics counters.
1598 ************************************************************************/
1599 static void
1600 ixgbe_update_stats_counters(struct adapter *adapter)
1601 {
1602 struct ifnet *ifp = adapter->ifp;
1603 struct ixgbe_hw *hw = &adapter->hw;
1604 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1605 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1606 u64 total_missed_rx = 0;
1607 uint64_t crcerrs, rlec;
1608 unsigned int queue_counters;
1609 int i;
1610
1611 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1612 stats->crcerrs.ev_count += crcerrs;
1613 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1614 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1615 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1616 if (hw->mac.type >= ixgbe_mac_X550)
1617 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1618
1619 /* 16 registers exist */
1620 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1621 for (i = 0; i < queue_counters; i++) {
1622 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1623 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1624 if (hw->mac.type >= ixgbe_mac_82599EB) {
1625 stats->qprdc[i].ev_count
1626 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1627 }
1628 }
1629
1630 /* 8 registers exist */
1631 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1632 uint32_t mp;
1633
1634 /* MPC */
1635 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1636 /* global total per queue */
1637 stats->mpc[i].ev_count += mp;
1638 /* running comprehensive total for stats display */
1639 total_missed_rx += mp;
1640
1641 if (hw->mac.type == ixgbe_mac_82598EB)
1642 stats->rnbc[i].ev_count
1643 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1644
1645 stats->pxontxc[i].ev_count
1646 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1647 stats->pxofftxc[i].ev_count
1648 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1649 if (hw->mac.type >= ixgbe_mac_82599EB) {
1650 stats->pxonrxc[i].ev_count
1651 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1652 stats->pxoffrxc[i].ev_count
1653 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1654 stats->pxon2offc[i].ev_count
1655 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1656 } else {
1657 stats->pxonrxc[i].ev_count
1658 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1659 stats->pxoffrxc[i].ev_count
1660 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1661 }
1662 }
1663 stats->mpctotal.ev_count += total_missed_rx;
1664
1665 /* The datasheet says M[LR]FC are valid only while the link is up at 10Gbps */
1666 if ((adapter->link_active == LINK_STATE_UP)
1667 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1668 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1669 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1670 }
1671 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1672 stats->rlec.ev_count += rlec;
1673
1674 /* Hardware workaround: GPRC also counts missed packets */
1675 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1676
1677 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1678 stats->lxontxc.ev_count += lxon;
1679 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1680 stats->lxofftxc.ev_count += lxoff;
1681 total = lxon + lxoff;
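/*
 * Note: the hardware includes XON/XOFF pause frames in the
 * good/total TX packet and octet counters, so 'total' (and,
 * for octets, total * ETHER_MIN_LEN as the assumed pause
 * frame size) is subtracted from GOTC/GPTC/MPTC/PTC64 below.
 */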
1682
1683 if (hw->mac.type != ixgbe_mac_82598EB) {
1684 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1685 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1686 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1687 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1688 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1689 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1690 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1691 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1692 } else {
1693 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1694 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1695 /* 82598 only has a counter in the high register */
1696 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1697 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1698 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1699 }
1700
1701 /*
1702 * Workaround: mprc hardware is incorrectly counting
1703 * broadcasts, so for now we subtract those.
1704 */
1705 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1706 stats->bprc.ev_count += bprc;
1707 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1708 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1709
1710 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1711 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1712 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1713 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1714 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1715 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1716
1717 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1718 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1719 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1720
1721 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1722 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1723 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1724 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1725 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1726 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1727 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1728 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1729 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1730 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1731 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1732 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1733 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1734 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1735 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1736 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1737 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1738 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1739 /* Only read FCoE counters on 82599 and newer */
1740 if (hw->mac.type != ixgbe_mac_82598EB) {
1741 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1742 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1743 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1744 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1745 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1746 }
1747
1748 /*
1749 * Fill out the OS statistics structure. Only RX errors are required
1750 * here because all TX counters are incremented in the TX path and
1751 * normal RX counters are prepared in ether_input().
1752 */
1753 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1754 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1755 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1756 IF_STAT_PUTREF(ifp);
1757 } /* ixgbe_update_stats_counters */
1758
1759 /************************************************************************
1760 * ixgbe_add_hw_stats
1761 *
1762 * Add sysctl variables, one per statistic, to the system.
1763 ************************************************************************/
1764 static void
1765 ixgbe_add_hw_stats(struct adapter *adapter)
1766 {
1767 device_t dev = adapter->dev;
1768 const struct sysctlnode *rnode, *cnode;
1769 struct sysctllog **log = &adapter->sysctllog;
1770 struct tx_ring *txr = adapter->tx_rings;
1771 struct rx_ring *rxr = adapter->rx_rings;
1772 struct ixgbe_hw *hw = &adapter->hw;
1773 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1774 const char *xname = device_xname(dev);
1775 int i;
1776
1777 /* Driver Statistics */
1778 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1779 NULL, xname, "Driver tx dma soft fail EFBIG");
1780 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1781 NULL, xname, "m_defrag() failed");
1782 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1783 NULL, xname, "Driver tx dma hard fail EFBIG");
1784 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1785 NULL, xname, "Driver tx dma hard fail EINVAL");
1786 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1787 NULL, xname, "Driver tx dma hard fail other");
1788 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1789 NULL, xname, "Driver tx dma soft fail EAGAIN");
1790 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1791 NULL, xname, "Driver tx dma soft fail ENOMEM");
1792 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1793 NULL, xname, "Watchdog timeouts");
1794 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1795 NULL, xname, "TSO errors");
1796 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1797 NULL, xname, "Admin MSI-X IRQ Handled");
1798 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1799 NULL, xname, "Link event");
1800 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1801 NULL, xname, "SFP+ module event");
1802 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1803 NULL, xname, "Multispeed event");
1804 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1805 NULL, xname, "External PHY event");
1806
1807 /* The maximum number of traffic classes is 8 */
1808 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1809 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1810 snprintf(adapter->tcs[i].evnamebuf,
1811 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1812 xname, i);
1813 if (i < __arraycount(stats->mpc)) {
1814 evcnt_attach_dynamic(&stats->mpc[i],
1815 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1816 "RX Missed Packet Count");
1817 if (hw->mac.type == ixgbe_mac_82598EB)
1818 evcnt_attach_dynamic(&stats->rnbc[i],
1819 EVCNT_TYPE_MISC, NULL,
1820 adapter->tcs[i].evnamebuf,
1821 "Receive No Buffers");
1822 }
1823 if (i < __arraycount(stats->pxontxc)) {
1824 evcnt_attach_dynamic(&stats->pxontxc[i],
1825 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1826 "pxontxc");
1827 evcnt_attach_dynamic(&stats->pxonrxc[i],
1828 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1829 "pxonrxc");
1830 evcnt_attach_dynamic(&stats->pxofftxc[i],
1831 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1832 "pxofftxc");
1833 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1834 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1835 "pxoffrxc");
1836 if (hw->mac.type >= ixgbe_mac_82599EB)
1837 evcnt_attach_dynamic(&stats->pxon2offc[i],
1838 EVCNT_TYPE_MISC, NULL,
1839 adapter->tcs[i].evnamebuf,
1840 "pxon2offc");
1841 }
1842 }
1843
1844 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1845 #ifdef LRO
1846 struct lro_ctrl *lro = &rxr->lro;
1847 #endif /* LRO */
1848
1849 snprintf(adapter->queues[i].evnamebuf,
1850 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1851 xname, i);
1852 snprintf(adapter->queues[i].namebuf,
1853 sizeof(adapter->queues[i].namebuf), "q%d", i);
1854
1855 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1856 aprint_error_dev(dev, "could not create sysctl root\n");
1857 break;
1858 }
1859
1860 if (sysctl_createv(log, 0, &rnode, &rnode,
1861 0, CTLTYPE_NODE,
1862 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1863 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1864 break;
1865
1866 if (sysctl_createv(log, 0, &rnode, &cnode,
1867 CTLFLAG_READWRITE, CTLTYPE_INT,
1868 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1869 ixgbe_sysctl_interrupt_rate_handler, 0,
1870 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1871 break;
1872
1873 if (sysctl_createv(log, 0, &rnode, &cnode,
1874 CTLFLAG_READONLY, CTLTYPE_INT,
1875 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1876 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1877 0, CTL_CREATE, CTL_EOL) != 0)
1878 break;
1879
1880 if (sysctl_createv(log, 0, &rnode, &cnode,
1881 CTLFLAG_READONLY, CTLTYPE_INT,
1882 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1883 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1884 0, CTL_CREATE, CTL_EOL) != 0)
1885 break;
1886
1887 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1888 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1889 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1890 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1891 "Handled queue in softint");
1892 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1893 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1894 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1895 NULL, adapter->queues[i].evnamebuf, "TSO");
1896 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1897 NULL, adapter->queues[i].evnamebuf,
1898 "Queue No Descriptor Available");
1899 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1900 NULL, adapter->queues[i].evnamebuf,
1901 "Queue Packets Transmitted");
1902 #ifndef IXGBE_LEGACY_TX
1903 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1904 NULL, adapter->queues[i].evnamebuf,
1905 "Packets dropped in pcq");
1906 #endif
1907
1908 if (sysctl_createv(log, 0, &rnode, &cnode,
1909 CTLFLAG_READONLY,
1910 CTLTYPE_INT,
1911 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1912 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1913 CTL_CREATE, CTL_EOL) != 0)
1914 break;
1915
1916 if (sysctl_createv(log, 0, &rnode, &cnode,
1917 CTLFLAG_READONLY,
1918 CTLTYPE_INT,
1919 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1920 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1921 CTL_CREATE, CTL_EOL) != 0)
1922 break;
1923
1924 if (sysctl_createv(log, 0, &rnode, &cnode,
1925 CTLFLAG_READONLY,
1926 CTLTYPE_INT,
1927 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1928 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1929 CTL_CREATE, CTL_EOL) != 0)
1930 break;
1931
1932 if (i < __arraycount(stats->qprc)) {
1933 evcnt_attach_dynamic(&stats->qprc[i],
1934 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1935 "qprc");
1936 evcnt_attach_dynamic(&stats->qptc[i],
1937 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1938 "qptc");
1939 evcnt_attach_dynamic(&stats->qbrc[i],
1940 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1941 "qbrc");
1942 evcnt_attach_dynamic(&stats->qbtc[i],
1943 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1944 "qbtc");
1945 if (hw->mac.type >= ixgbe_mac_82599EB)
1946 evcnt_attach_dynamic(&stats->qprdc[i],
1947 EVCNT_TYPE_MISC, NULL,
1948 adapter->queues[i].evnamebuf, "qprdc");
1949 }
1950
1951 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1952 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1953 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1954 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1955 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1956 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1957 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1958 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1959 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1960 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1961 #ifdef LRO
1962 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1963 CTLFLAG_RD, &lro->lro_queued, 0,
1964 "LRO Queued");
1965 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1966 CTLFLAG_RD, &lro->lro_flushed, 0,
1967 "LRO Flushed");
1968 #endif /* LRO */
1969 }
1970
1971 /* MAC stats get their own sub node */
1972
1973 snprintf(stats->namebuf,
1974 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1975
1976 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "rx csum offload - IP");
1978 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "rx csum offload - L4");
1980 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "rx csum offload - IP bad");
1982 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "rx csum offload - L4 bad");
1984 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Interrupt conditions zero");
1986 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Legacy interrupts");
1988
1989 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "CRC Errors");
1991 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Illegal Byte Errors");
1993 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Byte Errors");
1995 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "MAC Short Packets Discarded");
1997 if (hw->mac.type >= ixgbe_mac_X550)
1998 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Bad SFD");
2000 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Total Packets Missed");
2002 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "MAC Local Faults");
2004 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "MAC Remote Faults");
2006 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Receive Length Errors");
2008 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Link XON Transmitted");
2010 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Link XON Received");
2012 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Link XOFF Transmitted");
2014 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Link XOFF Received");
2016
2017 /* Packet Reception Stats */
2018 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Total Octets Received");
2020 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Good Octets Received");
2022 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Total Packets Received");
2024 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Good Packets Received");
2026 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Multicast Packets Received");
2028 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Broadcast Packets Received");
2030 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "64 byte frames received");
2032 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "65-127 byte frames received");
2034 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "128-255 byte frames received");
2036 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "256-511 byte frames received");
2038 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "512-1023 byte frames received");
2040 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "1024-1522 byte frames received");
2042 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "Receive Undersized");
2044 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "Fragmented Packets Received");
2046 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "Oversized Packets Received");
2048 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "Received Jabber");
2050 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Management Packets Received");
2052 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Management Packets Dropped");
2054 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Checksum Errors");
2056
2057 /* Packet Transmission Stats */
2058 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Good Octets Transmitted");
2060 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Total Packets Transmitted");
2062 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Good Packets Transmitted");
2064 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2065 stats->namebuf, "Broadcast Packets Transmitted");
2066 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "Multicast Packets Transmitted");
2068 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "Management Packets Transmitted");
2070 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "64 byte frames transmitted");
2072 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "65-127 byte frames transmitted");
2074 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "128-255 byte frames transmitted");
2076 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "256-511 byte frames transmitted");
2078 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2079 stats->namebuf, "512-1023 byte frames transmitted");
2080 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2081 stats->namebuf, "1024-1522 byte frames transmitted");
2082 } /* ixgbe_add_hw_stats */
2083
2084 static void
2085 ixgbe_clear_evcnt(struct adapter *adapter)
2086 {
2087 struct tx_ring *txr = adapter->tx_rings;
2088 struct rx_ring *rxr = adapter->rx_rings;
2089 struct ixgbe_hw *hw = &adapter->hw;
2090 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2091 int i;
2092
2093 adapter->efbig_tx_dma_setup.ev_count = 0;
2094 adapter->mbuf_defrag_failed.ev_count = 0;
2095 adapter->efbig2_tx_dma_setup.ev_count = 0;
2096 adapter->einval_tx_dma_setup.ev_count = 0;
2097 adapter->other_tx_dma_setup.ev_count = 0;
2098 adapter->eagain_tx_dma_setup.ev_count = 0;
2099 adapter->enomem_tx_dma_setup.ev_count = 0;
2100 adapter->tso_err.ev_count = 0;
2101 adapter->watchdog_events.ev_count = 0;
2102 adapter->admin_irqev.ev_count = 0;
2103 adapter->link_workev.ev_count = 0;
2104 adapter->mod_workev.ev_count = 0;
2105 adapter->msf_workev.ev_count = 0;
2106 adapter->phy_workev.ev_count = 0;
2107
2108 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2109 if (i < __arraycount(stats->mpc)) {
2110 stats->mpc[i].ev_count = 0;
2111 if (hw->mac.type == ixgbe_mac_82598EB)
2112 stats->rnbc[i].ev_count = 0;
2113 }
2114 if (i < __arraycount(stats->pxontxc)) {
2115 stats->pxontxc[i].ev_count = 0;
2116 stats->pxonrxc[i].ev_count = 0;
2117 stats->pxofftxc[i].ev_count = 0;
2118 stats->pxoffrxc[i].ev_count = 0;
2119 if (hw->mac.type >= ixgbe_mac_82599EB)
2120 stats->pxon2offc[i].ev_count = 0;
2121 }
2122 }
2123
2124 txr = adapter->tx_rings;
2125 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2126 adapter->queues[i].irqs.ev_count = 0;
2127 adapter->queues[i].handleq.ev_count = 0;
2128 adapter->queues[i].req.ev_count = 0;
2129 txr->no_desc_avail.ev_count = 0;
2130 txr->total_packets.ev_count = 0;
2131 txr->tso_tx.ev_count = 0;
2132 #ifndef IXGBE_LEGACY_TX
2133 txr->pcq_drops.ev_count = 0;
2134 #endif
2135 txr->q_efbig_tx_dma_setup = 0;
2136 txr->q_mbuf_defrag_failed = 0;
2137 txr->q_efbig2_tx_dma_setup = 0;
2138 txr->q_einval_tx_dma_setup = 0;
2139 txr->q_other_tx_dma_setup = 0;
2140 txr->q_eagain_tx_dma_setup = 0;
2141 txr->q_enomem_tx_dma_setup = 0;
2142 txr->q_tso_err = 0;
2143
2144 if (i < __arraycount(stats->qprc)) {
2145 stats->qprc[i].ev_count = 0;
2146 stats->qptc[i].ev_count = 0;
2147 stats->qbrc[i].ev_count = 0;
2148 stats->qbtc[i].ev_count = 0;
2149 if (hw->mac.type >= ixgbe_mac_82599EB)
2150 stats->qprdc[i].ev_count = 0;
2151 }
2152
2153 rxr->rx_packets.ev_count = 0;
2154 rxr->rx_bytes.ev_count = 0;
2155 rxr->rx_copies.ev_count = 0;
2156 rxr->no_jmbuf.ev_count = 0;
2157 rxr->rx_discarded.ev_count = 0;
2158 }
2159 stats->ipcs.ev_count = 0;
2160 stats->l4cs.ev_count = 0;
2161 stats->ipcs_bad.ev_count = 0;
2162 stats->l4cs_bad.ev_count = 0;
2163 stats->intzero.ev_count = 0;
2164 stats->legint.ev_count = 0;
2165 stats->crcerrs.ev_count = 0;
2166 stats->illerrc.ev_count = 0;
2167 stats->errbc.ev_count = 0;
2168 stats->mspdc.ev_count = 0;
2169 if (hw->mac.type >= ixgbe_mac_X550)
2170 stats->mbsdc.ev_count = 0;
2171 stats->mpctotal.ev_count = 0;
2172 stats->mlfc.ev_count = 0;
2173 stats->mrfc.ev_count = 0;
2174 stats->rlec.ev_count = 0;
2175 stats->lxontxc.ev_count = 0;
2176 stats->lxonrxc.ev_count = 0;
2177 stats->lxofftxc.ev_count = 0;
2178 stats->lxoffrxc.ev_count = 0;
2179
2180 /* Packet Reception Stats */
2181 stats->tor.ev_count = 0;
2182 stats->gorc.ev_count = 0;
2183 stats->tpr.ev_count = 0;
2184 stats->gprc.ev_count = 0;
2185 stats->mprc.ev_count = 0;
2186 stats->bprc.ev_count = 0;
2187 stats->prc64.ev_count = 0;
2188 stats->prc127.ev_count = 0;
2189 stats->prc255.ev_count = 0;
2190 stats->prc511.ev_count = 0;
2191 stats->prc1023.ev_count = 0;
2192 stats->prc1522.ev_count = 0;
2193 stats->ruc.ev_count = 0;
2194 stats->rfc.ev_count = 0;
2195 stats->roc.ev_count = 0;
2196 stats->rjc.ev_count = 0;
2197 stats->mngprc.ev_count = 0;
2198 stats->mngpdc.ev_count = 0;
2199 stats->xec.ev_count = 0;
2200
2201 /* Packet Transmission Stats */
2202 stats->gotc.ev_count = 0;
2203 stats->tpt.ev_count = 0;
2204 stats->gptc.ev_count = 0;
2205 stats->bptc.ev_count = 0;
2206 stats->mptc.ev_count = 0;
2207 stats->mngptc.ev_count = 0;
2208 stats->ptc64.ev_count = 0;
2209 stats->ptc127.ev_count = 0;
2210 stats->ptc255.ev_count = 0;
2211 stats->ptc511.ev_count = 0;
2212 stats->ptc1023.ev_count = 0;
2213 stats->ptc1522.ev_count = 0;
2214 }
2215
2216 /************************************************************************
2217 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2218 *
2219 * Retrieves the TDH value from the hardware
2220 ************************************************************************/
2221 static int
2222 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2223 {
2224 struct sysctlnode node = *rnode;
2225 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2226 struct adapter *adapter;
2227 uint32_t val;
2228
2229 if (!txr)
2230 return (0);
2231
2232 adapter = txr->adapter;
2233 if (ixgbe_fw_recovery_mode_swflag(adapter))
2234 return (EPERM);
2235
2236 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2237 node.sysctl_data = &val;
2238 return sysctl_lookup(SYSCTLFN_CALL(&node));
2239 } /* ixgbe_sysctl_tdh_handler */
2240
2241 /************************************************************************
2242 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2243 *
2244 * Retrieves the TDT value from the hardware
2245 ************************************************************************/
2246 static int
2247 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2248 {
2249 struct sysctlnode node = *rnode;
2250 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2251 struct adapter *adapter;
2252 uint32_t val;
2253
2254 if (!txr)
2255 return (0);
2256
2257 adapter = txr->adapter;
2258 if (ixgbe_fw_recovery_mode_swflag(adapter))
2259 return (EPERM);
2260
2261 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2262 node.sysctl_data = &val;
2263 return sysctl_lookup(SYSCTLFN_CALL(&node));
2264 } /* ixgbe_sysctl_tdt_handler */
2265
2266 /************************************************************************
2267 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2268 * handler function
2269 *
2270 * Retrieves the next_to_check value
2271 ************************************************************************/
2272 static int
2273 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2274 {
2275 struct sysctlnode node = *rnode;
2276 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2277 struct adapter *adapter;
2278 uint32_t val;
2279
2280 if (!rxr)
2281 return (0);
2282
2283 adapter = rxr->adapter;
2284 if (ixgbe_fw_recovery_mode_swflag(adapter))
2285 return (EPERM);
2286
2287 val = rxr->next_to_check;
2288 node.sysctl_data = &val;
2289 return sysctl_lookup(SYSCTLFN_CALL(&node));
2290 } /* ixgbe_sysctl_next_to_check_handler */
2291
2292 /************************************************************************
2293 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2294 *
2295 * Retrieves the RDH value from the hardware
2296 ************************************************************************/
2297 static int
2298 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2299 {
2300 struct sysctlnode node = *rnode;
2301 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2302 struct adapter *adapter;
2303 uint32_t val;
2304
2305 if (!rxr)
2306 return (0);
2307
2308 adapter = rxr->adapter;
2309 if (ixgbe_fw_recovery_mode_swflag(adapter))
2310 return (EPERM);
2311
2312 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2313 node.sysctl_data = &val;
2314 return sysctl_lookup(SYSCTLFN_CALL(&node));
2315 } /* ixgbe_sysctl_rdh_handler */
2316
2317 /************************************************************************
2318 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2319 *
2320 * Retrieves the RDT value from the hardware
2321 ************************************************************************/
2322 static int
2323 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2324 {
2325 struct sysctlnode node = *rnode;
2326 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2327 struct adapter *adapter;
2328 uint32_t val;
2329
2330 if (!rxr)
2331 return (0);
2332
2333 adapter = rxr->adapter;
2334 if (ixgbe_fw_recovery_mode_swflag(adapter))
2335 return (EPERM);
2336
2337 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2338 node.sysctl_data = &val;
2339 return sysctl_lookup(SYSCTLFN_CALL(&node));
2340 } /* ixgbe_sysctl_rdt_handler */
2341
2342 static int
2343 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2344 {
2345 struct ifnet *ifp = &ec->ec_if;
2346 struct adapter *adapter = ifp->if_softc;
2347 int rv;
2348
2349 if (set)
2350 rv = ixgbe_register_vlan(adapter, vid);
2351 else
2352 rv = ixgbe_unregister_vlan(adapter, vid);
2353
2354 if (rv != 0)
2355 return rv;
2356
2357 /*
2358 * Reconfigure VLAN HW tagging when ec_nvlans transitions
2359 * between 0 and 1 in either direction.
2360 */
2361 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2362 ixgbe_setup_vlan_hw_tagging(adapter);
2363
2364 return rv;
2365 }
2366
2367 /************************************************************************
2368 * ixgbe_register_vlan
2369 *
2370 * Run via vlan config EVENT, it enables us to use the
2371 * HW Filter table since we can get the vlan id. This
2372 * just creates the entry in the soft version of the
2373 * VFTA, init will repopulate the real table.
2374 ************************************************************************/
2375 static int
2376 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2377 {
2378 u16 index, bit;
2379 int error;
2380
2381 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2382 return EINVAL;
2383
2384 IXGBE_CORE_LOCK(adapter);
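/*
 * The VFTA covers VLAN IDs 0-4095 as 128 32-bit words: bits
 * 11:5 of the tag select the word and bits 4:0 select the bit
 * within it. For example, vtag 100 (0b000001100100) maps to
 * index 3, bit 4.
 */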
2385 index = (vtag >> 5) & 0x7F;
2386 bit = vtag & 0x1F;
2387 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2388 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2389 true);
2390 IXGBE_CORE_UNLOCK(adapter);
2391 if (error != 0)
2392 error = EACCES;
2393
2394 return error;
2395 } /* ixgbe_register_vlan */
2396
2397 /************************************************************************
2398 * ixgbe_unregister_vlan
2399 *
2400 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2401 ************************************************************************/
2402 static int
2403 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2404 {
2405 u16 index, bit;
2406 int error;
2407
2408 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2409 return EINVAL;
2410
2411 IXGBE_CORE_LOCK(adapter);
2412 index = (vtag >> 5) & 0x7F;
2413 bit = vtag & 0x1F;
2414 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2415 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2416 true);
2417 IXGBE_CORE_UNLOCK(adapter);
2418 if (error != 0)
2419 error = EACCES;
2420
2421 return error;
2422 } /* ixgbe_unregister_vlan */
2423
2424 static void
2425 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2426 {
2427 struct ethercom *ec = &adapter->osdep.ec;
2428 struct ixgbe_hw *hw = &adapter->hw;
2429 struct rx_ring *rxr;
2430 u32 ctrl;
2431 int i;
2432 bool hwtagging;
2433
2434 /* Enable HW tagging only if any vlan is attached */
2435 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2436 && VLAN_ATTACHED(ec);
2437
2438 /* Setup the queues for vlans */
2439 for (i = 0; i < adapter->num_queues; i++) {
2440 rxr = &adapter->rx_rings[i];
2441 /*
2442 * On 82599 and later, the VLAN enable is per-queue in RXDCTL.
2443 */
2444 if (hw->mac.type != ixgbe_mac_82598EB) {
2445 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2446 if (hwtagging)
2447 ctrl |= IXGBE_RXDCTL_VME;
2448 else
2449 ctrl &= ~IXGBE_RXDCTL_VME;
2450 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2451 }
2452 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2453 }
2454
2455 /* VLAN hw tagging for 82598 */
2456 if (hw->mac.type == ixgbe_mac_82598EB) {
2457 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2458 if (hwtagging)
2459 ctrl |= IXGBE_VLNCTRL_VME;
2460 else
2461 ctrl &= ~IXGBE_VLNCTRL_VME;
2462 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2463 }
2464 } /* ixgbe_setup_vlan_hw_tagging */
2465
2466 static void
2467 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2468 {
2469 struct ethercom *ec = &adapter->osdep.ec;
2470 struct ixgbe_hw *hw = &adapter->hw;
2471 int i;
2472 u32 ctrl;
2473 struct vlanid_list *vlanidp;
2474
2475 /*
2476 * This function is called from both if_init() and ifflags_cb()
2477 * on NetBSD.
2478 */
2479
2480 /*
2481 * Part 1:
2482 * Setup VLAN HW tagging
2483 */
2484 ixgbe_setup_vlan_hw_tagging(adapter);
2485
2486 /*
2487 * Part 2:
2488 * Setup VLAN HW filter
2489 */
2490 /* Cleanup shadow_vfta */
2491 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2492 adapter->shadow_vfta[i] = 0;
2493 /* Generate shadow_vfta from ec_vids */
2494 ETHER_LOCK(ec);
2495 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2496 uint32_t idx;
2497
2498 idx = vlanidp->vid / 32;
2499 KASSERT(idx < IXGBE_VFTA_SIZE);
2500 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2501 }
2502 ETHER_UNLOCK(ec);
2503 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2504 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2505
2506 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2507 /* Enable the Filter Table if enabled */
2508 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2509 ctrl |= IXGBE_VLNCTRL_VFE;
2510 else
2511 ctrl &= ~IXGBE_VLNCTRL_VFE;
2512 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2513 } /* ixgbe_setup_vlan_hw_support */
2514
2515 /************************************************************************
2516 * ixgbe_get_slot_info
2517 *
2518 * Get the width and transaction speed of
2519 * the slot this adapter is plugged into.
2520 ************************************************************************/
2521 static void
2522 ixgbe_get_slot_info(struct adapter *adapter)
2523 {
2524 device_t dev = adapter->dev;
2525 struct ixgbe_hw *hw = &adapter->hw;
2526 u32 offset;
2527 u16 link;
2528 int bus_info_valid = TRUE;
2529
2530 /* Some devices are behind an internal bridge */
2531 switch (hw->device_id) {
2532 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2533 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2534 goto get_parent_info;
2535 default:
2536 break;
2537 }
2538
2539 ixgbe_get_bus_info(hw);
2540
2541 /*
2542 * Some devices don't use PCI-E; for those, skip the report
2543 * rather than display "Unknown" for bus speed and width.
2544 */
2545 switch (hw->mac.type) {
2546 case ixgbe_mac_X550EM_x:
2547 case ixgbe_mac_X550EM_a:
2548 return;
2549 default:
2550 goto display;
2551 }
2552
2553 get_parent_info:
2554 /*
2555 * For the Quad port adapter we need to parse back
2556 * up the PCI tree to find the speed of the expansion
2557 * slot into which this adapter is plugged. A bit more work.
2558 */
2559 dev = device_parent(device_parent(dev));
2560 #if 0
2561 #ifdef IXGBE_DEBUG
2562 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2563 pci_get_slot(dev), pci_get_function(dev));
2564 #endif
2565 dev = device_parent(device_parent(dev));
2566 #ifdef IXGBE_DEBUG
2567 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2568 pci_get_slot(dev), pci_get_function(dev));
2569 #endif
2570 #endif
2571 /* Now get the PCI Express Capabilities offset */
2572 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2573 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2574 /*
2575 * Hmm...can't get PCI-Express capabilities.
2576 * Falling back to default method.
2577 */
2578 bus_info_valid = FALSE;
2579 ixgbe_get_bus_info(hw);
2580 goto display;
2581 }
2582 /* ...and read the Link Status Register (the upper 16 bits of LCSR) */
2583 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2584 offset + PCIE_LCSR) >> 16;
2585 ixgbe_set_pci_config_data_generic(hw, link);
2586
2587 display:
2588 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2589 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2590 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2591 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2592 "Unknown"),
2593 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2594 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2595 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2596 "Unknown"));
2597
2598 if (bus_info_valid) {
2599 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2600 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2601 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2602 device_printf(dev, "PCI-Express bandwidth available"
2603 " for this card is not sufficient for"
2604 " optimal performance.\n");
2605 device_printf(dev, "For optimal performance a x8 "
2606 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2607 }
2608 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2609 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2610 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2611 device_printf(dev, "PCI-Express bandwidth available"
2612 " for this card is not sufficient for"
2613 " optimal performance.\n");
2614 device_printf(dev, "For optimal performance a x8 "
2615 "PCIE Gen3 slot is required.\n");
2616 }
2617 } else
2618 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
2619
2620 return;
2621 } /* ixgbe_get_slot_info */
2622
2623 /************************************************************************
2624 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2625 ************************************************************************/
2626 static inline void
2627 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2628 {
2629 struct ixgbe_hw *hw = &adapter->hw;
2630 struct ix_queue *que = &adapter->queues[vector];
2631 u64 queue = 1ULL << vector;
2632 u32 mask;
2633
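/*
 * disabled_count acts as a nesting counter: the interrupt is
 * actually re-enabled in hardware only once the count drops
 * back to zero, balancing any nested disables.
 */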
2634 mutex_enter(&que->dc_mtx);
2635 if (que->disabled_count > 0 && --que->disabled_count > 0)
2636 goto out;
2637
2638 if (hw->mac.type == ixgbe_mac_82598EB) {
2639 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2640 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2641 } else {
2642 mask = (queue & 0xFFFFFFFF);
2643 if (mask)
2644 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2645 mask = (queue >> 32);
2646 if (mask)
2647 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2648 }
2649 out:
2650 mutex_exit(&que->dc_mtx);
2651 } /* ixgbe_enable_queue */
2652
2653 /************************************************************************
2654 * ixgbe_disable_queue_internal
2655 ************************************************************************/
2656 static inline void
2657 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2658 {
2659 struct ixgbe_hw *hw = &adapter->hw;
2660 struct ix_queue *que = &adapter->queues[vector];
2661 u64 queue = 1ULL << vector;
2662 u32 mask;
2663
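/*
 * nestok selects the nesting behavior: when true, disabling an
 * already disabled queue bumps the count so a matching number
 * of enables is needed; when false, the extra disable is not
 * counted and a single enable will re-enable the queue.
 */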
2664 mutex_enter(&que->dc_mtx);
2665
2666 if (que->disabled_count > 0) {
2667 if (nestok)
2668 que->disabled_count++;
2669 goto out;
2670 }
2671 que->disabled_count++;
2672
2673 if (hw->mac.type == ixgbe_mac_82598EB) {
2674 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2675 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2676 } else {
2677 mask = (queue & 0xFFFFFFFF);
2678 if (mask)
2679 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2680 mask = (queue >> 32);
2681 if (mask)
2682 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2683 }
2684 out:
2685 mutex_exit(&que->dc_mtx);
2686 } /* ixgbe_disable_queue_internal */
2687
2688 /************************************************************************
2689 * ixgbe_disable_queue
2690 ************************************************************************/
2691 static inline void
2692 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2693 {
2694
2695 ixgbe_disable_queue_internal(adapter, vector, true);
2696 } /* ixgbe_disable_queue */
2697
2698 /************************************************************************
2699 * ixgbe_sched_handle_que - schedule deferred packet processing
2700 ************************************************************************/
2701 static inline void
2702 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2703 {
2704
2705 if (que->txrx_use_workqueue) {
2706 /*
2707 * adapter->que_wq is bound to each CPU rather than to each
2708 * NIC queue, to keep the number of workqueue kthreads down.
2709 * Because interrupt affinity must be honored here, the
2710 * workqueue has to be WQ_PERCPU. If a WQ_PERCPU workqueue
2711 * were created for each NIC queue instead, the number of
2712 * kthreads would be (number of used NIC queues) *
2713 * (number of CPUs), which is most often (number of CPUs) ^ 2.
2714 *
2715 * Running the handler twice for the same NIC queue is
2716 * prevented by masking that queue's interrupt, and
2717 * different NIC queues use different struct work
2718 * (que->wq_cookie), so no "enqueued" flag is needed to
2719 * guard against enqueueing the same work twice via
2720 * workqueue_enqueue().
2721 */
2722 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2723 } else {
2724 softint_schedule(que->que_si);
2725 }
2726 }
2727
2728 /************************************************************************
2729 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2730 ************************************************************************/
2731 static int
2732 ixgbe_msix_que(void *arg)
2733 {
2734 struct ix_queue *que = arg;
2735 struct adapter *adapter = que->adapter;
2736 struct ifnet *ifp = adapter->ifp;
2737 struct tx_ring *txr = que->txr;
2738 struct rx_ring *rxr = que->rxr;
2739 bool more;
2740 u32 newitr = 0;
2741
2742 /* Protect against spurious interrupts */
2743 if ((ifp->if_flags & IFF_RUNNING) == 0)
2744 return 0;
2745
2746 ixgbe_disable_queue(adapter, que->msix);
2747 ++que->irqs.ev_count;
2748
2749 /*
2750 * Don't change que->txrx_use_workqueue after this point, to avoid
2751 * flip-flopping softint/workqueue mode within one deferred pass.
2752 */
2753 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2754
2755 #ifdef __NetBSD__
2756 /* Don't run ixgbe_rxeof in interrupt context */
2757 more = true;
2758 #else
2759 more = ixgbe_rxeof(que);
2760 #endif
2761
2762 IXGBE_TX_LOCK(txr);
2763 ixgbe_txeof(txr);
2764 IXGBE_TX_UNLOCK(txr);
2765
2766 /* Do AIM now? */
2767
2768 if (adapter->enable_aim == false)
2769 goto no_calc;
2770 /*
2771 * Do Adaptive Interrupt Moderation:
2772 * - Write out last calculated setting
2773 * - Calculate based on average size over
2774 * the last interval.
2775 */
2776 if (que->eitr_setting)
2777 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2778
2779 que->eitr_setting = 0;
2780
2781 /* Idle, do nothing */
2782 if ((txr->bytes == 0) && (rxr->bytes == 0))
2783 goto no_calc;
2784
2785 if ((txr->bytes) && (txr->packets))
2786 newitr = txr->bytes/txr->packets;
2787 if ((rxr->bytes) && (rxr->packets))
2788 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2789 newitr += 24; /* account for hardware frame, crc */
2790
2791 /* set an upper boundary */
2792 newitr = uimin(newitr, 3000);
2793
2794 /* Be nice to the mid range */
2795 if ((newitr > 300) && (newitr < 1200))
2796 newitr = (newitr / 3);
2797 else
2798 newitr = (newitr / 2);
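/*
 * Example: an average frame of 1500 bytes gives newitr = 1524
 * after the +24 adjustment; that is above the 300-1200 mid
 * range, so it is halved to 762 before being written to EITR.
 */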
2799
2800 /*
2801 * When RSC is used, the ITR interval must be larger than RSC_DELAY
2802 * (currently 2us). On 100M (and presumably 10M, though that case is
2803 * not documented) the minimum interval already exceeds 2us, but on
2804 * 1G and higher it does not, so enforce a floor here.
2805 */
2806 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2807 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2808 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2809 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2810 }
2811
2812 /* save for next interrupt */
2813 que->eitr_setting = newitr;
2814
2815 /* Reset state */
2816 txr->bytes = 0;
2817 txr->packets = 0;
2818 rxr->bytes = 0;
2819 rxr->packets = 0;
2820
2821 no_calc:
2822 if (more)
2823 ixgbe_sched_handle_que(adapter, que);
2824 else
2825 ixgbe_enable_queue(adapter, que->msix);
2826
2827 return 1;
2828 } /* ixgbe_msix_que */
2829
2830 /************************************************************************
2831 * ixgbe_media_status - Media Ioctl callback
2832 *
2833 * Called whenever the user queries the status of
2834 * the interface using ifconfig.
2835 ************************************************************************/
2836 static void
2837 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2838 {
2839 struct adapter *adapter = ifp->if_softc;
2840 struct ixgbe_hw *hw = &adapter->hw;
2841 int layer;
2842
2843 INIT_DEBUGOUT("ixgbe_media_status: begin");
2844 ixgbe_update_link_status(adapter);
2845
2846 ifmr->ifm_status = IFM_AVALID;
2847 ifmr->ifm_active = IFM_ETHER;
2848
2849 if (adapter->link_active != LINK_STATE_UP) {
2850 ifmr->ifm_active |= IFM_NONE;
2851 return;
2852 }
2853
2854 ifmr->ifm_status |= IFM_ACTIVE;
2855 layer = adapter->phy_layer;
2856
2857 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2858 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2859 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2860 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2861 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2862 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2863 switch (adapter->link_speed) {
2864 case IXGBE_LINK_SPEED_10GB_FULL:
2865 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2866 break;
2867 case IXGBE_LINK_SPEED_5GB_FULL:
2868 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2869 break;
2870 case IXGBE_LINK_SPEED_2_5GB_FULL:
2871 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2872 break;
2873 case IXGBE_LINK_SPEED_1GB_FULL:
2874 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2875 break;
2876 case IXGBE_LINK_SPEED_100_FULL:
2877 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2878 break;
2879 case IXGBE_LINK_SPEED_10_FULL:
2880 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2881 break;
2882 }
2883 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2884 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2885 switch (adapter->link_speed) {
2886 case IXGBE_LINK_SPEED_10GB_FULL:
2887 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2888 break;
2889 }
2890 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2891 switch (adapter->link_speed) {
2892 case IXGBE_LINK_SPEED_10GB_FULL:
2893 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2894 break;
2895 case IXGBE_LINK_SPEED_1GB_FULL:
2896 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2897 break;
2898 }
2899 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2900 switch (adapter->link_speed) {
2901 case IXGBE_LINK_SPEED_10GB_FULL:
2902 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2903 break;
2904 case IXGBE_LINK_SPEED_1GB_FULL:
2905 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2906 break;
2907 }
2908 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2909 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2910 switch (adapter->link_speed) {
2911 case IXGBE_LINK_SPEED_10GB_FULL:
2912 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2913 break;
2914 case IXGBE_LINK_SPEED_1GB_FULL:
2915 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2916 break;
2917 }
2918 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2919 switch (adapter->link_speed) {
2920 case IXGBE_LINK_SPEED_10GB_FULL:
2921 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2922 break;
2923 }
2924 /*
2925 * XXX: These need to use the proper media types once
2926 * they're added.
2927 */
2928 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2929 switch (adapter->link_speed) {
2930 case IXGBE_LINK_SPEED_10GB_FULL:
2931 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2932 break;
2933 case IXGBE_LINK_SPEED_2_5GB_FULL:
2934 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2935 break;
2936 case IXGBE_LINK_SPEED_1GB_FULL:
2937 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2938 break;
2939 }
2940 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2941 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2942 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2943 switch (adapter->link_speed) {
2944 case IXGBE_LINK_SPEED_10GB_FULL:
2945 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2946 break;
2947 case IXGBE_LINK_SPEED_2_5GB_FULL:
2948 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2949 break;
2950 case IXGBE_LINK_SPEED_1GB_FULL:
2951 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2952 break;
2953 }
2954
2955 /* If nothing is recognized... */
2956 #if 0
2957 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2958 ifmr->ifm_active |= IFM_UNKNOWN;
2959 #endif
2960
2961 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2962
2963 /* Display current flow control setting used on link */
2964 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2965 hw->fc.current_mode == ixgbe_fc_full)
2966 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2967 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2968 hw->fc.current_mode == ixgbe_fc_full)
2969 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2970
2971 return;
2972 } /* ixgbe_media_status */
2973
2974 /************************************************************************
2975 * ixgbe_media_change - Media Ioctl callback
2976 *
2977 * Called when the user changes speed/duplex using
2978 * media/mediaopt option with ifconfig.
2979 ************************************************************************/
2980 static int
2981 ixgbe_media_change(struct ifnet *ifp)
2982 {
2983 struct adapter *adapter = ifp->if_softc;
2984 struct ifmedia *ifm = &adapter->media;
2985 struct ixgbe_hw *hw = &adapter->hw;
2986 ixgbe_link_speed speed = 0;
2987 ixgbe_link_speed link_caps = 0;
2988 bool negotiate = false;
2989 s32 err = IXGBE_NOT_IMPLEMENTED;
2990
2991 INIT_DEBUGOUT("ixgbe_media_change: begin");
2992
2993 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2994 return (EINVAL);
2995
2996 if (hw->phy.media_type == ixgbe_media_type_backplane)
2997 return (EPERM);
2998
2999 /*
3000 * We don't actually need to check against the supported
3001 * media types of the adapter; ifmedia will take care of
3002 * that for us.
3003 */
3004 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3005 case IFM_AUTO:
3006 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3007 &negotiate);
3008 if (err != IXGBE_SUCCESS) {
3009 device_printf(adapter->dev, "Unable to determine "
3010 "supported advertise speeds\n");
3011 return (ENODEV);
3012 }
3013 speed |= link_caps;
3014 break;
3015 case IFM_10G_T:
3016 case IFM_10G_LRM:
3017 case IFM_10G_LR:
3018 case IFM_10G_TWINAX:
3019 case IFM_10G_SR:
3020 case IFM_10G_CX4:
3021 case IFM_10G_KR:
3022 case IFM_10G_KX4:
3023 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3024 break;
3025 case IFM_5000_T:
3026 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3027 break;
3028 case IFM_2500_T:
3029 case IFM_2500_KX:
3030 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3031 break;
3032 case IFM_1000_T:
3033 case IFM_1000_LX:
3034 case IFM_1000_SX:
3035 case IFM_1000_KX:
3036 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3037 break;
3038 case IFM_100_TX:
3039 speed |= IXGBE_LINK_SPEED_100_FULL;
3040 break;
3041 case IFM_10_T:
3042 speed |= IXGBE_LINK_SPEED_10_FULL;
3043 break;
3044 case IFM_NONE:
3045 break;
3046 default:
3047 goto invalid;
3048 }
3049
3050 hw->mac.autotry_restart = TRUE;
3051 hw->mac.ops.setup_link(hw, speed, TRUE);
3052 adapter->advertise = 0;
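/*
 * Record the selection as a speed bitmap (presumably the same
 * encoding as the advertise sysctl): 0x01 = 100M, 0x02 = 1G,
 * 0x04 = 10G, 0x08 = 10M, 0x10 = 2.5G, 0x20 = 5G.
 */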
3053 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3054 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3055 adapter->advertise |= 1 << 2;
3056 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3057 adapter->advertise |= 1 << 1;
3058 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3059 adapter->advertise |= 1 << 0;
3060 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3061 adapter->advertise |= 1 << 3;
3062 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3063 adapter->advertise |= 1 << 4;
3064 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3065 adapter->advertise |= 1 << 5;
3066 }
3067
3068 return (0);
3069
3070 invalid:
3071 device_printf(adapter->dev, "Invalid media type!\n");
3072
3073 return (EINVAL);
3074 } /* ixgbe_media_change */
3075
3076 /************************************************************************
3077 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3078 ************************************************************************/
3079 static int
3080 ixgbe_msix_admin(void *arg)
3081 {
3082 struct adapter *adapter = arg;
3083 struct ixgbe_hw *hw = &adapter->hw;
3084 u32 eicr, eicr_mask;
3085 u32 task_requests = 0;
3086 s32 retval;
3087
3088 ++adapter->admin_irqev.ev_count;
3089
3090 /* Pause other interrupts */
3091 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3092
3093 /* First get the cause */
3094 /*
3095 * The 82598, 82599, X540 and X550 specifications say the EICS
3096 * register is write-only. However, Linux reads EICS instead of
3097 * EICR as a workaround for silicon errata; the read-to-clear
3098 * mechanism of the EICR register appears to be unreliable.
3099 */
3100 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3101 /* Be sure the queue bits are not cleared */
3102 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3103 /* Clear interrupt with write */
3104 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3105
3106 if (ixgbe_is_sfp(hw)) {
3107 /* Pluggable optics-related interrupt */
3108 if (hw->mac.type >= ixgbe_mac_X540)
3109 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3110 else
3111 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3112
3113 /*
3114 * An interrupt might not arrive when a module is inserted.
3115 * If a link status change interrupt occurs while the driver
3116 * still regards the SFP as unplugged, run the module task
3117 * first and then handle the LSC interrupt.
3118 */
3119 if ((eicr & eicr_mask)
3120 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3121 && (eicr & IXGBE_EICR_LSC))) {
3122 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3123 task_requests |= IXGBE_REQUEST_TASK_MOD;
3124 }
3125
3126 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3127 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3128 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3129 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3130 task_requests |= IXGBE_REQUEST_TASK_MSF;
3131 }
3132 }
3133
3134 /* Link status change */
3135 if (eicr & IXGBE_EICR_LSC) {
3136 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3137 task_requests |= IXGBE_REQUEST_TASK_LSC;
3138 }
3139
3140 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3141 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3142 (eicr & IXGBE_EICR_FLOW_DIR)) {
3143 /* This is probably overkill :) */
3144 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3145 return 1;
3146 /* Disable the interrupt */
3147 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3148 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3149 }
3150
3151 if (eicr & IXGBE_EICR_ECC) {
3152 device_printf(adapter->dev,
3153 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3154 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3155 }
3156
3157 /* Check for over temp condition */
3158 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3159 switch (adapter->hw.mac.type) {
3160 case ixgbe_mac_X550EM_a:
3161 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3162 break;
3163 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3164 IXGBE_EICR_GPI_SDP0_X550EM_a);
3165 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3166 IXGBE_EICR_GPI_SDP0_X550EM_a);
3167 retval = hw->phy.ops.check_overtemp(hw);
3168 if (retval != IXGBE_ERR_OVERTEMP)
3169 break;
3170 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3171 device_printf(adapter->dev, "System shutdown required!\n");
3172 break;
3173 default:
3174 if (!(eicr & IXGBE_EICR_TS))
3175 break;
3176 retval = hw->phy.ops.check_overtemp(hw);
3177 if (retval != IXGBE_ERR_OVERTEMP)
3178 break;
3179 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3180 device_printf(adapter->dev, "System shutdown required!\n");
3181 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3182 break;
3183 }
3184 }
3185
3186 /* Check for VF message */
3187 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3188 (eicr & IXGBE_EICR_MAILBOX)) {
3189 task_requests |= IXGBE_REQUEST_TASK_MBX;
3190 }
3191 }
3192
3193 /* Check for fan failure */
3194 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3195 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3196 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3197 }
3198
3199 /* External PHY interrupt */
3200 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3201 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3202 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3203 task_requests |= IXGBE_REQUEST_TASK_PHY;
3204 }
3205
3206 if (task_requests != 0) {
3207 /* Re-enabling other interrupts is done in the admin task */
3208 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
3209 atomic_or_32(&adapter->task_requests, task_requests);
3210 ixgbe_schedule_admin_tasklet(adapter);
3211 } else {
3212 /* Re-enable other interrupts */
3213 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3214 }
3215
3216 return 1;
3217 } /* ixgbe_msix_admin */
3218
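/*
 * Write an EITR register. On 82598 both the high and low 16
 * bits must be written to reset the counter; on newer MACs
 * the CNT_WDIS bit is set so the write does not disturb the
 * running countdown (this mirrors what other OS drivers do).
 */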
3219 static void
3220 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3221 {
3222
3223 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3224 itr |= itr << 16;
3225 else
3226 itr |= IXGBE_EITR_CNT_WDIS;
3227
3228 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3229 }
3230
3231
3232 /************************************************************************
3233 * ixgbe_sysctl_interrupt_rate_handler
3234 ************************************************************************/
3235 static int
3236 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3237 {
3238 struct sysctlnode node = *rnode;
3239 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3240 struct adapter *adapter;
3241 uint32_t reg, usec, rate;
3242 int error;
3243
3244 if (que == NULL)
3245 return 0;
3246
3247 adapter = que->adapter;
3248 if (ixgbe_fw_recovery_mode_swflag(adapter))
3249 return (EPERM);
3250
3251 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3252 usec = ((reg & 0x0FF8) >> 3);
3253 if (usec > 0)
3254 rate = 500000 / usec;
3255 else
3256 rate = 0;
3257 node.sysctl_data = &rate;
3258 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3259 if (error || newp == NULL)
3260 return error;
3261 reg &= ~0xfff; /* default, no limitation */
3262 if (rate > 0 && rate < 500000) {
3263 if (rate < 1000)
3264 rate = 1000;
3265 reg |= ((4000000 / rate) & 0xff8);
3266 /*
3267 * When RSC is used, ITR interval must be larger than
3268 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3269 * The minimum value is always greater than 2us on 100M
3270 * (and 10M?(not documented)), but it's not on 1G and higher.
3271 */
3272 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3273 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3274 if ((adapter->num_queues > 1)
3275 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3276 return EINVAL;
3277 }
3278 ixgbe_max_interrupt_rate = rate;
3279 } else
3280 ixgbe_max_interrupt_rate = 0;
3281 ixgbe_eitr_write(adapter, que->msix, reg);
3282
3283 return (0);
3284 } /* ixgbe_sysctl_interrupt_rate_handler */
3285
3286 const struct sysctlnode *
3287 ixgbe_sysctl_instance(struct adapter *adapter)
3288 {
3289 const char *dvname;
3290 struct sysctllog **log;
3291 int rc;
3292 const struct sysctlnode *rnode;
3293
3294 if (adapter->sysctltop != NULL)
3295 return adapter->sysctltop;
3296
3297 log = &adapter->sysctllog;
3298 dvname = device_xname(adapter->dev);
3299
3300 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3301 0, CTLTYPE_NODE, dvname,
3302 SYSCTL_DESCR("ixgbe information and settings"),
3303 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3304 goto err;
3305
3306 return rnode;
3307 err:
3308 device_printf(adapter->dev,
3309 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3310 return NULL;
3311 }
3312
3313 /************************************************************************
3314 * ixgbe_add_device_sysctls
3315 ************************************************************************/
3316 static void
3317 ixgbe_add_device_sysctls(struct adapter *adapter)
3318 {
3319 device_t dev = adapter->dev;
3320 struct ixgbe_hw *hw = &adapter->hw;
3321 struct sysctllog **log;
3322 const struct sysctlnode *rnode, *cnode;
3323
3324 log = &adapter->sysctllog;
3325
3326 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3327 aprint_error_dev(dev, "could not create sysctl root\n");
3328 return;
3329 }
3330
3331 if (sysctl_createv(log, 0, &rnode, &cnode,
3332 CTLFLAG_READWRITE, CTLTYPE_INT,
3333 "debug", SYSCTL_DESCR("Debug Info"),
3334 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3335 aprint_error_dev(dev, "could not create sysctl\n");
3336
3337 if (sysctl_createv(log, 0, &rnode, &cnode,
3338 CTLFLAG_READONLY, CTLTYPE_INT,
3339 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3340 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3341 aprint_error_dev(dev, "could not create sysctl\n");
3342
3343 if (sysctl_createv(log, 0, &rnode, &cnode,
3344 CTLFLAG_READONLY, CTLTYPE_INT,
3345 "num_queues", SYSCTL_DESCR("Number of queues"),
3346 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3347 aprint_error_dev(dev, "could not create sysctl\n");
3348
3349 /* Sysctls for all devices */
3350 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3351 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3352 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3353 CTL_EOL) != 0)
3354 aprint_error_dev(dev, "could not create sysctl\n");
3355
3356 adapter->enable_aim = ixgbe_enable_aim;
3357 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3358 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3359 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3360 aprint_error_dev(dev, "could not create sysctl\n");
3361
3362 if (sysctl_createv(log, 0, &rnode, &cnode,
3363 CTLFLAG_READWRITE, CTLTYPE_INT,
3364 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3365 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3366 CTL_EOL) != 0)
3367 aprint_error_dev(dev, "could not create sysctl\n");
3368
3369 /*
3370 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3371 * it causesflip-flopping softint/workqueue mode in one deferred
3372 * processing. Therefore, preempt_disable()/preempt_enable() are
3373 * required in ixgbe_sched_handle_que() to avoid
3374 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3375 * I think changing "que->txrx_use_workqueue" in interrupt handler
3376 * is lighter than doing preempt_disable()/preempt_enable() in every
3377 * ixgbe_sched_handle_que().
3378 */
3379 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3380 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3381 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3382 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3383 aprint_error_dev(dev, "could not create sysctl\n");
3384
3385 #ifdef IXGBE_DEBUG
3386 /* testing sysctls (for all devices) */
3387 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3388 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3389 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3390 CTL_EOL) != 0)
3391 aprint_error_dev(dev, "could not create sysctl\n");
3392
3393 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3394 CTLTYPE_STRING, "print_rss_config",
3395 SYSCTL_DESCR("Prints RSS Configuration"),
3396 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3397 CTL_EOL) != 0)
3398 aprint_error_dev(dev, "could not create sysctl\n");
3399 #endif
3400 /* for X550 series devices */
3401 if (hw->mac.type >= ixgbe_mac_X550)
3402 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3403 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3404 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3405 CTL_EOL) != 0)
3406 aprint_error_dev(dev, "could not create sysctl\n");
3407
3408 /* for WoL-capable devices */
3409 if (adapter->wol_support) {
3410 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3411 CTLTYPE_BOOL, "wol_enable",
3412 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3413 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3414 CTL_EOL) != 0)
3415 aprint_error_dev(dev, "could not create sysctl\n");
3416
3417 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3418 CTLTYPE_INT, "wufc",
3419 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3420 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3421 CTL_EOL) != 0)
3422 aprint_error_dev(dev, "could not create sysctl\n");
3423 }
3424
3425 /* for X552/X557-AT devices */
3426 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3427 const struct sysctlnode *phy_node;
3428
3429 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3430 "phy", SYSCTL_DESCR("External PHY sysctls"),
3431 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3432 aprint_error_dev(dev, "could not create sysctl\n");
3433 return;
3434 }
3435
3436 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3437 CTLTYPE_INT, "temp",
3438 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3439 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3440 CTL_EOL) != 0)
3441 aprint_error_dev(dev, "could not create sysctl\n");
3442
3443 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3444 CTLTYPE_INT, "overtemp_occurred",
3445 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3446 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3447 CTL_CREATE, CTL_EOL) != 0)
3448 aprint_error_dev(dev, "could not create sysctl\n");
3449 }
3450
3451 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3452 && (hw->phy.type == ixgbe_phy_fw))
3453 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3454 CTLTYPE_BOOL, "force_10_100_autonego",
3455 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3456 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3457 CTL_CREATE, CTL_EOL) != 0)
3458 aprint_error_dev(dev, "could not create sysctl\n");
3459
3460 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3461 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3462 CTLTYPE_INT, "eee_state",
3463 SYSCTL_DESCR("EEE Power Save State"),
3464 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3465 CTL_EOL) != 0)
3466 aprint_error_dev(dev, "could not create sysctl\n");
3467 }
3468 } /* ixgbe_add_device_sysctls */
3469
3470 /************************************************************************
3471 * ixgbe_allocate_pci_resources
3472 ************************************************************************/
3473 static int
3474 ixgbe_allocate_pci_resources(struct adapter *adapter,
3475 const struct pci_attach_args *pa)
3476 {
3477 pcireg_t memtype, csr;
3478 device_t dev = adapter->dev;
3479 bus_addr_t addr;
3480 int flags;
3481
3482 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3483 switch (memtype) {
3484 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3485 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3486 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3487 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3488 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3489 goto map_err;
3490 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3491 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3492 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3493 }
3494 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3495 adapter->osdep.mem_size, flags,
3496 &adapter->osdep.mem_bus_space_handle) != 0) {
3497 map_err:
3498 adapter->osdep.mem_size = 0;
3499 aprint_error_dev(dev, "unable to map BAR0\n");
3500 return ENXIO;
3501 }
3502 /*
3503 * Enable address decoding for memory range in case BIOS or
3504 * UEFI don't set it.
3505 */
3506 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3507 PCI_COMMAND_STATUS_REG);
3508 csr |= PCI_COMMAND_MEM_ENABLE;
3509 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3510 csr);
3511 break;
3512 default:
3513 aprint_error_dev(dev, "unexpected type on BAR0\n");
3514 return ENXIO;
3515 }
3516
3517 return (0);
3518 } /* ixgbe_allocate_pci_resources */
3519
3520 static void
3521 ixgbe_free_deferred_handlers(struct adapter *adapter)
3522 {
3523 struct ix_queue *que = adapter->queues;
3524 struct tx_ring *txr = adapter->tx_rings;
3525 int i;
3526
3527 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3528 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3529 if (txr->txr_si != NULL)
3530 softint_disestablish(txr->txr_si);
3531 }
3532 if (que->que_si != NULL)
3533 softint_disestablish(que->que_si);
3534 }
3535 if (adapter->txr_wq != NULL)
3536 workqueue_destroy(adapter->txr_wq);
3537 if (adapter->txr_wq_enqueued != NULL)
3538 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3539 if (adapter->que_wq != NULL)
3540 workqueue_destroy(adapter->que_wq);
3541
3542 if (adapter->admin_wq != NULL) {
3543 workqueue_destroy(adapter->admin_wq);
3544 adapter->admin_wq = NULL;
3545 }
3546 if (adapter->timer_wq != NULL) {
3547 workqueue_destroy(adapter->timer_wq);
3548 adapter->timer_wq = NULL;
3549 }
3550 if (adapter->recovery_mode_timer_wq != NULL) {
3551 /*
3552 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3553 * the recovery_mode_timer workqueue, so call it here.
3554 */
3555 workqueue_wait(adapter->recovery_mode_timer_wq,
3556 &adapter->recovery_mode_timer_wc);
3557 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3558 workqueue_destroy(adapter->recovery_mode_timer_wq);
3559 adapter->recovery_mode_timer_wq = NULL;
3560 }
3561 } /* ixgbe_free_deferred_handlers */
3562
3563 /************************************************************************
3564 * ixgbe_detach - Device removal routine
3565 *
3566 * Called when the driver is being removed.
3567 * Stops the adapter and deallocates all the resources
3568 * that were allocated for driver operation.
3569 *
3570 * return 0 on success, positive on failure
3571 ************************************************************************/
3572 static int
3573 ixgbe_detach(device_t dev, int flags)
3574 {
3575 struct adapter *adapter = device_private(dev);
3576 struct rx_ring *rxr = adapter->rx_rings;
3577 struct tx_ring *txr = adapter->tx_rings;
3578 struct ixgbe_hw *hw = &adapter->hw;
3579 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3580 u32 ctrl_ext;
3581 int i;
3582
3583 INIT_DEBUGOUT("ixgbe_detach: begin");
3584 if (adapter->osdep.attached == false)
3585 return 0;
3586
3587 if (ixgbe_pci_iov_detach(dev) != 0) {
3588 device_printf(dev, "SR-IOV in use; detach first.\n");
3589 return (EBUSY);
3590 }
3591
3592 #if NVLAN > 0
3593 /* Make sure VLANs are not using driver */
3594 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3595 ; /* nothing to do: no VLANs */
3596 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3597 vlan_ifdetach(adapter->ifp);
3598 else {
3599 aprint_error_dev(dev, "VLANs in use, detach first\n");
3600 return (EBUSY);
3601 }
3602 #endif
3603
3604 adapter->osdep.detaching = true;
3605 /*
3606 * Stop the interface. ixgbe_setup_low_power_mode() calls
3607 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3608 * directly.
3609 */
3610 ixgbe_setup_low_power_mode(adapter);
3611
3612 callout_halt(&adapter->timer, NULL);
3613 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3614 callout_halt(&adapter->recovery_mode_timer, NULL);
3615
3616 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3617 atomic_store_relaxed(&adapter->admin_pending, 0);
3618 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3619 atomic_store_relaxed(&adapter->timer_pending, 0);
3620
3621 pmf_device_deregister(dev);
3622
3623 ether_ifdetach(adapter->ifp);
3624
3625 ixgbe_free_deferred_handlers(adapter);
3626
3627 /* let hardware know driver is unloading */
3628 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3629 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3630 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3631
3632 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3633 netmap_detach(adapter->ifp);
3634
3635 ixgbe_free_pci_resources(adapter);
3636 #if 0 /* XXX the NetBSD port is probably missing something here */
3637 bus_generic_detach(dev);
3638 #endif
3639 if_detach(adapter->ifp);
3640 ifmedia_fini(&adapter->media);
3641 if_percpuq_destroy(adapter->ipq);
3642
3643 sysctl_teardown(&adapter->sysctllog);
3644 evcnt_detach(&adapter->efbig_tx_dma_setup);
3645 evcnt_detach(&adapter->mbuf_defrag_failed);
3646 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3647 evcnt_detach(&adapter->einval_tx_dma_setup);
3648 evcnt_detach(&adapter->other_tx_dma_setup);
3649 evcnt_detach(&adapter->eagain_tx_dma_setup);
3650 evcnt_detach(&adapter->enomem_tx_dma_setup);
3651 evcnt_detach(&adapter->watchdog_events);
3652 evcnt_detach(&adapter->tso_err);
3653 evcnt_detach(&adapter->admin_irqev);
3654 evcnt_detach(&adapter->link_workev);
3655 evcnt_detach(&adapter->mod_workev);
3656 evcnt_detach(&adapter->msf_workev);
3657 evcnt_detach(&adapter->phy_workev);
3658
3659 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3660 if (i < __arraycount(stats->mpc)) {
3661 evcnt_detach(&stats->mpc[i]);
3662 if (hw->mac.type == ixgbe_mac_82598EB)
3663 evcnt_detach(&stats->rnbc[i]);
3664 }
3665 if (i < __arraycount(stats->pxontxc)) {
3666 evcnt_detach(&stats->pxontxc[i]);
3667 evcnt_detach(&stats->pxonrxc[i]);
3668 evcnt_detach(&stats->pxofftxc[i]);
3669 evcnt_detach(&stats->pxoffrxc[i]);
3670 if (hw->mac.type >= ixgbe_mac_82599EB)
3671 evcnt_detach(&stats->pxon2offc[i]);
3672 }
3673 }
3674
3675 txr = adapter->tx_rings;
3676 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3677 evcnt_detach(&adapter->queues[i].irqs);
3678 evcnt_detach(&adapter->queues[i].handleq);
3679 evcnt_detach(&adapter->queues[i].req);
3680 evcnt_detach(&txr->no_desc_avail);
3681 evcnt_detach(&txr->total_packets);
3682 evcnt_detach(&txr->tso_tx);
3683 #ifndef IXGBE_LEGACY_TX
3684 evcnt_detach(&txr->pcq_drops);
3685 #endif
3686
3687 if (i < __arraycount(stats->qprc)) {
3688 evcnt_detach(&stats->qprc[i]);
3689 evcnt_detach(&stats->qptc[i]);
3690 evcnt_detach(&stats->qbrc[i]);
3691 evcnt_detach(&stats->qbtc[i]);
3692 if (hw->mac.type >= ixgbe_mac_82599EB)
3693 evcnt_detach(&stats->qprdc[i]);
3694 }
3695
3696 evcnt_detach(&rxr->rx_packets);
3697 evcnt_detach(&rxr->rx_bytes);
3698 evcnt_detach(&rxr->rx_copies);
3699 evcnt_detach(&rxr->no_jmbuf);
3700 evcnt_detach(&rxr->rx_discarded);
3701 }
3702 evcnt_detach(&stats->ipcs);
3703 evcnt_detach(&stats->l4cs);
3704 evcnt_detach(&stats->ipcs_bad);
3705 evcnt_detach(&stats->l4cs_bad);
3706 evcnt_detach(&stats->intzero);
3707 evcnt_detach(&stats->legint);
3708 evcnt_detach(&stats->crcerrs);
3709 evcnt_detach(&stats->illerrc);
3710 evcnt_detach(&stats->errbc);
3711 evcnt_detach(&stats->mspdc);
3712 if (hw->mac.type >= ixgbe_mac_X550)
3713 evcnt_detach(&stats->mbsdc);
3714 evcnt_detach(&stats->mpctotal);
3715 evcnt_detach(&stats->mlfc);
3716 evcnt_detach(&stats->mrfc);
3717 evcnt_detach(&stats->rlec);
3718 evcnt_detach(&stats->lxontxc);
3719 evcnt_detach(&stats->lxonrxc);
3720 evcnt_detach(&stats->lxofftxc);
3721 evcnt_detach(&stats->lxoffrxc);
3722
3723 /* Packet Reception Stats */
3724 evcnt_detach(&stats->tor);
3725 evcnt_detach(&stats->gorc);
3726 evcnt_detach(&stats->tpr);
3727 evcnt_detach(&stats->gprc);
3728 evcnt_detach(&stats->mprc);
3729 evcnt_detach(&stats->bprc);
3730 evcnt_detach(&stats->prc64);
3731 evcnt_detach(&stats->prc127);
3732 evcnt_detach(&stats->prc255);
3733 evcnt_detach(&stats->prc511);
3734 evcnt_detach(&stats->prc1023);
3735 evcnt_detach(&stats->prc1522);
3736 evcnt_detach(&stats->ruc);
3737 evcnt_detach(&stats->rfc);
3738 evcnt_detach(&stats->roc);
3739 evcnt_detach(&stats->rjc);
3740 evcnt_detach(&stats->mngprc);
3741 evcnt_detach(&stats->mngpdc);
3742 evcnt_detach(&stats->xec);
3743
3744 /* Packet Transmission Stats */
3745 evcnt_detach(&stats->gotc);
3746 evcnt_detach(&stats->tpt);
3747 evcnt_detach(&stats->gptc);
3748 evcnt_detach(&stats->bptc);
3749 evcnt_detach(&stats->mptc);
3750 evcnt_detach(&stats->mngptc);
3751 evcnt_detach(&stats->ptc64);
3752 evcnt_detach(&stats->ptc127);
3753 evcnt_detach(&stats->ptc255);
3754 evcnt_detach(&stats->ptc511);
3755 evcnt_detach(&stats->ptc1023);
3756 evcnt_detach(&stats->ptc1522);
3757
3758 ixgbe_free_queues(adapter);
3759 free(adapter->mta, M_DEVBUF);
3760
3761 IXGBE_CORE_LOCK_DESTROY(adapter);
3762
3763 return (0);
3764 } /* ixgbe_detach */
3765
3766 /************************************************************************
3767 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3768 *
3769 * Prepare the adapter/port for LPLU and/or WoL
3770 ************************************************************************/
3771 static int
3772 ixgbe_setup_low_power_mode(struct adapter *adapter)
3773 {
3774 struct ixgbe_hw *hw = &adapter->hw;
3775 device_t dev = adapter->dev;
3776 struct ifnet *ifp = adapter->ifp;
3777 s32 error = 0;
3778
3779 /* Limit power management flow to X550EM baseT */
3780 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3781 hw->phy.ops.enter_lplu) {
3782 /* X550EM baseT adapters need a special LPLU flow */
3783 hw->phy.reset_disable = true;
3784 ixgbe_ifstop(ifp, 1);
3785 error = hw->phy.ops.enter_lplu(hw);
3786 if (error)
3787 device_printf(dev,
3788 "Error entering LPLU: %d\n", error);
3789 hw->phy.reset_disable = false;
3790 } else {
3791 /* Just stop for other adapters */
3792 ixgbe_ifstop(ifp, 1);
3793 }
3794
3795 IXGBE_CORE_LOCK(adapter);
3796
3797 if (!hw->wol_enabled) {
3798 ixgbe_set_phy_power(hw, FALSE);
3799 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3800 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3801 } else {
3802 /* Turn off support for APM wakeup. (Using ACPI instead) */
3803 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3804 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3805
3806 /*
3807 * Clear Wake Up Status register to prevent any previous wakeup
3808 * events from waking us up immediately after we suspend.
3809 */
3810 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3811
3812 /*
3813 * Program the Wakeup Filter Control register with user filter
3814 * settings
3815 */
3816 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3817
3818 /* Enable wakeups and power management in Wakeup Control */
3819 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3820 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3821
3822 }
3823
3824 IXGBE_CORE_UNLOCK(adapter);
3825
3826 return error;
3827 } /* ixgbe_setup_low_power_mode */
3828
3829 /************************************************************************
3830 * ixgbe_shutdown - Shutdown entry point
3831 ************************************************************************/
3832 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3833 static int
3834 ixgbe_shutdown(device_t dev)
3835 {
3836 struct adapter *adapter = device_private(dev);
3837 int error = 0;
3838
3839 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3840
3841 error = ixgbe_setup_low_power_mode(adapter);
3842
3843 return (error);
3844 } /* ixgbe_shutdown */
3845 #endif
3846
3847 /************************************************************************
3848 * ixgbe_suspend
3849 *
3850 * From D0 to D3
3851 ************************************************************************/
3852 static bool
3853 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3854 {
3855 struct adapter *adapter = device_private(dev);
3856 int error = 0;
3857
3858 INIT_DEBUGOUT("ixgbe_suspend: begin");
3859
3860 error = ixgbe_setup_low_power_mode(adapter);
3861
3862 return (error);
3863 } /* ixgbe_suspend */
3864
3865 /************************************************************************
3866 * ixgbe_resume
3867 *
3868 * From D3 to D0
3869 ************************************************************************/
3870 static bool
3871 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3872 {
3873 struct adapter *adapter = device_private(dev);
3874 struct ifnet *ifp = adapter->ifp;
3875 struct ixgbe_hw *hw = &adapter->hw;
3876 u32 wus;
3877
3878 INIT_DEBUGOUT("ixgbe_resume: begin");
3879
3880 IXGBE_CORE_LOCK(adapter);
3881
3882 /* Read & clear WUS register */
3883 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3884 if (wus)
3885 device_printf(dev, "Woken up by (WUS): %#010x\n",
3886 IXGBE_READ_REG(hw, IXGBE_WUS));
3887 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3888 /* And clear WUFC until next low-power transition */
3889 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3890
3891 /*
3892 * Required after D3->D0 transition;
3893 * will re-advertise all previous advertised speeds
3894 */
3895 if (ifp->if_flags & IFF_UP)
3896 ixgbe_init_locked(adapter);
3897
3898 IXGBE_CORE_UNLOCK(adapter);
3899
3900 return true;
3901 } /* ixgbe_resume */
3902
3903 /*
3904 * Set the various hardware offload abilities.
3905 *
3906 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3907 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3908 * mbuf offload flags the driver will understand.
3909 */
3910 static void
3911 ixgbe_set_if_hwassist(struct adapter *adapter)
3912 {
3913 /* XXX */
3914 }
3915
3916 /************************************************************************
3917 * ixgbe_init_locked - Init entry point
3918 *
3919 * Used in two ways: It is used by the stack as an init
3920 * entry point in network interface structure. It is also
3921 * used by the driver as a hw/sw initialization routine to
3922 * get to a consistent state.
3923 *
3924 * return 0 on success, positive on failure
3925 ************************************************************************/
3926 static void
3927 ixgbe_init_locked(struct adapter *adapter)
3928 {
3929 struct ifnet *ifp = adapter->ifp;
3930 device_t dev = adapter->dev;
3931 struct ixgbe_hw *hw = &adapter->hw;
3932 struct ix_queue *que;
3933 struct tx_ring *txr;
3934 struct rx_ring *rxr;
3935 u32 txdctl, mhadd;
3936 u32 rxdctl, rxctrl;
3937 u32 ctrl_ext;
3938 bool unsupported_sfp = false;
3939 int i, j, err;
3940
3941 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3942
3943 KASSERT(mutex_owned(&adapter->core_mtx));
3944 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3945
3946 hw->need_unsupported_sfp_recovery = false;
3947 hw->adapter_stopped = FALSE;
3948 ixgbe_stop_adapter(hw);
3949 callout_stop(&adapter->timer);
3950 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3951 callout_stop(&adapter->recovery_mode_timer);
3952 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3953 que->disabled_count = 0;
3954
3955 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3956 adapter->max_frame_size =
3957 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3958
3959 /* Queue indices may change with IOV mode */
3960 ixgbe_align_all_queue_indices(adapter);
3961
3962 /* reprogram the RAR[0] in case user changed it. */
3963 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3964
3965 /* Get the latest mac address, User can use a LAA */
3966 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3967 IXGBE_ETH_LENGTH_OF_ADDRESS);
3968 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3969 hw->addr_ctrl.rar_used_count = 1;
3970
3971 /* Set hardware offload abilities from ifnet flags */
3972 ixgbe_set_if_hwassist(adapter);
3973
3974 /* Prepare transmit descriptors and buffers */
3975 if (ixgbe_setup_transmit_structures(adapter)) {
3976 device_printf(dev, "Could not setup transmit structures\n");
3977 ixgbe_stop_locked(adapter);
3978 return;
3979 }
3980
3981 ixgbe_init_hw(hw);
3982
3983 ixgbe_initialize_iov(adapter);
3984
3985 ixgbe_initialize_transmit_units(adapter);
3986
3987 /* Setup Multicast table */
3988 ixgbe_set_rxfilter(adapter);
3989
3990 /* Determine the correct mbuf pool, based on frame size */
3991 if (adapter->max_frame_size <= MCLBYTES)
3992 adapter->rx_mbuf_sz = MCLBYTES;
3993 else
3994 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3995
3996 /* Prepare receive descriptors and buffers */
3997 if (ixgbe_setup_receive_structures(adapter)) {
3998 device_printf(dev, "Could not setup receive structures\n");
3999 ixgbe_stop_locked(adapter);
4000 return;
4001 }
4002
4003 /* Configure RX settings */
4004 ixgbe_initialize_receive_units(adapter);
4005
4006 /* Initialize variable holding task enqueue requests interrupts */
4007 adapter->task_requests = 0;
4008
4009 /* Enable SDP & MSI-X interrupts based on adapter */
4010 ixgbe_config_gpie(adapter);
4011
4012 /* Set MTU size */
4013 if (ifp->if_mtu > ETHERMTU) {
4014 /* aka IXGBE_MAXFRS on 82599 and newer */
4015 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4016 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4017 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4018 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4019 }
4020
4021 /* Now enable all the queues */
4022 for (i = 0; i < adapter->num_queues; i++) {
4023 txr = &adapter->tx_rings[i];
4024 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4025 txdctl |= IXGBE_TXDCTL_ENABLE;
4026 /* Set WTHRESH to 8, burst writeback */
4027 txdctl |= (8 << 16);
4028 /*
4029 * When the internal queue falls below PTHRESH (32),
4030 * start prefetching as long as there are at least
4031 * HTHRESH (1) buffers ready. The values are taken
4032 * from the Intel linux driver 3.8.21.
4033 * Prefetching enables tx line rate even with 1 queue.
4034 */
4035 txdctl |= (32 << 0) | (1 << 8);
4036 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4037 }
4038
4039 for (i = 0; i < adapter->num_queues; i++) {
4040 rxr = &adapter->rx_rings[i];
4041 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4042 if (hw->mac.type == ixgbe_mac_82598EB) {
4043 /*
4044 * PTHRESH = 21
4045 * HTHRESH = 4
4046 * WTHRESH = 8
4047 */
4048 rxdctl &= ~0x3FFFFF;
4049 rxdctl |= 0x080420;
4050 }
4051 rxdctl |= IXGBE_RXDCTL_ENABLE;
4052 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4053 for (j = 0; j < 10; j++) {
4054 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4055 IXGBE_RXDCTL_ENABLE)
4056 break;
4057 else
4058 msec_delay(1);
4059 }
4060 IXGBE_WRITE_BARRIER(hw);
4061
4062 /*
4063 * In netmap mode, we must preserve the buffers made
4064 * available to userspace before the if_init()
4065 * (this is true by default on the TX side, because
4066 * init makes all buffers available to userspace).
4067 *
4068 * netmap_reset() and the device specific routines
4069 * (e.g. ixgbe_setup_receive_rings()) map these
4070 * buffers at the end of the NIC ring, so here we
4071 * must set the RDT (tail) register to make sure
4072 * they are not overwritten.
4073 *
4074 * In this driver the NIC ring starts at RDH = 0,
4075 * RDT points to the last slot available for reception (?),
4076 * so RDT = num_rx_desc - 1 means the whole ring is available.
4077 */
4078 #ifdef DEV_NETMAP
4079 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4080 (ifp->if_capenable & IFCAP_NETMAP)) {
4081 struct netmap_adapter *na = NA(adapter->ifp);
4082 struct netmap_kring *kring = na->rx_rings[i];
4083 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4084
4085 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4086 } else
4087 #endif /* DEV_NETMAP */
4088 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4089 adapter->num_rx_desc - 1);
4090 }
4091
4092 /* Enable Receive engine */
4093 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4094 if (hw->mac.type == ixgbe_mac_82598EB)
4095 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4096 rxctrl |= IXGBE_RXCTRL_RXEN;
4097 ixgbe_enable_rx_dma(hw, rxctrl);
4098
4099 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4100 atomic_store_relaxed(&adapter->timer_pending, 0);
4101 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4102 callout_reset(&adapter->recovery_mode_timer, hz,
4103 ixgbe_recovery_mode_timer, adapter);
4104
4105 /* Set up MSI/MSI-X routing */
4106 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4107 ixgbe_configure_ivars(adapter);
4108 /* Set up auto-mask */
4109 if (hw->mac.type == ixgbe_mac_82598EB)
4110 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4111 else {
4112 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4113 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4114 }
4115 } else { /* Simple settings for Legacy/MSI */
4116 ixgbe_set_ivar(adapter, 0, 0, 0);
4117 ixgbe_set_ivar(adapter, 0, 0, 1);
4118 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4119 }
4120
4121 ixgbe_init_fdir(adapter);
4122
4123 /*
4124 * Check on any SFP devices that
4125 * need to be kick-started
4126 */
4127 if (hw->phy.type == ixgbe_phy_none) {
4128 err = hw->phy.ops.identify(hw);
4129 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4130 unsupported_sfp = true;
4131 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4132 unsupported_sfp = true;
4133
4134 if (unsupported_sfp)
4135 device_printf(dev,
4136 "Unsupported SFP+ module type was detected.\n");
4137
4138 /* Set moderation on the Link interrupt */
4139 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4140
4141 /* Enable EEE power saving */
4142 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4143 hw->mac.ops.setup_eee(hw,
4144 adapter->feat_en & IXGBE_FEATURE_EEE);
4145
4146 /* Enable power to the phy. */
4147 if (!unsupported_sfp) {
4148 ixgbe_set_phy_power(hw, TRUE);
4149
4150 /* Config/Enable Link */
4151 ixgbe_config_link(adapter);
4152 }
4153
4154 /* Hardware Packet Buffer & Flow Control setup */
4155 ixgbe_config_delay_values(adapter);
4156
4157 /* Initialize the FC settings */
4158 ixgbe_start_hw(hw);
4159
4160 /* Set up VLAN support and filter */
4161 ixgbe_setup_vlan_hw_support(adapter);
4162
4163 /* Setup DMA Coalescing */
4164 ixgbe_config_dmac(adapter);
4165
4166 /* OK to schedule workqueues. */
4167 adapter->schedule_wqs_ok = true;
4168
4169 /* And now turn on interrupts */
4170 ixgbe_enable_intr(adapter);
4171
4172 /* Enable the use of the MBX by the VF's */
4173 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4174 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4175 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4176 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4177 }
4178
4179 /* Update saved flags. See ixgbe_ifflags_cb() */
4180 adapter->if_flags = ifp->if_flags;
4181 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4182
4183 /* Now inform the stack we're ready */
4184 ifp->if_flags |= IFF_RUNNING;
4185
4186 return;
4187 } /* ixgbe_init_locked */
4188
4189 /************************************************************************
4190 * ixgbe_init
4191 ************************************************************************/
4192 static int
4193 ixgbe_init(struct ifnet *ifp)
4194 {
4195 struct adapter *adapter = ifp->if_softc;
4196
4197 IXGBE_CORE_LOCK(adapter);
4198 ixgbe_init_locked(adapter);
4199 IXGBE_CORE_UNLOCK(adapter);
4200
4201 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4202 } /* ixgbe_init */
4203
4204 /************************************************************************
4205 * ixgbe_set_ivar
4206 *
4207 * Setup the correct IVAR register for a particular MSI-X interrupt
4208 * (yes this is all very magic and confusing :)
4209 * - entry is the register array entry
4210 * - vector is the MSI-X vector for this queue
4211 * - type is RX/TX/MISC
4212 ************************************************************************/
4213 static void
4214 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4215 {
4216 struct ixgbe_hw *hw = &adapter->hw;
4217 u32 ivar, index;
4218
4219 vector |= IXGBE_IVAR_ALLOC_VAL;
4220
4221 switch (hw->mac.type) {
4222 case ixgbe_mac_82598EB:
4223 if (type == -1)
4224 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4225 else
4226 entry += (type * 64);
4227 index = (entry >> 2) & 0x1F;
4228 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4229 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4230 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4231 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4232 break;
4233 case ixgbe_mac_82599EB:
4234 case ixgbe_mac_X540:
4235 case ixgbe_mac_X550:
4236 case ixgbe_mac_X550EM_x:
4237 case ixgbe_mac_X550EM_a:
4238 if (type == -1) { /* MISC IVAR */
4239 index = (entry & 1) * 8;
4240 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4241 ivar &= ~(0xffUL << index);
4242 ivar |= ((u32)vector << index);
4243 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4244 } else { /* RX/TX IVARS */
4245 index = (16 * (entry & 1)) + (8 * type);
4246 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4247 ivar &= ~(0xffUL << index);
4248 ivar |= ((u32)vector << index);
4249 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4250 }
4251 break;
4252 default:
4253 break;
4254 }
4255 } /* ixgbe_set_ivar */
4256
4257 /************************************************************************
4258 * ixgbe_configure_ivars
4259 ************************************************************************/
4260 static void
4261 ixgbe_configure_ivars(struct adapter *adapter)
4262 {
4263 struct ix_queue *que = adapter->queues;
4264 u32 newitr;
4265
4266 if (ixgbe_max_interrupt_rate > 0)
4267 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4268 else {
4269 /*
4270 * Disable DMA coalescing if interrupt moderation is
4271 * disabled.
4272 */
4273 adapter->dmac = 0;
4274 newitr = 0;
4275 }
4276
4277 for (int i = 0; i < adapter->num_queues; i++, que++) {
4278 struct rx_ring *rxr = &adapter->rx_rings[i];
4279 struct tx_ring *txr = &adapter->tx_rings[i];
4280 /* First the RX queue entry */
4281 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4282 /* ... and the TX */
4283 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4284 /* Set an Initial EITR value */
4285 ixgbe_eitr_write(adapter, que->msix, newitr);
4286 /*
4287 * To eliminate influence of the previous state.
4288 * At this point, Tx/Rx interrupt handler
4289 * (ixgbe_msix_que()) cannot be called, so both
4290 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4291 */
4292 que->eitr_setting = 0;
4293 }
4294
4295 /* For the Link interrupt */
4296 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4297 } /* ixgbe_configure_ivars */
4298
4299 /************************************************************************
4300 * ixgbe_config_gpie
4301 ************************************************************************/
4302 static void
4303 ixgbe_config_gpie(struct adapter *adapter)
4304 {
4305 struct ixgbe_hw *hw = &adapter->hw;
4306 u32 gpie;
4307
4308 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4309
4310 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4311 /* Enable Enhanced MSI-X mode */
4312 gpie |= IXGBE_GPIE_MSIX_MODE
4313 | IXGBE_GPIE_EIAME
4314 | IXGBE_GPIE_PBA_SUPPORT
4315 | IXGBE_GPIE_OCD;
4316 }
4317
4318 /* Fan Failure Interrupt */
4319 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4320 gpie |= IXGBE_SDP1_GPIEN;
4321
4322 /* Thermal Sensor Interrupt */
4323 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4324 gpie |= IXGBE_SDP0_GPIEN_X540;
4325
4326 /* Link detection */
4327 switch (hw->mac.type) {
4328 case ixgbe_mac_82599EB:
4329 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4330 break;
4331 case ixgbe_mac_X550EM_x:
4332 case ixgbe_mac_X550EM_a:
4333 gpie |= IXGBE_SDP0_GPIEN_X540;
4334 break;
4335 default:
4336 break;
4337 }
4338
4339 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4340
4341 } /* ixgbe_config_gpie */
4342
4343 /************************************************************************
4344 * ixgbe_config_delay_values
4345 *
4346 * Requires adapter->max_frame_size to be set.
4347 ************************************************************************/
4348 static void
4349 ixgbe_config_delay_values(struct adapter *adapter)
4350 {
4351 struct ixgbe_hw *hw = &adapter->hw;
4352 u32 rxpb, frame, size, tmp;
4353
4354 frame = adapter->max_frame_size;
4355
4356 /* Calculate High Water */
4357 switch (hw->mac.type) {
4358 case ixgbe_mac_X540:
4359 case ixgbe_mac_X550:
4360 case ixgbe_mac_X550EM_x:
4361 case ixgbe_mac_X550EM_a:
4362 tmp = IXGBE_DV_X540(frame, frame);
4363 break;
4364 default:
4365 tmp = IXGBE_DV(frame, frame);
4366 break;
4367 }
4368 size = IXGBE_BT2KB(tmp);
4369 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4370 hw->fc.high_water[0] = rxpb - size;
4371
4372 /* Now calculate Low Water */
4373 switch (hw->mac.type) {
4374 case ixgbe_mac_X540:
4375 case ixgbe_mac_X550:
4376 case ixgbe_mac_X550EM_x:
4377 case ixgbe_mac_X550EM_a:
4378 tmp = IXGBE_LOW_DV_X540(frame);
4379 break;
4380 default:
4381 tmp = IXGBE_LOW_DV(frame);
4382 break;
4383 }
4384 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4385
4386 hw->fc.pause_time = IXGBE_FC_PAUSE;
4387 hw->fc.send_xon = TRUE;
4388 } /* ixgbe_config_delay_values */
4389
4390 /************************************************************************
4391 * ixgbe_set_rxfilter - Multicast Update
4392 *
4393 * Called whenever multicast address list is updated.
4394 ************************************************************************/
4395 static void
4396 ixgbe_set_rxfilter(struct adapter *adapter)
4397 {
4398 struct ixgbe_mc_addr *mta;
4399 struct ifnet *ifp = adapter->ifp;
4400 u8 *update_ptr;
4401 int mcnt = 0;
4402 u32 fctrl;
4403 struct ethercom *ec = &adapter->osdep.ec;
4404 struct ether_multi *enm;
4405 struct ether_multistep step;
4406
4407 KASSERT(mutex_owned(&adapter->core_mtx));
4408 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4409
4410 mta = adapter->mta;
4411 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4412
4413 ETHER_LOCK(ec);
4414 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4415 ETHER_FIRST_MULTI(step, ec, enm);
4416 while (enm != NULL) {
4417 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4418 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4419 ETHER_ADDR_LEN) != 0)) {
4420 ec->ec_flags |= ETHER_F_ALLMULTI;
4421 break;
4422 }
4423 bcopy(enm->enm_addrlo,
4424 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4425 mta[mcnt].vmdq = adapter->pool;
4426 mcnt++;
4427 ETHER_NEXT_MULTI(step, enm);
4428 }
4429
4430 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4431 if (ifp->if_flags & IFF_PROMISC)
4432 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4433 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4434 fctrl |= IXGBE_FCTRL_MPE;
4435 fctrl &= ~IXGBE_FCTRL_UPE;
4436 } else
4437 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4438
4439 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4440
4441 /* Update multicast filter entries only when it's not ALLMULTI */
4442 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4443 ETHER_UNLOCK(ec);
4444 update_ptr = (u8 *)mta;
4445 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4446 ixgbe_mc_array_itr, TRUE);
4447 } else
4448 ETHER_UNLOCK(ec);
4449 } /* ixgbe_set_rxfilter */
4450
4451 /************************************************************************
4452 * ixgbe_mc_array_itr
4453 *
4454 * An iterator function needed by the multicast shared code.
4455 * It feeds the shared code routine the addresses in the
4456 * array of ixgbe_set_rxfilter() one by one.
4457 ************************************************************************/
4458 static u8 *
4459 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4460 {
4461 struct ixgbe_mc_addr *mta;
4462
4463 mta = (struct ixgbe_mc_addr *)*update_ptr;
4464 *vmdq = mta->vmdq;
4465
4466 *update_ptr = (u8*)(mta + 1);
4467
4468 return (mta->addr);
4469 } /* ixgbe_mc_array_itr */
4470
4471 /************************************************************************
4472 * ixgbe_local_timer - Timer routine
4473 *
4474 * Checks for link status, updates statistics,
4475 * and runs the watchdog check.
4476 ************************************************************************/
4477 static void
4478 ixgbe_local_timer(void *arg)
4479 {
4480 struct adapter *adapter = arg;
4481
4482 if (adapter->schedule_wqs_ok) {
4483 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4484 workqueue_enqueue(adapter->timer_wq,
4485 &adapter->timer_wc, NULL);
4486 }
4487 }
4488
4489 static void
4490 ixgbe_handle_timer(struct work *wk, void *context)
4491 {
4492 struct adapter *adapter = context;
4493 struct ixgbe_hw *hw = &adapter->hw;
4494 device_t dev = adapter->dev;
4495 struct ix_queue *que = adapter->queues;
4496 u64 queues = 0;
4497 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4498 int hung = 0;
4499 int i;
4500
4501 IXGBE_CORE_LOCK(adapter);
4502
4503 /* Check for pluggable optics */
4504 if (ixgbe_is_sfp(hw)) {
4505 bool sched_mod_task = false;
4506
4507 if (hw->mac.type == ixgbe_mac_82598EB) {
4508 /*
4509 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4510 * any GPIO(SDP). So just schedule TASK_MOD.
4511 */
4512 sched_mod_task = true;
4513 } else {
4514 bool was_full, is_full;
4515
4516 was_full =
4517 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4518 is_full = ixgbe_sfp_cage_full(hw);
4519
4520 /* Do probe if cage state changed */
4521 if (was_full ^ is_full)
4522 sched_mod_task = true;
4523 }
4524 if (sched_mod_task) {
4525 atomic_or_32(&adapter->task_requests,
4526 IXGBE_REQUEST_TASK_MOD);
4527 ixgbe_schedule_admin_tasklet(adapter);
4528 }
4529 }
4530
4531 ixgbe_update_link_status(adapter);
4532 ixgbe_update_stats_counters(adapter);
4533
4534 /* Update some event counters */
4535 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4536 que = adapter->queues;
4537 for (i = 0; i < adapter->num_queues; i++, que++) {
4538 struct tx_ring *txr = que->txr;
4539
4540 v0 += txr->q_efbig_tx_dma_setup;
4541 v1 += txr->q_mbuf_defrag_failed;
4542 v2 += txr->q_efbig2_tx_dma_setup;
4543 v3 += txr->q_einval_tx_dma_setup;
4544 v4 += txr->q_other_tx_dma_setup;
4545 v5 += txr->q_eagain_tx_dma_setup;
4546 v6 += txr->q_enomem_tx_dma_setup;
4547 v7 += txr->q_tso_err;
4548 }
4549 adapter->efbig_tx_dma_setup.ev_count = v0;
4550 adapter->mbuf_defrag_failed.ev_count = v1;
4551 adapter->efbig2_tx_dma_setup.ev_count = v2;
4552 adapter->einval_tx_dma_setup.ev_count = v3;
4553 adapter->other_tx_dma_setup.ev_count = v4;
4554 adapter->eagain_tx_dma_setup.ev_count = v5;
4555 adapter->enomem_tx_dma_setup.ev_count = v6;
4556 adapter->tso_err.ev_count = v7;
4557
4558 /*
4559 * Check the TX queues status
4560 * - mark hung queues so we don't schedule on them
4561 * - watchdog only if all queues show hung
4562 */
4563 que = adapter->queues;
4564 for (i = 0; i < adapter->num_queues; i++, que++) {
4565 /* Keep track of queues with work for soft irq */
4566 if (que->txr->busy)
4567 queues |= 1ULL << que->me;
4568 /*
4569 * Each time txeof runs without cleaning, but there
4570 * are uncleaned descriptors it increments busy. If
4571 * we get to the MAX we declare it hung.
4572 */
4573 if (que->busy == IXGBE_QUEUE_HUNG) {
4574 ++hung;
4575 /* Mark the queue as inactive */
4576 adapter->active_queues &= ~(1ULL << que->me);
4577 continue;
4578 } else {
4579 /* Check if we've come back from hung */
4580 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4581 adapter->active_queues |= 1ULL << que->me;
4582 }
4583 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4584 device_printf(dev,
4585 "Warning queue %d appears to be hung!\n", i);
4586 que->txr->busy = IXGBE_QUEUE_HUNG;
4587 ++hung;
4588 }
4589 }
4590
4591 /* Only truly watchdog if all queues show hung */
4592 if (hung == adapter->num_queues)
4593 goto watchdog;
4594 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4595 else if (queues != 0) { /* Force an IRQ on queues with work */
4596 que = adapter->queues;
4597 for (i = 0; i < adapter->num_queues; i++, que++) {
4598 mutex_enter(&que->dc_mtx);
4599 if (que->disabled_count == 0)
4600 ixgbe_rearm_queues(adapter,
4601 queues & ((u64)1 << i));
4602 mutex_exit(&que->dc_mtx);
4603 }
4604 }
4605 #endif
4606
4607 atomic_store_relaxed(&adapter->timer_pending, 0);
4608 IXGBE_CORE_UNLOCK(adapter);
4609 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4610 return;
4611
4612 watchdog:
4613 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4614 adapter->ifp->if_flags &= ~IFF_RUNNING;
4615 adapter->watchdog_events.ev_count++;
4616 ixgbe_init_locked(adapter);
4617 IXGBE_CORE_UNLOCK(adapter);
4618 } /* ixgbe_handle_timer */
4619
4620 /************************************************************************
4621 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4622 ************************************************************************/
4623 static void
4624 ixgbe_recovery_mode_timer(void *arg)
4625 {
4626 struct adapter *adapter = arg;
4627
4628 if (__predict_true(adapter->osdep.detaching == false)) {
4629 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4630 0, 1) == 0) {
4631 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4632 &adapter->recovery_mode_timer_wc, NULL);
4633 }
4634 }
4635 }
4636
4637 static void
4638 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4639 {
4640 struct adapter *adapter = context;
4641 struct ixgbe_hw *hw = &adapter->hw;
4642
4643 IXGBE_CORE_LOCK(adapter);
4644 if (ixgbe_fw_recovery_mode(hw)) {
4645 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4646 /* Firmware error detected, entering recovery mode */
4647 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4648
4649 if (hw->adapter_stopped == FALSE)
4650 ixgbe_stop_locked(adapter);
4651 }
4652 } else
4653 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4654
4655 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4656 callout_reset(&adapter->recovery_mode_timer, hz,
4657 ixgbe_recovery_mode_timer, adapter);
4658 IXGBE_CORE_UNLOCK(adapter);
4659 } /* ixgbe_handle_recovery_mode_timer */
4660
4661 /************************************************************************
4662 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4663 ************************************************************************/
4664 static void
4665 ixgbe_handle_mod(void *context)
4666 {
4667 struct adapter *adapter = context;
4668 struct ixgbe_hw *hw = &adapter->hw;
4669 device_t dev = adapter->dev;
4670 enum ixgbe_sfp_type last_sfp_type;
4671 u32 err;
4672 bool last_unsupported_sfp_recovery;
4673
4674 KASSERT(mutex_owned(&adapter->core_mtx));
4675
4676 last_sfp_type = hw->phy.sfp_type;
4677 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4678 ++adapter->mod_workev.ev_count;
4679 if (adapter->hw.need_crosstalk_fix) {
4680 if ((hw->mac.type != ixgbe_mac_82598EB) &&
4681 !ixgbe_sfp_cage_full(hw))
4682 goto out;
4683 }
4684
4685 err = hw->phy.ops.identify_sfp(hw);
4686 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4687 if (last_unsupported_sfp_recovery == false)
4688 device_printf(dev,
4689 "Unsupported SFP+ module type was detected.\n");
4690 goto out;
4691 }
4692
4693 if (hw->need_unsupported_sfp_recovery) {
4694 device_printf(dev, "Recovering from unsupported SFP\n");
4695 /*
4696 * We could recover the status by calling setup_sfp(),
4697 * setup_link() and some others. It's complex and might not
4698 * work correctly on some unknown cases. To avoid such type of
4699 * problem, call ixgbe_init_locked(). It's simple and safe
4700 * approach.
4701 */
4702 ixgbe_init_locked(adapter);
4703 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4704 (hw->phy.sfp_type != last_sfp_type)) {
4705 /* A module is inserted and changed. */
4706
4707 if (hw->mac.type == ixgbe_mac_82598EB)
4708 err = hw->phy.ops.reset(hw);
4709 else {
4710 err = hw->mac.ops.setup_sfp(hw);
4711 hw->phy.sfp_setup_needed = FALSE;
4712 }
4713 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4714 device_printf(dev,
4715 "Setup failure - unsupported SFP+ module type.\n");
4716 goto out;
4717 }
4718 }
4719
4720 out:
4721 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4722 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4723
4724 /* Adjust media types shown in ifconfig */
4725 IXGBE_CORE_UNLOCK(adapter);
4726 ifmedia_removeall(&adapter->media);
4727 ixgbe_add_media_types(adapter);
4728 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4729 IXGBE_CORE_LOCK(adapter);
4730
4731 /*
4732 * Don't shedule MSF event if the chip is 82598. 82598 doesn't support
4733 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
4734 * flap because the function calls setup_link().
4735 */
4736 if (hw->mac.type != ixgbe_mac_82598EB)
4737 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
4738
4739 /*
4740 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4741 * the workqueue now.
4742 */
4743 } /* ixgbe_handle_mod */
4744
4745
4746 /************************************************************************
4747 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4748 ************************************************************************/
4749 static void
4750 ixgbe_handle_msf(void *context)
4751 {
4752 struct adapter *adapter = context;
4753 struct ixgbe_hw *hw = &adapter->hw;
4754 u32 autoneg;
4755 bool negotiate;
4756
4757 KASSERT(mutex_owned(&adapter->core_mtx));
4758
4759 ++adapter->msf_workev.ev_count;
4760
4761 autoneg = hw->phy.autoneg_advertised;
4762 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4763 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4764 if (hw->mac.ops.setup_link)
4765 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4766 } /* ixgbe_handle_msf */
4767
4768 /************************************************************************
4769 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4770 ************************************************************************/
4771 static void
4772 ixgbe_handle_phy(void *context)
4773 {
4774 struct adapter *adapter = context;
4775 struct ixgbe_hw *hw = &adapter->hw;
4776 int error;
4777
4778 KASSERT(mutex_owned(&adapter->core_mtx));
4779
4780 ++adapter->phy_workev.ev_count;
4781 error = hw->phy.ops.handle_lasi(hw);
4782 if (error == IXGBE_ERR_OVERTEMP)
4783 device_printf(adapter->dev,
4784 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4785 " PHY will downshift to lower power state!\n");
4786 else if (error)
4787 device_printf(adapter->dev,
4788 "Error handling LASI interrupt: %d\n", error);
4789 } /* ixgbe_handle_phy */
4790
4791 static void
4792 ixgbe_handle_admin(struct work *wk, void *context)
4793 {
4794 struct adapter *adapter = context;
4795 struct ifnet *ifp = adapter->ifp;
4796 struct ixgbe_hw *hw = &adapter->hw;
4797 u32 req;
4798
4799 /*
4800 * Hold the IFNET_LOCK across this entire call. This will
4801 * prevent additional changes to adapter->phy_layer
4802 * and serialize calls to this tasklet. We cannot hold the
4803 * CORE_LOCK while calling into the ifmedia functions as
4804 * they call ifmedia_lock() and the lock is CORE_LOCK.
4805 */
4806 IFNET_LOCK(ifp);
4807 IXGBE_CORE_LOCK(adapter);
4808 /*
4809 * Clear the admin_pending flag before reading task_requests to avoid
4810 * missfiring workqueue though setting task_request.
4811 * Hmm, ixgbe_schedule_admin_tasklet() can extra-fire though
4812 * task_requests are done by prior workqueue, but it is harmless.
4813 */
4814 atomic_store_relaxed(&adapter->admin_pending, 0);
4815 while ((req =
4816 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR))
4817 != 0) {
4818 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
4819 ixgbe_handle_link(adapter);
4820 atomic_and_32(&adapter->task_requests,
4821 ~IXGBE_REQUEST_TASK_LSC);
4822 }
4823 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
4824 ixgbe_handle_mod(adapter);
4825 atomic_and_32(&adapter->task_requests,
4826 ~IXGBE_REQUEST_TASK_MOD);
4827 }
4828 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
4829 ixgbe_handle_msf(adapter);
4830 atomic_and_32(&adapter->task_requests,
4831 ~IXGBE_REQUEST_TASK_MSF);
4832 }
4833 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
4834 ixgbe_handle_phy(adapter);
4835 atomic_and_32(&adapter->task_requests,
4836 ~IXGBE_REQUEST_TASK_PHY);
4837 }
4838 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
4839 ixgbe_reinit_fdir(adapter);
4840 atomic_and_32(&adapter->task_requests,
4841 ~IXGBE_REQUEST_TASK_FDIR);
4842 }
4843 #if 0 /* notyet */
4844 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
4845 ixgbe_handle_mbx(adapter);
4846 atomic_and_32(&adapter->task_requests,
4847 ~IXGBE_REQUEST_TASK_MBX);
4848 }
4849 #endif
4850 }
4851 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
4852 atomic_and_32(&adapter->task_requests,
4853 ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
4854 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
4855 /* Re-enable other interrupts */
4856 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4857 } else
4858 ixgbe_enable_intr(adapter);
4859 }
4860
4861 IXGBE_CORE_UNLOCK(adapter);
4862 IFNET_UNLOCK(ifp);
4863 } /* ixgbe_handle_admin */
4864
4865 static void
4866 ixgbe_ifstop(struct ifnet *ifp, int disable)
4867 {
4868 struct adapter *adapter = ifp->if_softc;
4869
4870 IXGBE_CORE_LOCK(adapter);
4871 ixgbe_stop_locked(adapter);
4872 IXGBE_CORE_UNLOCK(adapter);
4873
4874 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4875 atomic_store_relaxed(&adapter->timer_pending, 0);
4876 }
4877
4878 /************************************************************************
4879 * ixgbe_stop_locked - Stop the hardware
4880 *
4881 * Disables all traffic on the adapter by issuing a
4882 * global reset on the MAC and deallocates TX/RX buffers.
4883 ************************************************************************/
4884 static void
4885 ixgbe_stop_locked(void *arg)
4886 {
4887 struct ifnet *ifp;
4888 struct adapter *adapter = arg;
4889 struct ixgbe_hw *hw = &adapter->hw;
4890
4891 ifp = adapter->ifp;
4892
4893 KASSERT(mutex_owned(&adapter->core_mtx));
4894
4895 INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
4896 ixgbe_disable_intr(adapter);
4897 callout_stop(&adapter->timer);
4898
4899 /* Don't schedule workqueues. */
4900 adapter->schedule_wqs_ok = false;
4901
4902 /* Let the stack know...*/
4903 ifp->if_flags &= ~IFF_RUNNING;
4904
4905 ixgbe_reset_hw(hw);
4906 hw->adapter_stopped = FALSE;
4907 ixgbe_stop_adapter(hw);
4908 if (hw->mac.type == ixgbe_mac_82599EB)
4909 ixgbe_stop_mac_link_on_d3_82599(hw);
4910 /* Turn off the laser - noop with no optics */
4911 ixgbe_disable_tx_laser(hw);
4912
4913 /* Update the stack */
4914 adapter->link_up = FALSE;
4915 ixgbe_update_link_status(adapter);
4916
4917 /* reprogram the RAR[0] in case user changed it. */
4918 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4919
4920 return;
4921 } /* ixgbe_stop_locked */
4922
4923 /************************************************************************
4924 * ixgbe_update_link_status - Update OS on link state
4925 *
4926 * Note: Only updates the OS on the cached link state.
4927 * The real check of the hardware only happens with
4928 * a link interrupt.
4929 ************************************************************************/
4930 static void
4931 ixgbe_update_link_status(struct adapter *adapter)
4932 {
4933 struct ifnet *ifp = adapter->ifp;
4934 device_t dev = adapter->dev;
4935 struct ixgbe_hw *hw = &adapter->hw;
4936
4937 KASSERT(mutex_owned(&adapter->core_mtx));
4938
4939 if (adapter->link_up) {
4940 if (adapter->link_active != LINK_STATE_UP) {
4941 /*
4942 * To eliminate influence of the previous state
4943 * in the same way as ixgbe_init_locked().
4944 */
4945 struct ix_queue *que = adapter->queues;
4946 for (int i = 0; i < adapter->num_queues; i++, que++)
4947 que->eitr_setting = 0;
4948
4949 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
4950 				/*
4951 				 * Read and discard the MAC Local Fault and
4952 				 * Remote Fault counts because those registers
4953 				 * are valid only while the link is up at
4954 				 * 10Gbps.
4955 				 */
4956 IXGBE_READ_REG(hw, IXGBE_MLFC);
4957 IXGBE_READ_REG(hw, IXGBE_MRFC);
4958 }
4959
4960 if (bootverbose) {
4961 const char *bpsmsg;
4962
4963 switch (adapter->link_speed) {
4964 case IXGBE_LINK_SPEED_10GB_FULL:
4965 bpsmsg = "10 Gbps";
4966 break;
4967 case IXGBE_LINK_SPEED_5GB_FULL:
4968 bpsmsg = "5 Gbps";
4969 break;
4970 case IXGBE_LINK_SPEED_2_5GB_FULL:
4971 bpsmsg = "2.5 Gbps";
4972 break;
4973 case IXGBE_LINK_SPEED_1GB_FULL:
4974 bpsmsg = "1 Gbps";
4975 break;
4976 case IXGBE_LINK_SPEED_100_FULL:
4977 bpsmsg = "100 Mbps";
4978 break;
4979 case IXGBE_LINK_SPEED_10_FULL:
4980 bpsmsg = "10 Mbps";
4981 break;
4982 default:
4983 bpsmsg = "unknown speed";
4984 break;
4985 }
4986 				device_printf(dev, "Link is up %s %s\n",
4987 				    bpsmsg, "Full Duplex");
4988 }
4989 adapter->link_active = LINK_STATE_UP;
4990 /* Update any Flow Control changes */
4991 ixgbe_fc_enable(&adapter->hw);
4992 /* Update DMA coalescing config */
4993 ixgbe_config_dmac(adapter);
4994 if_link_state_change(ifp, LINK_STATE_UP);
4995
4996 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4997 ixgbe_ping_all_vfs(adapter);
4998 }
4999 } else {
5000 /*
5001 		 * Do this only when link_active changes to DOWN, i.e.
5002 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5003 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5004 */
5005 if (adapter->link_active != LINK_STATE_DOWN) {
5006 if (bootverbose)
5007 device_printf(dev, "Link is Down\n");
5008 if_link_state_change(ifp, LINK_STATE_DOWN);
5009 adapter->link_active = LINK_STATE_DOWN;
5010 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5011 ixgbe_ping_all_vfs(adapter);
5012 ixgbe_drain_all(adapter);
5013 }
5014 }
5015 } /* ixgbe_update_link_status */
5016
5017 /************************************************************************
5018 * ixgbe_config_dmac - Configure DMA Coalescing
5019 ************************************************************************/
5020 static void
5021 ixgbe_config_dmac(struct adapter *adapter)
5022 {
5023 struct ixgbe_hw *hw = &adapter->hw;
5024 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5025
5026 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5027 return;
5028
5029 if (dcfg->watchdog_timer ^ adapter->dmac ||
5030 dcfg->link_speed ^ adapter->link_speed) {
5031 dcfg->watchdog_timer = adapter->dmac;
5032 dcfg->fcoe_en = false;
5033 dcfg->link_speed = adapter->link_speed;
5034 dcfg->num_tcs = 1;
5035
5036 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5037 dcfg->watchdog_timer, dcfg->link_speed);
5038
5039 hw->mac.ops.dmac_config(hw);
5040 }
5041 } /* ixgbe_config_dmac */
5042
5043 /************************************************************************
5044 * ixgbe_enable_intr
5045 ************************************************************************/
5046 static void
5047 ixgbe_enable_intr(struct adapter *adapter)
5048 {
5049 struct ixgbe_hw *hw = &adapter->hw;
5050 struct ix_queue *que = adapter->queues;
5051 u32 mask, fwsm;
5052
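	/*
	 * Start from the full enable mask with the per-queue bits removed;
	 * queue interrupts are enabled individually at the end of this
	 * function.
	 */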
5053 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5054
5055 switch (adapter->hw.mac.type) {
5056 case ixgbe_mac_82599EB:
5057 mask |= IXGBE_EIMS_ECC;
5058 /* Temperature sensor on some adapters */
5059 mask |= IXGBE_EIMS_GPI_SDP0;
5060 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5061 mask |= IXGBE_EIMS_GPI_SDP1;
5062 mask |= IXGBE_EIMS_GPI_SDP2;
5063 break;
5064 case ixgbe_mac_X540:
5065 /* Detect if Thermal Sensor is enabled */
5066 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5067 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5068 mask |= IXGBE_EIMS_TS;
5069 mask |= IXGBE_EIMS_ECC;
5070 break;
5071 case ixgbe_mac_X550:
5072 /* MAC thermal sensor is automatically enabled */
5073 mask |= IXGBE_EIMS_TS;
5074 mask |= IXGBE_EIMS_ECC;
5075 break;
5076 case ixgbe_mac_X550EM_x:
5077 case ixgbe_mac_X550EM_a:
5078 /* Some devices use SDP0 for important information */
5079 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5080 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5081 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5082 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5083 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5084 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5085 mask |= IXGBE_EICR_GPI_SDP0_X540;
5086 mask |= IXGBE_EIMS_ECC;
5087 break;
5088 default:
5089 break;
5090 }
5091
5092 /* Enable Fan Failure detection */
5093 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5094 mask |= IXGBE_EIMS_GPI_SDP1;
5095 /* Enable SR-IOV */
5096 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5097 mask |= IXGBE_EIMS_MAILBOX;
5098 /* Enable Flow Director */
5099 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5100 mask |= IXGBE_EIMS_FLOW_DIR;
5101
5102 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5103
5104 /* With MSI-X we use auto clear */
5105 if (adapter->msix_mem) {
5106 mask = IXGBE_EIMS_ENABLE_MASK;
5107 /* Don't autoclear Link */
5108 mask &= ~IXGBE_EIMS_OTHER;
5109 mask &= ~IXGBE_EIMS_LSC;
5110 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5111 mask &= ~IXGBE_EIMS_MAILBOX;
5112 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5113 }
5114
5115 /*
5116 * Now enable all queues, this is done separately to
5117 * allow for handling the extended (beyond 32) MSI-X
5118 * vectors that can be used by 82599
5119 */
5120 for (int i = 0; i < adapter->num_queues; i++, que++)
5121 ixgbe_enable_queue(adapter, que->msix);
5122
5123 IXGBE_WRITE_FLUSH(hw);
5124
5125 } /* ixgbe_enable_intr */
5126
5127 /************************************************************************
5128 * ixgbe_disable_intr_internal
5129 ************************************************************************/
5130 static void
5131 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5132 {
5133 struct ix_queue *que = adapter->queues;
5134
5135 /* disable interrupts other than queues */
5136 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5137
5138 if (adapter->msix_mem)
5139 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5140
5141 for (int i = 0; i < adapter->num_queues; i++, que++)
5142 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5143
5144 IXGBE_WRITE_FLUSH(&adapter->hw);
5145
5146 } /* ixgbe_disable_intr_internal */
5147
5148 /************************************************************************
5149 * ixgbe_disable_intr
5150 ************************************************************************/
5151 static void
5152 ixgbe_disable_intr(struct adapter *adapter)
5153 {
5154
5155 ixgbe_disable_intr_internal(adapter, true);
5156 } /* ixgbe_disable_intr */
5157
5158 /************************************************************************
5159 * ixgbe_ensure_disabled_intr
5160 ************************************************************************/
5161 void
5162 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5163 {
5164
5165 ixgbe_disable_intr_internal(adapter, false);
5166 } /* ixgbe_ensure_disabled_intr */
5167
5168 /************************************************************************
5169 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5170 ************************************************************************/
5171 static int
5172 ixgbe_legacy_irq(void *arg)
5173 {
5174 struct ix_queue *que = arg;
5175 struct adapter *adapter = que->adapter;
5176 struct ixgbe_hw *hw = &adapter->hw;
5177 struct ifnet *ifp = adapter->ifp;
5178 struct tx_ring *txr = adapter->tx_rings;
5179 bool more = false;
5180 bool reenable_intr = true;
5181 u32 eicr, eicr_mask;
5182 u32 task_requests = 0;
5183
5184 /* Silicon errata #26 on 82598 */
5185 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5186
5187 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5188
5189 adapter->stats.pf.legint.ev_count++;
5190 ++que->irqs.ev_count;
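	/* An EICR value of zero means the interrupt was not ours
	 * (e.g. a shared INTx line), so report it as unhandled. */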
5191 if (eicr == 0) {
5192 adapter->stats.pf.intzero.ev_count++;
5193 if ((ifp->if_flags & IFF_UP) != 0)
5194 ixgbe_enable_intr(adapter);
5195 return 0;
5196 }
5197
5198 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5199 /*
5200 		 * Handle que->txrx_use_workqueue the same way as ixgbe_msix_que().
5201 */
5202 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5203
5204 #ifdef __NetBSD__
5205 /* Don't run ixgbe_rxeof in interrupt context */
5206 more = true;
5207 #else
5208 more = ixgbe_rxeof(que);
5209 #endif
5210
5211 IXGBE_TX_LOCK(txr);
5212 ixgbe_txeof(txr);
5213 #ifdef notyet
5214 if (!ixgbe_ring_empty(ifp, txr->br))
5215 ixgbe_start_locked(ifp, txr);
5216 #endif
5217 IXGBE_TX_UNLOCK(txr);
5218 }
5219
5220 /* Check for fan failure */
5221 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5222 ixgbe_check_fan_failure(adapter, eicr, true);
5223 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5224 }
5225
5226 /* Link status change */
5227 if (eicr & IXGBE_EICR_LSC)
5228 task_requests |= IXGBE_REQUEST_TASK_LSC;
5229
5230 if (ixgbe_is_sfp(hw)) {
5231 /* Pluggable optics-related interrupt */
5232 if (hw->mac.type >= ixgbe_mac_X540)
5233 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5234 else
5235 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5236
5237 if (eicr & eicr_mask) {
5238 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5239 task_requests |= IXGBE_REQUEST_TASK_MOD;
5240 }
5241
5242 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5243 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5244 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5245 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5246 task_requests |= IXGBE_REQUEST_TASK_MSF;
5247 }
5248 }
5249
5250 /* External PHY interrupt */
5251 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5252 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5253 task_requests |= IXGBE_REQUEST_TASK_PHY;
5254
5255 if (more) {
5256 que->req.ev_count++;
5257 ixgbe_sched_handle_que(adapter, que);
5258 reenable_intr = false;
5259 }
5260 if (task_requests != 0) {
5261 /* Re-enabling other interrupts is done in the admin task */
5262 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
5263 atomic_or_32(&adapter->task_requests, task_requests);
5264 ixgbe_schedule_admin_tasklet(adapter);
5265 reenable_intr = false;
5266 }
5267
5268 if (reenable_intr == true)
5269 ixgbe_enable_intr(adapter);
5270
5271 return 1;
5272 } /* ixgbe_legacy_irq */
5273
5274 /************************************************************************
5275 * ixgbe_free_pciintr_resources
5276 ************************************************************************/
5277 static void
5278 ixgbe_free_pciintr_resources(struct adapter *adapter)
5279 {
5280 struct ix_queue *que = adapter->queues;
5281 int rid;
5282
5283 /*
5284 * Release all msix queue resources:
5285 */
5286 for (int i = 0; i < adapter->num_queues; i++, que++) {
5287 if (que->res != NULL) {
5288 pci_intr_disestablish(adapter->osdep.pc,
5289 adapter->osdep.ihs[i]);
5290 adapter->osdep.ihs[i] = NULL;
5291 }
5292 }
5293
5294 /* Clean the Legacy or Link interrupt last */
5295 if (adapter->vector) /* we are doing MSIX */
5296 rid = adapter->vector;
5297 else
5298 rid = 0;
5299
5300 if (adapter->osdep.ihs[rid] != NULL) {
5301 pci_intr_disestablish(adapter->osdep.pc,
5302 adapter->osdep.ihs[rid]);
5303 adapter->osdep.ihs[rid] = NULL;
5304 }
5305
5306 if (adapter->osdep.intrs != NULL) {
5307 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5308 adapter->osdep.nintrs);
5309 adapter->osdep.intrs = NULL;
5310 }
5311 } /* ixgbe_free_pciintr_resources */
5312
5313 /************************************************************************
5314 * ixgbe_free_pci_resources
5315 ************************************************************************/
5316 static void
5317 ixgbe_free_pci_resources(struct adapter *adapter)
5318 {
5319
5320 ixgbe_free_pciintr_resources(adapter);
5321
5322 if (adapter->osdep.mem_size != 0) {
5323 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5324 adapter->osdep.mem_bus_space_handle,
5325 adapter->osdep.mem_size);
5326 }
5327
5328 } /* ixgbe_free_pci_resources */
5329
5330 /************************************************************************
5331 * ixgbe_set_sysctl_value
5332 ************************************************************************/
5333 static void
5334 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5335 const char *description, int *limit, int value)
5336 {
5337 device_t dev = adapter->dev;
5338 struct sysctllog **log;
5339 const struct sysctlnode *rnode, *cnode;
5340
5341 /*
5342 * It's not required to check recovery mode because this function never
5343 * touches hardware.
5344 */
5345
5346 log = &adapter->sysctllog;
5347 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5348 aprint_error_dev(dev, "could not create sysctl root\n");
5349 return;
5350 }
5351 if (sysctl_createv(log, 0, &rnode, &cnode,
5352 CTLFLAG_READWRITE, CTLTYPE_INT,
5353 name, SYSCTL_DESCR(description),
5354 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5355 aprint_error_dev(dev, "could not create sysctl\n");
5356 *limit = value;
5357 } /* ixgbe_set_sysctl_value */
5358
5359 /************************************************************************
5360 * ixgbe_sysctl_flowcntl
5361 *
5362 * SYSCTL wrapper around setting Flow Control
5363 ************************************************************************/
5364 static int
5365 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5366 {
5367 struct sysctlnode node = *rnode;
5368 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5369 int error, fc;
5370
5371 if (ixgbe_fw_recovery_mode_swflag(adapter))
5372 return (EPERM);
5373
5374 fc = adapter->hw.fc.current_mode;
5375 node.sysctl_data = &fc;
5376 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5377 if (error != 0 || newp == NULL)
5378 return error;
5379
5380 /* Don't bother if it's not changed */
5381 if (fc == adapter->hw.fc.current_mode)
5382 return (0);
5383
5384 return ixgbe_set_flowcntl(adapter, fc);
5385 } /* ixgbe_sysctl_flowcntl */
5386
5387 /************************************************************************
5388 * ixgbe_set_flowcntl - Set flow control
5389 *
5390 * Flow control values:
5391 * 0 - off
5392 * 1 - rx pause
5393 * 2 - tx pause
5394 * 3 - full
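 *
 * Example (assuming the sysctl node for this handler is named "fc"):
 *	sysctl -w hw.ixg0.fc=3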
5395 ************************************************************************/
5396 static int
5397 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5398 {
5399 switch (fc) {
5400 case ixgbe_fc_rx_pause:
5401 case ixgbe_fc_tx_pause:
5402 case ixgbe_fc_full:
5403 adapter->hw.fc.requested_mode = fc;
5404 if (adapter->num_queues > 1)
5405 ixgbe_disable_rx_drop(adapter);
5406 break;
5407 case ixgbe_fc_none:
5408 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5409 if (adapter->num_queues > 1)
5410 ixgbe_enable_rx_drop(adapter);
5411 break;
5412 default:
5413 return (EINVAL);
5414 }
5415
5416 #if 0 /* XXX NetBSD */
5417 /* Don't autoneg if forcing a value */
5418 adapter->hw.fc.disable_fc_autoneg = TRUE;
5419 #endif
5420 ixgbe_fc_enable(&adapter->hw);
5421
5422 return (0);
5423 } /* ixgbe_set_flowcntl */
5424
5425 /************************************************************************
5426 * ixgbe_enable_rx_drop
5427 *
5428 * Enable the hardware to drop packets when the buffer is
5429 * full. This is useful with multiqueue, so that no single
5430 * queue being full stalls the entire RX engine. We only
5431 * enable this when Multiqueue is enabled AND Flow Control
5432 * is disabled.
5433 ************************************************************************/
5434 static void
5435 ixgbe_enable_rx_drop(struct adapter *adapter)
5436 {
5437 struct ixgbe_hw *hw = &adapter->hw;
5438 struct rx_ring *rxr;
5439 u32 srrctl;
5440
5441 for (int i = 0; i < adapter->num_queues; i++) {
5442 rxr = &adapter->rx_rings[i];
5443 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5444 srrctl |= IXGBE_SRRCTL_DROP_EN;
5445 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5446 }
5447
5448 /* enable drop for each vf */
5449 for (int i = 0; i < adapter->num_vfs; i++) {
5450 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5451 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5452 IXGBE_QDE_ENABLE));
5453 }
5454 } /* ixgbe_enable_rx_drop */
5455
5456 /************************************************************************
5457 * ixgbe_disable_rx_drop
5458 ************************************************************************/
5459 static void
5460 ixgbe_disable_rx_drop(struct adapter *adapter)
5461 {
5462 struct ixgbe_hw *hw = &adapter->hw;
5463 struct rx_ring *rxr;
5464 u32 srrctl;
5465
5466 for (int i = 0; i < adapter->num_queues; i++) {
5467 rxr = &adapter->rx_rings[i];
5468 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5469 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5470 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5471 }
5472
5473 /* disable drop for each vf */
5474 for (int i = 0; i < adapter->num_vfs; i++) {
5475 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5476 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5477 }
5478 } /* ixgbe_disable_rx_drop */
5479
5480 /************************************************************************
5481 * ixgbe_sysctl_advertise
5482 *
5483 * SYSCTL wrapper around setting advertised speed
5484 ************************************************************************/
5485 static int
5486 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5487 {
5488 struct sysctlnode node = *rnode;
5489 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5490 int error = 0, advertise;
5491
5492 if (ixgbe_fw_recovery_mode_swflag(adapter))
5493 return (EPERM);
5494
5495 advertise = adapter->advertise;
5496 node.sysctl_data = &advertise;
5497 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5498 if (error != 0 || newp == NULL)
5499 return error;
5500
5501 return ixgbe_set_advertise(adapter, advertise);
5502 } /* ixgbe_sysctl_advertise */
5503
5504 /************************************************************************
5505 * ixgbe_set_advertise - Control advertised link speed
5506 *
5507 * Flags:
5508 * 0x00 - Default (all capable link speed)
5509 * 0x01 - advertise 100 Mb
5510 * 0x02 - advertise 1G
5511 * 0x04 - advertise 10G
5512 * 0x08 - advertise 10 Mb
5513 * 0x10 - advertise 2.5G
5514 * 0x20 - advertise 5G
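 *
 * Flags may be ORed together, e.g. 0x06 advertises both 1G and 10G.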
5515 ************************************************************************/
5516 static int
5517 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5518 {
5519 device_t dev;
5520 struct ixgbe_hw *hw;
5521 ixgbe_link_speed speed = 0;
5522 ixgbe_link_speed link_caps = 0;
5523 s32 err = IXGBE_NOT_IMPLEMENTED;
5524 bool negotiate = FALSE;
5525
5526 /* Checks to validate new value */
5527 if (adapter->advertise == advertise) /* no change */
5528 return (0);
5529
5530 dev = adapter->dev;
5531 hw = &adapter->hw;
5532
5533 /* No speed changes for backplane media */
5534 if (hw->phy.media_type == ixgbe_media_type_backplane)
5535 return (ENODEV);
5536
5537 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5538 (hw->phy.multispeed_fiber))) {
5539 device_printf(dev,
5540 "Advertised speed can only be set on copper or "
5541 "multispeed fiber media types.\n");
5542 return (EINVAL);
5543 }
5544
5545 if (advertise < 0x0 || advertise > 0x2f) {
5546 device_printf(dev,
5547 		    "Invalid advertised speed; valid flags are 0x0 through 0x2f\n");
5548 return (EINVAL);
5549 }
5550
5551 if (hw->mac.ops.get_link_capabilities) {
5552 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5553 &negotiate);
5554 if (err != IXGBE_SUCCESS) {
5555 			device_printf(dev, "Unable to determine supported advertised speeds\n");
5556 return (ENODEV);
5557 }
5558 }
5559
5560 /* Set new value and report new advertised mode */
5561 if (advertise & 0x1) {
5562 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5563 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5564 return (EINVAL);
5565 }
5566 speed |= IXGBE_LINK_SPEED_100_FULL;
5567 }
5568 if (advertise & 0x2) {
5569 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5570 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5571 return (EINVAL);
5572 }
5573 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5574 }
5575 if (advertise & 0x4) {
5576 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5577 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5578 return (EINVAL);
5579 }
5580 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5581 }
5582 if (advertise & 0x8) {
5583 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5584 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5585 return (EINVAL);
5586 }
5587 speed |= IXGBE_LINK_SPEED_10_FULL;
5588 }
5589 if (advertise & 0x10) {
5590 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5591 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5592 return (EINVAL);
5593 }
5594 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5595 }
5596 if (advertise & 0x20) {
5597 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5598 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5599 return (EINVAL);
5600 }
5601 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5602 }
5603 if (advertise == 0)
5604 speed = link_caps; /* All capable link speed */
5605
5606 hw->mac.autotry_restart = TRUE;
5607 hw->mac.ops.setup_link(hw, speed, TRUE);
5608 adapter->advertise = advertise;
5609
5610 return (0);
5611 } /* ixgbe_set_advertise */
5612
5613 /************************************************************************
5614 * ixgbe_get_advertise - Get current advertised speed settings
5615 *
5616 * Formatted for sysctl usage.
5617 * Flags:
5618 * 0x01 - advertise 100 Mb
5619 * 0x02 - advertise 1G
5620 * 0x04 - advertise 10G
5621 * 0x08 - advertise 10 Mb (yes, Mb)
5622 * 0x10 - advertise 2.5G
5623 * 0x20 - advertise 5G
5624 ************************************************************************/
5625 static int
5626 ixgbe_get_advertise(struct adapter *adapter)
5627 {
5628 struct ixgbe_hw *hw = &adapter->hw;
5629 int speed;
5630 ixgbe_link_speed link_caps = 0;
5631 s32 err;
5632 bool negotiate = FALSE;
5633
5634 /*
5635 * Advertised speed means nothing unless it's copper or
5636 * multi-speed fiber
5637 */
5638 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5639 !(hw->phy.multispeed_fiber))
5640 return (0);
5641
5642 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5643 if (err != IXGBE_SUCCESS)
5644 return (0);
5645
5646 speed =
5647 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5648 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5649 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5650 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5651 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5652 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5653
5654 return speed;
5655 } /* ixgbe_get_advertise */
5656
5657 /************************************************************************
5658 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5659 *
5660 * Control values:
5661 * 0/1 - off / on (use default value of 1000)
5662 *
5663 * Legal timer values are:
5664 * 50,100,250,500,1000,2000,5000,10000
5665 *
5666 * Turning off interrupt moderation will also turn this off.
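 *
 * Example: 250 enables coalescing with a watchdog value of 250, 1
 * enables it with the default of 1000, and 0 disables it entirely
 * (see the switch below).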
5667 ************************************************************************/
5668 static int
5669 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5670 {
5671 struct sysctlnode node = *rnode;
5672 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5673 struct ifnet *ifp = adapter->ifp;
5674 int error;
5675 int newval;
5676
5677 if (ixgbe_fw_recovery_mode_swflag(adapter))
5678 return (EPERM);
5679
5680 newval = adapter->dmac;
5681 node.sysctl_data = &newval;
5682 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5683 if ((error) || (newp == NULL))
5684 return (error);
5685
5686 switch (newval) {
5687 case 0:
5688 /* Disabled */
5689 adapter->dmac = 0;
5690 break;
5691 case 1:
5692 /* Enable and use default */
5693 adapter->dmac = 1000;
5694 break;
5695 case 50:
5696 case 100:
5697 case 250:
5698 case 500:
5699 case 1000:
5700 case 2000:
5701 case 5000:
5702 case 10000:
5703 /* Legal values - allow */
5704 adapter->dmac = newval;
5705 break;
5706 default:
5707 /* Do nothing, illegal value */
5708 return (EINVAL);
5709 }
5710
5711 /* Re-initialize hardware if it's already running */
5712 if (ifp->if_flags & IFF_RUNNING)
5713 ifp->if_init(ifp);
5714
5715 return (0);
5716 }
5717
5718 #ifdef IXGBE_DEBUG
5719 /************************************************************************
5720 * ixgbe_sysctl_power_state
5721 *
5722 * Sysctl to test power states
5723 * Values:
5724 * 0 - set device to D0
5725 * 3 - set device to D3
5726 * (none) - get current device power state
5727 ************************************************************************/
5728 static int
5729 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5730 {
5731 #ifdef notyet
5732 struct sysctlnode node = *rnode;
5733 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5734 device_t dev = adapter->dev;
5735 int curr_ps, new_ps, error = 0;
5736
5737 if (ixgbe_fw_recovery_mode_swflag(adapter))
5738 return (EPERM);
5739
5740 curr_ps = new_ps = pci_get_powerstate(dev);
5741
5742 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5743 if ((error) || (req->newp == NULL))
5744 return (error);
5745
5746 if (new_ps == curr_ps)
5747 return (0);
5748
5749 if (new_ps == 3 && curr_ps == 0)
5750 error = DEVICE_SUSPEND(dev);
5751 else if (new_ps == 0 && curr_ps == 3)
5752 error = DEVICE_RESUME(dev);
5753 else
5754 return (EINVAL);
5755
5756 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5757
5758 return (error);
5759 #else
5760 return 0;
5761 #endif
5762 } /* ixgbe_sysctl_power_state */
5763 #endif
5764
5765 /************************************************************************
5766 * ixgbe_sysctl_wol_enable
5767 *
5768 * Sysctl to enable/disable the WoL capability,
5769 * if supported by the adapter.
5770 *
5771 * Values:
5772 * 0 - disabled
5773 * 1 - enabled
5774 ************************************************************************/
5775 static int
5776 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5777 {
5778 struct sysctlnode node = *rnode;
5779 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5780 struct ixgbe_hw *hw = &adapter->hw;
5781 bool new_wol_enabled;
5782 int error = 0;
5783
5784 /*
5785 * It's not required to check recovery mode because this function never
5786 * touches hardware.
5787 */
5788 new_wol_enabled = hw->wol_enabled;
5789 node.sysctl_data = &new_wol_enabled;
5790 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5791 if ((error) || (newp == NULL))
5792 return (error);
5793 if (new_wol_enabled == hw->wol_enabled)
5794 return (0);
5795
5796 if (new_wol_enabled && !adapter->wol_support)
5797 return (ENODEV);
5798 else
5799 hw->wol_enabled = new_wol_enabled;
5800
5801 return (0);
5802 } /* ixgbe_sysctl_wol_enable */
5803
5804 /************************************************************************
5805 * ixgbe_sysctl_wufc - Wake Up Filter Control
5806 *
5807 * Sysctl to enable/disable the types of packets that the
5808 * adapter will wake up on upon receipt.
5809 * Flags:
5810 * 0x1 - Link Status Change
5811 * 0x2 - Magic Packet
5812 * 0x4 - Direct Exact
5813 * 0x8 - Directed Multicast
5814 * 0x10 - Broadcast
5815 * 0x20 - ARP/IPv4 Request Packet
5816 * 0x40 - Direct IPv4 Packet
5817 * 0x80 - Direct IPv6 Packet
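 *
 * Flags may be combined, e.g. 0x3 wakes on link change or magic packet.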
5818 *
5819 * Settings not listed above will cause the sysctl to return an error.
5820 ************************************************************************/
5821 static int
5822 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5823 {
5824 struct sysctlnode node = *rnode;
5825 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5826 int error = 0;
5827 u32 new_wufc;
5828
5829 /*
5830 * It's not required to check recovery mode because this function never
5831 * touches hardware.
5832 */
5833 new_wufc = adapter->wufc;
5834 node.sysctl_data = &new_wufc;
5835 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5836 if ((error) || (newp == NULL))
5837 return (error);
5838 if (new_wufc == adapter->wufc)
5839 return (0);
5840
5841 if (new_wufc & 0xffffff00)
5842 return (EINVAL);
5843
5844 	new_wufc &= 0xff;
5845 	new_wufc |= (0xffffff00 & adapter->wufc); /* keep bits not settable here */
5846 adapter->wufc = new_wufc;
5847
5848 return (0);
5849 } /* ixgbe_sysctl_wufc */
5850
5851 #ifdef IXGBE_DEBUG
5852 /************************************************************************
5853 * ixgbe_sysctl_print_rss_config
5854 ************************************************************************/
5855 static int
5856 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5857 {
5858 #ifdef notyet
5859 struct sysctlnode node = *rnode;
5860 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5861 struct ixgbe_hw *hw = &adapter->hw;
5862 device_t dev = adapter->dev;
5863 struct sbuf *buf;
5864 int error = 0, reta_size;
5865 u32 reg;
5866
5867 if (ixgbe_fw_recovery_mode_swflag(adapter))
5868 return (EPERM);
5869
5870 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5871 if (!buf) {
5872 device_printf(dev, "Could not allocate sbuf for output.\n");
5873 return (ENOMEM);
5874 }
5875
5876 // TODO: use sbufs to make a string to print out
5877 /* Set multiplier for RETA setup and table size based on MAC */
5878 switch (adapter->hw.mac.type) {
5879 case ixgbe_mac_X550:
5880 case ixgbe_mac_X550EM_x:
5881 case ixgbe_mac_X550EM_a:
5882 reta_size = 128;
5883 break;
5884 default:
5885 reta_size = 32;
5886 break;
5887 }
5888
5889 /* Print out the redirection table */
5890 sbuf_cat(buf, "\n");
5891 for (int i = 0; i < reta_size; i++) {
5892 if (i < 32) {
5893 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5894 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5895 } else {
5896 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5897 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5898 }
5899 }
5900
5901 // TODO: print more config
5902
5903 error = sbuf_finish(buf);
5904 if (error)
5905 device_printf(dev, "Error finishing sbuf: %d\n", error);
5906
5907 sbuf_delete(buf);
5908 #endif
5909 return (0);
5910 } /* ixgbe_sysctl_print_rss_config */
5911 #endif /* IXGBE_DEBUG */
5912
5913 /************************************************************************
5914 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5915 *
5916 * For X552/X557-AT devices using an external PHY
5917 ************************************************************************/
5918 static int
5919 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5920 {
5921 struct sysctlnode node = *rnode;
5922 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5923 struct ixgbe_hw *hw = &adapter->hw;
5924 int val;
5925 u16 reg;
5926 int error;
5927
5928 if (ixgbe_fw_recovery_mode_swflag(adapter))
5929 return (EPERM);
5930
5931 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5932 device_printf(adapter->dev,
5933 "Device has no supported external thermal sensor.\n");
5934 return (ENODEV);
5935 }
5936
5937 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5938 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5939 device_printf(adapter->dev,
5940 "Error reading from PHY's current temperature register\n");
5941 return (EAGAIN);
5942 }
5943
5944 node.sysctl_data = &val;
5945
5946 /* Shift temp for output */
5947 val = reg >> 8;
5948
5949 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5950 if ((error) || (newp == NULL))
5951 return (error);
5952
5953 return (0);
5954 } /* ixgbe_sysctl_phy_temp */
5955
5956 /************************************************************************
5957 * ixgbe_sysctl_phy_overtemp_occurred
5958 *
5959 * Reports (directly from the PHY) whether the current PHY
5960 * temperature is over the overtemp threshold.
5961 ************************************************************************/
5962 static int
5963 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5964 {
5965 struct sysctlnode node = *rnode;
5966 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5967 struct ixgbe_hw *hw = &adapter->hw;
5968 int val, error;
5969 u16 reg;
5970
5971 if (ixgbe_fw_recovery_mode_swflag(adapter))
5972 return (EPERM);
5973
5974 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5975 device_printf(adapter->dev,
5976 "Device has no supported external thermal sensor.\n");
5977 return (ENODEV);
5978 }
5979
5980 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5981 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5982 device_printf(adapter->dev,
5983 "Error reading from PHY's temperature status register\n");
5984 return (EAGAIN);
5985 }
5986
5987 node.sysctl_data = &val;
5988
5989 /* Get occurrence bit */
5990 val = !!(reg & 0x4000);
5991
5992 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5993 if ((error) || (newp == NULL))
5994 return (error);
5995
5996 return (0);
5997 } /* ixgbe_sysctl_phy_overtemp_occurred */
5998
5999 /************************************************************************
6000 * ixgbe_sysctl_eee_state
6001 *
6002 * Sysctl to set EEE power saving feature
6003 * Values:
6004 * 0 - disable EEE
6005 * 1 - enable EEE
6006 * (none) - get current device EEE state
6007 ************************************************************************/
6008 static int
6009 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6010 {
6011 struct sysctlnode node = *rnode;
6012 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6013 struct ifnet *ifp = adapter->ifp;
6014 device_t dev = adapter->dev;
6015 int curr_eee, new_eee, error = 0;
6016 s32 retval;
6017
6018 if (ixgbe_fw_recovery_mode_swflag(adapter))
6019 return (EPERM);
6020
6021 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6022 node.sysctl_data = &new_eee;
6023 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6024 if ((error) || (newp == NULL))
6025 return (error);
6026
6027 /* Nothing to do */
6028 if (new_eee == curr_eee)
6029 return (0);
6030
6031 /* Not supported */
6032 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6033 return (EINVAL);
6034
6035 /* Bounds checking */
6036 if ((new_eee < 0) || (new_eee > 1))
6037 return (EINVAL);
6038
6039 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6040 if (retval) {
6041 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6042 return (EINVAL);
6043 }
6044
6045 /* Restart auto-neg */
6046 ifp->if_init(ifp);
6047
6048 device_printf(dev, "New EEE state: %d\n", new_eee);
6049
6050 /* Cache new value */
6051 if (new_eee)
6052 adapter->feat_en |= IXGBE_FEATURE_EEE;
6053 else
6054 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6055
6056 return (error);
6057 } /* ixgbe_sysctl_eee_state */
6058
6059 #define PRINTQS(adapter, regname) \
6060 do { \
6061 struct ixgbe_hw *_hw = &(adapter)->hw; \
6062 int _i; \
6063 \
6064 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6065 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6066 printf((_i == 0) ? "\t" : " "); \
6067 printf("%08x", IXGBE_READ_REG(_hw, \
6068 IXGBE_##regname(_i))); \
6069 } \
6070 printf("\n"); \
6071 } while (0)
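/*
 * For example, PRINTQS(adapter, RDT) prints a single line with the
 * RDT register value of every queue, one 8-digit hex column per queue.
 */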
6072
6073 /************************************************************************
6074 * ixgbe_print_debug_info
6075 *
6076  * Called from ixgbe_sysctl_debug() when its value is set to 1.
6077 * Provides a way to take a look at important statistics
6078 * maintained by the driver and hardware.
6079 ************************************************************************/
6080 static void
6081 ixgbe_print_debug_info(struct adapter *adapter)
6082 {
6083 device_t dev = adapter->dev;
6084 struct ixgbe_hw *hw = &adapter->hw;
6085 int table_size;
6086 int i;
6087
6088 switch (adapter->hw.mac.type) {
6089 case ixgbe_mac_X550:
6090 case ixgbe_mac_X550EM_x:
6091 case ixgbe_mac_X550EM_a:
6092 table_size = 128;
6093 break;
6094 default:
6095 table_size = 32;
6096 break;
6097 }
6098
6099 device_printf(dev, "[E]RETA:\n");
6100 for (i = 0; i < table_size; i++) {
6101 if (i < 32)
6102 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6103 IXGBE_RETA(i)));
6104 else
6105 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6106 IXGBE_ERETA(i - 32)));
6107 }
6108
6109 device_printf(dev, "queue:");
6110 for (i = 0; i < adapter->num_queues; i++) {
6111 printf((i == 0) ? "\t" : " ");
6112 printf("%8d", i);
6113 }
6114 printf("\n");
6115 PRINTQS(adapter, RDBAL);
6116 PRINTQS(adapter, RDBAH);
6117 PRINTQS(adapter, RDLEN);
6118 PRINTQS(adapter, SRRCTL);
6119 PRINTQS(adapter, RDH);
6120 PRINTQS(adapter, RDT);
6121 PRINTQS(adapter, RXDCTL);
6122
6123 device_printf(dev, "RQSMR:");
6124 for (i = 0; i < adapter->num_queues / 4; i++) {
6125 printf((i == 0) ? "\t" : " ");
6126 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6127 }
6128 printf("\n");
6129
6130 device_printf(dev, "disabled_count:");
6131 for (i = 0; i < adapter->num_queues; i++) {
6132 printf((i == 0) ? "\t" : " ");
6133 printf("%8d", adapter->queues[i].disabled_count);
6134 }
6135 printf("\n");
6136
6137 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6138 if (hw->mac.type != ixgbe_mac_82598EB) {
6139 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6140 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6141 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6142 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6143 }
6144 } /* ixgbe_print_debug_info */
6145
6146 /************************************************************************
6147 * ixgbe_sysctl_debug
6148 ************************************************************************/
6149 static int
6150 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6151 {
6152 struct sysctlnode node = *rnode;
6153 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6154 int error, result = 0;
6155
6156 if (ixgbe_fw_recovery_mode_swflag(adapter))
6157 return (EPERM);
6158
6159 node.sysctl_data = &result;
6160 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6161
6162 if (error || newp == NULL)
6163 return error;
6164
6165 if (result == 1)
6166 ixgbe_print_debug_info(adapter);
6167
6168 return 0;
6169 } /* ixgbe_sysctl_debug */
6170
6171 /************************************************************************
6172 * ixgbe_init_device_features
6173 ************************************************************************/
6174 static void
6175 ixgbe_init_device_features(struct adapter *adapter)
6176 {
6177 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6178 | IXGBE_FEATURE_RSS
6179 | IXGBE_FEATURE_MSI
6180 | IXGBE_FEATURE_MSIX
6181 | IXGBE_FEATURE_LEGACY_IRQ
6182 | IXGBE_FEATURE_LEGACY_TX;
6183
6184 /* Set capabilities first... */
6185 switch (adapter->hw.mac.type) {
6186 case ixgbe_mac_82598EB:
6187 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6188 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6189 break;
6190 case ixgbe_mac_X540:
6191 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6192 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6193 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6194 (adapter->hw.bus.func == 0))
6195 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6196 break;
6197 case ixgbe_mac_X550:
6198 /*
6199 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6200 * NVM Image version.
6201 */
6202 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6203 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6204 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6205 break;
6206 case ixgbe_mac_X550EM_x:
6207 /*
6208 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6209 * NVM Image version.
6210 */
6211 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6212 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6213 break;
6214 case ixgbe_mac_X550EM_a:
6215 /*
6216 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6217 * NVM Image version.
6218 */
6219 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6220 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6221 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6222 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6223 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6224 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6225 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6226 }
6227 break;
6228 case ixgbe_mac_82599EB:
6229 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6230 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6231 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6232 (adapter->hw.bus.func == 0))
6233 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6234 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6235 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6236 break;
6237 default:
6238 break;
6239 }
6240
6241 /* Enabled by default... */
6242 /* Fan failure detection */
6243 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6244 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6245 /* Netmap */
6246 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6247 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6248 /* EEE */
6249 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6250 adapter->feat_en |= IXGBE_FEATURE_EEE;
6251 /* Thermal Sensor */
6252 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6253 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6254 /*
6255 * Recovery mode:
6256 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6257 * NVM Image version.
6258 */
6259
6260 /* Enabled via global sysctl... */
6261 /* Flow Director */
6262 if (ixgbe_enable_fdir) {
6263 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6264 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6265 else
6266 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
6267 }
6268 /* Legacy (single queue) transmit */
6269 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6270 ixgbe_enable_legacy_tx)
6271 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6272 /*
6273 * Message Signal Interrupts - Extended (MSI-X)
6274 * Normal MSI is only enabled if MSI-X calls fail.
6275 */
6276 if (!ixgbe_enable_msix)
6277 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6278 /* Receive-Side Scaling (RSS) */
6279 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6280 adapter->feat_en |= IXGBE_FEATURE_RSS;
6281
6282 /* Disable features with unmet dependencies... */
6283 /* No MSI-X */
6284 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6285 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6286 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6287 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6288 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6289 }
6290 } /* ixgbe_init_device_features */
6291
6292 /************************************************************************
6293 * ixgbe_probe - Device identification routine
6294 *
6295 * Determines if the driver should be loaded on
6296 * adapter based on its PCI vendor/device ID.
6297 *
6298 * return BUS_PROBE_DEFAULT on success, positive on failure
6299 ************************************************************************/
6300 static int
6301 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6302 {
6303 const struct pci_attach_args *pa = aux;
6304
6305 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6306 }
6307
6308 static const ixgbe_vendor_info_t *
6309 ixgbe_lookup(const struct pci_attach_args *pa)
6310 {
6311 const ixgbe_vendor_info_t *ent;
6312 pcireg_t subid;
6313
6314 INIT_DEBUGOUT("ixgbe_lookup: begin");
6315
6316 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6317 return NULL;
6318
6319 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6320
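	/* A table entry with a zero subvendor_id or subdevice_id matches
	 * any subsystem ID (wildcard). */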
6321 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6322 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6323 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6324 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6325 (ent->subvendor_id == 0)) &&
6326 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6327 (ent->subdevice_id == 0))) {
6328 return ent;
6329 }
6330 }
6331 return NULL;
6332 }
6333
6334 static int
6335 ixgbe_ifflags_cb(struct ethercom *ec)
6336 {
6337 struct ifnet *ifp = &ec->ec_if;
6338 struct adapter *adapter = ifp->if_softc;
6339 u_short change;
6340 int rv = 0;
6341
6342 IXGBE_CORE_LOCK(adapter);
6343
6344 change = ifp->if_flags ^ adapter->if_flags;
6345 if (change != 0)
6346 adapter->if_flags = ifp->if_flags;
6347
6348 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6349 rv = ENETRESET;
6350 goto out;
6351 } else if ((change & IFF_PROMISC) != 0)
6352 ixgbe_set_rxfilter(adapter);
6353
6354 /* Check for ec_capenable. */
6355 change = ec->ec_capenable ^ adapter->ec_capenable;
6356 adapter->ec_capenable = ec->ec_capenable;
6357 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6358 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6359 rv = ENETRESET;
6360 goto out;
6361 }
6362
6363 /*
6364 * Special handling is not required for ETHERCAP_VLAN_MTU.
6365 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6366 */
6367
6368 /* Set up VLAN support and filter */
6369 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6370 ixgbe_setup_vlan_hw_support(adapter);
6371
6372 out:
6373 IXGBE_CORE_UNLOCK(adapter);
6374
6375 return rv;
6376 }
6377
6378 /************************************************************************
6379 * ixgbe_ioctl - Ioctl entry point
6380 *
6381 * Called when the user wants to configure the interface.
6382 *
6383 * return 0 on success, positive on failure
6384 ************************************************************************/
6385 static int
6386 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6387 {
6388 struct adapter *adapter = ifp->if_softc;
6389 struct ixgbe_hw *hw = &adapter->hw;
6390 struct ifcapreq *ifcr = data;
6391 struct ifreq *ifr = data;
6392 int error = 0;
6393 int l4csum_en;
6394 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6395 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6396
6397 if (ixgbe_fw_recovery_mode_swflag(adapter))
6398 return (EPERM);
6399
6400 switch (command) {
6401 case SIOCSIFFLAGS:
6402 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6403 break;
6404 case SIOCADDMULTI:
6405 case SIOCDELMULTI:
6406 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6407 break;
6408 case SIOCSIFMEDIA:
6409 case SIOCGIFMEDIA:
6410 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6411 break;
6412 case SIOCSIFCAP:
6413 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6414 break;
6415 case SIOCSIFMTU:
6416 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6417 break;
6418 #ifdef __NetBSD__
6419 case SIOCINITIFADDR:
6420 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6421 break;
6422 case SIOCGIFFLAGS:
6423 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6424 break;
6425 case SIOCGIFAFLAG_IN:
6426 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6427 break;
6428 case SIOCGIFADDR:
6429 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6430 break;
6431 case SIOCGIFMTU:
6432 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6433 break;
6434 case SIOCGIFCAP:
6435 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6436 break;
6437 case SIOCGETHERCAP:
6438 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6439 break;
6440 case SIOCGLIFADDR:
6441 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6442 break;
6443 case SIOCZIFDATA:
6444 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6445 hw->mac.ops.clear_hw_cntrs(hw);
6446 ixgbe_clear_evcnt(adapter);
6447 break;
6448 case SIOCAIFADDR:
6449 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6450 break;
6451 #endif
6452 default:
6453 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6454 break;
6455 }
6456
6457 switch (command) {
6458 case SIOCGI2C:
6459 {
6460 struct ixgbe_i2c_req i2c;
6461
6462 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6463 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6464 if (error != 0)
6465 break;
6466 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6467 error = EINVAL;
6468 break;
6469 }
6470 if (i2c.len > sizeof(i2c.data)) {
6471 error = EINVAL;
6472 break;
6473 }
6474
6475 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6476 i2c.dev_addr, i2c.data);
6477 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6478 break;
6479 }
6480 case SIOCSIFCAP:
6481 /* Layer-4 Rx checksum offload has to be turned on and
6482 * off as a unit.
6483 */
6484 l4csum_en = ifcr->ifcr_capenable & l4csum;
6485 if (l4csum_en != l4csum && l4csum_en != 0)
6486 return EINVAL;
6487 /*FALLTHROUGH*/
6488 case SIOCADDMULTI:
6489 case SIOCDELMULTI:
6490 case SIOCSIFFLAGS:
6491 case SIOCSIFMTU:
6492 default:
6493 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6494 return error;
6495 if ((ifp->if_flags & IFF_RUNNING) == 0)
6496 ;
6497 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6498 IXGBE_CORE_LOCK(adapter);
6499 if ((ifp->if_flags & IFF_RUNNING) != 0)
6500 ixgbe_init_locked(adapter);
6501 ixgbe_recalculate_max_frame(adapter);
6502 IXGBE_CORE_UNLOCK(adapter);
6503 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6504 /*
6505 * Multicast list has changed; set the hardware filter
6506 * accordingly.
6507 */
6508 IXGBE_CORE_LOCK(adapter);
6509 ixgbe_disable_intr(adapter);
6510 ixgbe_set_rxfilter(adapter);
6511 ixgbe_enable_intr(adapter);
6512 IXGBE_CORE_UNLOCK(adapter);
6513 }
6514 return 0;
6515 }
6516
6517 return error;
6518 } /* ixgbe_ioctl */
6519
6520 /************************************************************************
6521 * ixgbe_check_fan_failure
6522 ************************************************************************/
6523 static void
6524 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6525 {
6526 u32 mask;
6527
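	/*
	 * The SDP1 fan-failure indication is read from EICR when called
	 * from the interrupt path and sampled from ESDP otherwise.
	 */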
6528 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6529 IXGBE_ESDP_SDP1;
6530
6531 if (reg & mask)
6532 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6533 } /* ixgbe_check_fan_failure */
6534
6535 /************************************************************************
6536 * ixgbe_handle_que
6537 ************************************************************************/
6538 static void
6539 ixgbe_handle_que(void *context)
6540 {
6541 struct ix_queue *que = context;
6542 struct adapter *adapter = que->adapter;
6543 struct tx_ring *txr = que->txr;
6544 struct ifnet *ifp = adapter->ifp;
6545 bool more = false;
6546
6547 que->handleq.ev_count++;
6548
6549 if (ifp->if_flags & IFF_RUNNING) {
6550 more = ixgbe_rxeof(que);
6551 IXGBE_TX_LOCK(txr);
6552 more |= ixgbe_txeof(txr);
6553 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6554 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6555 ixgbe_mq_start_locked(ifp, txr);
6556 /* Only for queue 0 */
6557 /* NetBSD still needs this for CBQ */
6558 if ((&adapter->queues[0] == que)
6559 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6560 ixgbe_legacy_start_locked(ifp, txr);
6561 IXGBE_TX_UNLOCK(txr);
6562 }
6563
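	/*
	 * que->res is only set when an MSI-X vector was established for
	 * this queue; in the INTx/MSI case the global interrupt mask is
	 * re-enabled instead.
	 */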
6564 if (more) {
6565 que->req.ev_count++;
6566 ixgbe_sched_handle_que(adapter, que);
6567 } else if (que->res != NULL) {
6568 /* Re-enable this interrupt */
6569 ixgbe_enable_queue(adapter, que->msix);
6570 } else
6571 ixgbe_enable_intr(adapter);
6572
6573 return;
6574 } /* ixgbe_handle_que */
6575
6576 /************************************************************************
6577 * ixgbe_handle_que_work
6578 ************************************************************************/
6579 static void
6580 ixgbe_handle_que_work(struct work *wk, void *context)
6581 {
6582 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6583
6584 /*
6585 * "enqueued flag" is not required here.
6586 * See ixgbe_msix_que().
6587 */
6588 ixgbe_handle_que(que);
6589 }
6590
6591 /************************************************************************
6592 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6593 ************************************************************************/
6594 static int
6595 ixgbe_allocate_legacy(struct adapter *adapter,
6596 const struct pci_attach_args *pa)
6597 {
6598 device_t dev = adapter->dev;
6599 struct ix_queue *que = adapter->queues;
6600 struct tx_ring *txr = adapter->tx_rings;
6601 int counts[PCI_INTR_TYPE_SIZE];
6602 pci_intr_type_t intr_type, max_type;
6603 char intrbuf[PCI_INTRSTR_LEN];
6604 char wqname[MAXCOMLEN];
6605 const char *intrstr = NULL;
6606 int defertx_error = 0, error;
6607
6608 /* We allocate a single interrupt resource */
6609 max_type = PCI_INTR_TYPE_MSI;
6610 counts[PCI_INTR_TYPE_MSIX] = 0;
6611 counts[PCI_INTR_TYPE_MSI] =
6612 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6613 	/* Check feat_cap, not feat_en, so we can fall back to INTx */
6614 counts[PCI_INTR_TYPE_INTX] =
6615 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6616
6617 alloc_retry:
6618 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6619 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6620 return ENXIO;
6621 }
6622 adapter->osdep.nintrs = 1;
6623 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6624 intrbuf, sizeof(intrbuf));
6625 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6626 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6627 device_xname(dev));
6628 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6629 if (adapter->osdep.ihs[0] == NULL) {
6630 		aprint_error_dev(dev, "unable to establish %s\n",
6631 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6632 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6633 adapter->osdep.intrs = NULL;
6634 switch (intr_type) {
6635 case PCI_INTR_TYPE_MSI:
6636 /* The next try is for INTx: Disable MSI */
6637 max_type = PCI_INTR_TYPE_INTX;
6638 counts[PCI_INTR_TYPE_INTX] = 1;
6639 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6640 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6641 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6642 goto alloc_retry;
6643 } else
6644 break;
6645 case PCI_INTR_TYPE_INTX:
6646 default:
6647 /* See below */
6648 break;
6649 }
6650 }
6651 if (intr_type == PCI_INTR_TYPE_INTX) {
6652 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6653 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6654 }
6655 if (adapter->osdep.ihs[0] == NULL) {
6656 aprint_error_dev(dev,
6657 "couldn't establish interrupt%s%s\n",
6658 intrstr ? " at " : "", intrstr ? intrstr : "");
6659 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6660 adapter->osdep.intrs = NULL;
6661 return ENXIO;
6662 }
6663 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6664 /*
6665 * Try allocating a fast interrupt and the associated deferred
6666 * processing contexts.
6667 */
6668 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6669 txr->txr_si =
6670 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6671 ixgbe_deferred_mq_start, txr);
6672
6673 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6674 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6675 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6676 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6677 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6678 }
6679 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6680 ixgbe_handle_que, que);
6681 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6682 error = workqueue_create(&adapter->que_wq, wqname,
6683 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6684 IXGBE_WORKQUEUE_FLAGS);
6685
6686 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6687 && ((txr->txr_si == NULL) || defertx_error != 0))
6688 || (que->que_si == NULL) || error != 0) {
6689 aprint_error_dev(dev,
6690 "could not establish software interrupts\n");
6691
6692 return ENXIO;
6693 }
6694 /* For simplicity in the handlers */
6695 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6696
6697 return (0);
6698 } /* ixgbe_allocate_legacy */

/************************************************************************
 * ixgbe_allocate_msix - Set up MSI-X interrupt resources and handlers
 ************************************************************************/
static int
ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	pci_chipset_tag_t pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[32];
	char wqname[MAXCOMLEN];
	const char *intrstr = NULL;
	int error, vector = 0;
	int cpu_id = 0;
	kcpuset_t *affinity;
#ifdef RSS
	unsigned int rss_buckets = 0;
	kcpuset_t cpu_mask;
#endif

	pc = adapter->osdep.pc;
#ifdef RSS
	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there are more queues than RSS buckets, we'll end
	 *   up with queues that get no traffic.
	 *
	 * + If there are more RSS buckets than queues, we'll end
	 *   up having multiple RSS buckets map to the same queue,
	 *   so there'll be some contention.
	 */
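	/*
	 * Illustrative example: with 8 RSS buckets and 4 queues, two
	 * buckets end up hashing to each queue.
	 */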
	rss_buckets = rss_getnumbuckets();
	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
	    (adapter->num_queues != rss_buckets)) {
		device_printf(dev,
		    "%s: number of queues (%d) != number of RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, adapter->num_queues, rss_buckets);
	}
#endif

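	/* One vector per TX/RX queue pair, plus one for the admin/link IRQ */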
	adapter->osdep.nintrs = adapter->num_queues + 1;
	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
	    adapter->osdep.nintrs) != 0) {
		aprint_error_dev(dev,
		    "failed to allocate MSI-X interrupt\n");
		return (ENXIO);
	}

	kcpuset_create(&affinity, false);
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
		    device_xname(dev), i);
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
		    intr_xname);
		if (que->res == NULL) {
			aprint_error_dev(dev,
			    "Failed to register QUE handler\n");
			error = ENXIO;
			goto err_out;
		}
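		/* Track this vector in the EIMS-style active-queue bitmask */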
		que->msix = vector;
		adapter->active_queues |= 1ULL << que->msix;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
#ifdef RSS
			/*
			 * The queue ID is used as the RSS layer bucket ID.
			 * We look up the queue ID -> RSS CPU ID and select
			 * that.
			 */
			cpu_id = rss_getcpu(i % rss_getnumbuckets());
			CPU_SETOF(cpu_id, &cpu_mask);
#endif
		} else {
			/*
			 * Bind the MSI-X vector, and thus the
			 * rings to the corresponding CPU.
			 *
			 * This just happens to match the default RSS
			 * round-robin bucket -> queue -> CPU allocation.
			 */
			if (adapter->num_queues > 1)
				cpu_id = i;
		}
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0) {
#if 1 /* def IXGBE_DEBUG */
#ifdef RSS
			aprint_normal(", bound RSS bucket %d to CPU %d", i,
			    cpu_id % ncpu);
#else
			aprint_normal(", bound queue %d to cpu %d", i,
			    cpu_id % ncpu);
#endif
#endif /* IXGBE_DEBUG */
		}
		aprint_normal("\n");

		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			txr->txr_si = softint_establish(
			    SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			    ixgbe_deferred_mq_start, txr);
			if (txr->txr_si == NULL) {
				aprint_error_dev(dev,
				    "couldn't establish software interrupt\n");
				error = ENXIO;
				goto err_out;
			}
		}
		que->que_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_handle_que, que);
		if (que->que_si == NULL) {
			aprint_error_dev(dev,
			    "couldn't establish software interrupt\n");
			error = ENXIO;
			goto err_out;
		}
	}
	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
	error = workqueue_create(&adapter->txr_wq, wqname,
	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "couldn't create workqueue for deferred Tx\n");
		goto err_out;
	}
	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));

	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
		goto err_out;
	}

	/* and Link */
	cpu_id++;
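	/* The link vector gets the next CPU in the round-robin sequence */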
	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
	adapter->vector = vector;
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
	    true);
#endif
	/* Set the link handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
	    intr_xname);
	if (adapter->osdep.ihs[vector] == NULL) {
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		error = ENXIO;
		goto err_out;
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
	    NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s", intrstr);
	if (error == 0)
		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
	else
		aprint_normal("\n");

	kcpuset_destroy(affinity);
	aprint_normal_dev(dev,
	    "Using MSI-X interrupts with %d vectors\n", vector + 1);

	return (0);

err_out:
	kcpuset_destroy(affinity);
	ixgbe_free_deferred_handlers(adapter);
	ixgbe_free_pciintr_resources(adapter);
	return (error);
} /* ixgbe_allocate_msix */

/************************************************************************
 * ixgbe_configure_interrupts
 *
 * Set up MSI-X, MSI, or legacy interrupts (in that order).
 * The result also depends on user settings.
 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_mac_info *mac = &adapter->hw.mac;
	int want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Allow override by tunable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/*
	 * NetBSD only: use single-vector MSI when the number of CPUs
	 * is 1, to save an interrupt slot.
	 */
	if (ncpu == 1)
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
	msgs = MIN(msgs, IXG_MAX_NINTR);
	if (msgs < 2)
		goto msi;

	adapter->msix_mem = (void *)1; /* XXX */

	/* Figure out a reasonable auto config value */
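	/* One message is reserved for the link vector, hence msgs - 1 */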
	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = uimin(queues, rss_getnumbuckets());
#endif
	if (ixgbe_num_queues > queues) {
		aprint_error_dev(adapter->dev,
		    "ixgbe_num_queues (%d) is too large, "
		    "using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	else
		queues = uimin(queues,
		    uimin(mac->max_tx_queues, mac->max_rx_queues));

	/* Reflect the correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional one for the link interrupt.
	 */
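	/*
	 * Illustrative example: with 8 CPUs and 10 MSI-X messages
	 * available, queues = 8 and want = 9, so msgs is trimmed to 9.
	 */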
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		aprint_error_dev(dev, "MSI-X Configuration Problem, "
		    "%d vectors available but %d vectors wanted!\n",
		    msgs, want);
		goto msi;
	}
	adapter->num_queues = queues;
	adapter->feat_en |= IXGBE_FEATURE_MSIX;
	return (0);

	/*
	 * MSI-X allocation failed or provided us with
	 * fewer vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
	adapter->msix_mem = NULL; /* XXX */
	if (msgs > 0) {
		/* A single MSI vector is all we ever use */
		msgs = 1;
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		return (0);
	}

	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		aprint_error_dev(dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;

	return (0);
} /* ixgbe_configure_interrupts */


/************************************************************************
 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
 *
 * Done outside of interrupt context since the driver might sleep
 ************************************************************************/
static void
ixgbe_handle_link(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	++adapter->link_workev.ev_count;
	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
	ixgbe_update_link_status(adapter);

	/* Re-enable link interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
} /* ixgbe_handle_link */

#if 0
/************************************************************************
 * ixgbe_rearm_queues
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
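		/* 64-bit queue mask is split across two 32-bit EICS_EX regs */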
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
