/* $NetBSD: ixgbe.c,v 1.278 2021/01/14 05:47:35 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_quirks(struct adapter *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
static void	ixgbe_init_device_features(struct adapter *);
static int	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_deferred_handlers(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_schedule_admin_tasklet(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Interrupt functions */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);
static void	ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
static int	ixgbe_legacy_irq(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *, bool);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

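/*
 * Note: the driver converts an interrupt rate to an EITR interval as
 * roughly (4000000 / rate), so this default is simply the rate that
 * corresponds to the IXGBE_LOW_LATENCY interval value.
 */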
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses a workqueue or a softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile option
 * right now because it is set during
 * attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of queues; if set to 0, it is
 * autoconfigured based on the number of
 * CPUs, with a max of 8. This can be
 * overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * set higher than RX as this seems to
 * be the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Setting this to true allows the use of
 * unsupported SFP+ modules; note that if
 * you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* NetBSD: always use the default RSS key from rss_getkey(). */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
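	/*
	 * table_size is in entries (four per 32-bit register): 128
	 * entries via the RETA registers on most MACs, 512 on X550
	 * using the additional ERETA registers (see the writes below).
	 */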
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues).
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
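		 * Thus entry n+k ends up in byte k; e.g. queue IDs 0,1,2,3
		 * for four consecutive entries pack to reta == 0x03020100.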
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
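	/* Fold in the multiple receive queue bits for the current IOV mode */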
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
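/*
 * BSIZEPKT_ROUNDUP rounds the receive buffer size up to the next
 * multiple of the SRRCTL.BSIZEPKT granularity
 * ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes) before it is shifted
 * into the SRRCTL register field below.
 */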

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
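		/* e.g. i == 5 maps to RQSMR(1), bits 15:8 */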
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	const char *vendor, *product;

	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
		/*
		 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
		 * MA10-ST0.
		 */
		vendor = pmf_get_platform("system-vendor");
		product = pmf_get_platform("system-product");

		if ((vendor == NULL) || (product == NULL))
			return;

		if ((strcmp(vendor, "GIGABYTE") == 0) &&
		    (strcmp(product, "MA10-ST0") == 0)) {
			aprint_verbose_dev(dev,
			    "Enable SFP+ MOD_ABS inverse quirk\n");
			adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
		}
	}
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   (On NetBSD, attach returns void; failures are reported with
 *   aprint_error_dev and handled via the error path below.)
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter	*adapter;
	struct ixgbe_hw *hw;
	int		error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool		unsupported_sfp = false;
	const char	*str;
	char		wqname[MAXCOMLEN];
	char		buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;
	adapter->osdep.detaching = false;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* Set quirk flags */
	ixgbe_quirks(adapter);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+)
	 * cage and a module inserted, phy.id is not an MII PHY id but an
	 * SFF-8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop_locked;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_deferred_handlers(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16		dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet	*ifp;
	int		rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use a per-TX-queue softint, so if_deferred_start_init()
	 * isn't used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Do not turn this on by default: if VLANs are created on another
	 * pseudo device (e.g. lagg), VLAN events are not passed through,
	 * breaking operation, although with HW FILTER off it works. If
	 * you use VLANs directly on the ixgbe driver you can enable this
	 * and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64		layer;

	layer = adapter->phy_layer;

#define	ADD(mm, dd)							\
	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);

	ADD(IFM_NONE, 0);

	/* Media types with matching NetBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		ADD(IFM_10G_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		ADD(IFM_1000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
		ADD(IFM_100_TX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
		ADD(IFM_10_T | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ADD(IFM_10G_LR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_LX | IFM_FDX, 0);
		}
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ADD(IFM_10G_SR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_SX | IFM_FDX, 0);
		}
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
		ADD(IFM_1000_SX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
		ADD(IFM_10G_CX4 | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		ADD(IFM_10G_KR | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		ADD(IFM_10G_KX4 | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		ADD(IFM_1000_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		ADD(IFM_2500_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
		ADD(IFM_2500_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
		ADD(IFM_5000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		ADD(IFM_1000_BX10 | IFM_FDX, 0);
	/* XXX no ifmedia_set? */

	ADD(IFM_AUTO, 0);

#undef ADD
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (TRUE);
		return (FALSE);
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (TRUE);
		default:
			return (FALSE);
		}
	default:
		return (FALSE);
	}
} /* ixgbe_is_sfp */

static void
ixgbe_schedule_admin_tasklet(struct adapter *adapter)
{

	KASSERT(mutex_owned(&adapter->admin_mtx));

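	/*
	 * admin_pending ensures the work is enqueued at most once until
	 * the admin handler runs.
	 */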
	if (__predict_true(adapter->osdep.detaching == false)) {
		if (adapter->admin_pending == 0)
			workqueue_enqueue(adapter->admin_wq,
			    &adapter->admin_wc, NULL);
		adapter->admin_pending = 1;
	}
}

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg, err = 0;
	u32		task_requests = 0;
	bool		sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
		}
		task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;

		mutex_enter(&adapter->admin_mtx);
		adapter->task_requests |= task_requests;
		ixgbe_schedule_admin_tasklet(adapter);
		mutex_exit(&adapter->admin_mtx);
	} else {
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * If this is the first call, get the value for
		 * autonegotiation.
		 */
1589 autoneg = hw->phy.autoneg_advertised;
1590 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1591 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1592 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1593 &negotiate);
1594 if (err)
1595 return;
1596 if (hw->mac.ops.setup_link)
1597 err = hw->mac.ops.setup_link(hw, autoneg,
1598 adapter->link_up);
1599 }
1600
1601 } /* ixgbe_config_link */
1602
1603 /************************************************************************
1604 * ixgbe_update_stats_counters - Update board statistics counters.
1605 ************************************************************************/
1606 static void
1607 ixgbe_update_stats_counters(struct adapter *adapter)
1608 {
1609 struct ifnet *ifp = adapter->ifp;
1610 struct ixgbe_hw *hw = &adapter->hw;
1611 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1612 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1613 u64 total_missed_rx = 0;
1614 uint64_t crcerrs, rlec;
1615 unsigned int queue_counters;
1616 int i;
1617
1618 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1619 stats->crcerrs.ev_count += crcerrs;
1620 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1621 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1622 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1623 if (hw->mac.type >= ixgbe_mac_X550)
1624 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1625
1626 	/* Only 16 per-queue counter registers exist */
1627 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1628 for (i = 0; i < queue_counters; i++) {
1629 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1630 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1631 if (hw->mac.type >= ixgbe_mac_82599EB) {
1632 stats->qprdc[i].ev_count
1633 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1634 }
1635 }
1636
1637 	/* Only 8 per-traffic-class counter registers exist */
1638 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1639 uint32_t mp;
1640
1641 /* MPC */
1642 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1643 /* global total per queue */
1644 stats->mpc[i].ev_count += mp;
1645 /* running comprehensive total for stats display */
1646 total_missed_rx += mp;
1647
1648 if (hw->mac.type == ixgbe_mac_82598EB)
1649 stats->rnbc[i].ev_count
1650 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1651
1652 stats->pxontxc[i].ev_count
1653 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1654 stats->pxofftxc[i].ev_count
1655 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1656 if (hw->mac.type >= ixgbe_mac_82599EB) {
1657 stats->pxonrxc[i].ev_count
1658 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1659 stats->pxoffrxc[i].ev_count
1660 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1661 stats->pxon2offc[i].ev_count
1662 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1663 } else {
1664 stats->pxonrxc[i].ev_count
1665 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1666 stats->pxoffrxc[i].ev_count
1667 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1668 }
1669 }
1670 stats->mpctotal.ev_count += total_missed_rx;
1671
1672 	/* The datasheet says M[LR]FC are only valid when link is up at 10Gbps */
1673 if ((adapter->link_active == LINK_STATE_UP)
1674 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1675 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1676 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1677 }
1678 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1679 stats->rlec.ev_count += rlec;
1680
1681 /* Hardware workaround, gprc counts missed packets */
1682 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1683
1684 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1685 stats->lxontxc.ev_count += lxon;
1686 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1687 stats->lxofftxc.ev_count += lxoff;
1688 total = lxon + lxoff;
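	/*
	 * XON/XOFF pause frames are minimum-sized frames and are included
	 * in the transmit octet/packet counters, so they are subtracted
	 * back out of gotc/gptc/mptc/ptc64 below.
	 */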
1689
1690 if (hw->mac.type != ixgbe_mac_82598EB) {
1691 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1692 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1693 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1694 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1695 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1696 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1697 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1698 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1699 } else {
1700 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1701 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1702 /* 82598 only has a counter in the high register */
1703 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1704 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1705 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1706 }
1707
1708 /*
1709 * Workaround: mprc hardware is incorrectly counting
1710 * broadcasts, so for now we subtract those.
1711 */
1712 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1713 stats->bprc.ev_count += bprc;
1714 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1715 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1716
1717 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1718 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1719 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1720 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1721 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1722 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1723
1724 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1725 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1726 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1727
1728 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1729 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1730 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1731 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1732 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1733 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1734 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1735 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1736 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1737 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1738 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1739 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1740 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1741 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1742 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1743 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1744 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1745 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1746 	/* FCoE counters exist only on 82599 and newer */
1747 if (hw->mac.type != ixgbe_mac_82598EB) {
1748 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1749 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1750 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1751 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1752 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1753 }
1754
1755 /*
1756 * Fill out the OS statistics structure. Only RX errors are required
1757 * here because all TX counters are incremented in the TX path and
1758 * normal RX counters are prepared in ether_input().
1759 */
1760 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1761 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1762 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1763 IF_STAT_PUTREF(ifp);
1764 } /* ixgbe_update_stats_counters */
1765
1766 /************************************************************************
1767 * ixgbe_add_hw_stats
1768 *
1769 * Add sysctl variables, one per statistic, to the system.
1770 ************************************************************************/
1771 static void
1772 ixgbe_add_hw_stats(struct adapter *adapter)
1773 {
1774 device_t dev = adapter->dev;
1775 const struct sysctlnode *rnode, *cnode;
1776 struct sysctllog **log = &adapter->sysctllog;
1777 struct tx_ring *txr = adapter->tx_rings;
1778 struct rx_ring *rxr = adapter->rx_rings;
1779 struct ixgbe_hw *hw = &adapter->hw;
1780 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1781 const char *xname = device_xname(dev);
1782 int i;
1783
1784 /* Driver Statistics */
1785 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1786 NULL, xname, "Driver tx dma soft fail EFBIG");
1787 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1788 NULL, xname, "m_defrag() failed");
1789 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1790 NULL, xname, "Driver tx dma hard fail EFBIG");
1791 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1792 NULL, xname, "Driver tx dma hard fail EINVAL");
1793 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1794 NULL, xname, "Driver tx dma hard fail other");
1795 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1796 NULL, xname, "Driver tx dma soft fail EAGAIN");
1797 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1798 NULL, xname, "Driver tx dma soft fail ENOMEM");
1799 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1800 NULL, xname, "Watchdog timeouts");
1801 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1802 NULL, xname, "TSO errors");
1803 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1804 NULL, xname, "Admin MSI-X IRQ Handled");
1805 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1806 NULL, xname, "Link event");
1807 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1808 NULL, xname, "SFP+ module event");
1809 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1810 NULL, xname, "Multispeed event");
1811 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1812 NULL, xname, "External PHY event");
1813
1814 	/* The maximum number of traffic classes is 8 */
1815 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1816 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1817 snprintf(adapter->tcs[i].evnamebuf,
1818 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1819 xname, i);
1820 if (i < __arraycount(stats->mpc)) {
1821 evcnt_attach_dynamic(&stats->mpc[i],
1822 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1823 "RX Missed Packet Count");
1824 if (hw->mac.type == ixgbe_mac_82598EB)
1825 evcnt_attach_dynamic(&stats->rnbc[i],
1826 EVCNT_TYPE_MISC, NULL,
1827 adapter->tcs[i].evnamebuf,
1828 "Receive No Buffers");
1829 }
1830 if (i < __arraycount(stats->pxontxc)) {
1831 evcnt_attach_dynamic(&stats->pxontxc[i],
1832 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1833 "pxontxc");
1834 evcnt_attach_dynamic(&stats->pxonrxc[i],
1835 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1836 "pxonrxc");
1837 evcnt_attach_dynamic(&stats->pxofftxc[i],
1838 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1839 "pxofftxc");
1840 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1842 "pxoffrxc");
1843 if (hw->mac.type >= ixgbe_mac_82599EB)
1844 evcnt_attach_dynamic(&stats->pxon2offc[i],
1845 EVCNT_TYPE_MISC, NULL,
1846 adapter->tcs[i].evnamebuf,
1847 "pxon2offc");
1848 }
1849 }
1850
1851 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1852 #ifdef LRO
1853 struct lro_ctrl *lro = &rxr->lro;
1854 #endif /* LRO */
1855
1856 snprintf(adapter->queues[i].evnamebuf,
1857 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1858 xname, i);
1859 snprintf(adapter->queues[i].namebuf,
1860 sizeof(adapter->queues[i].namebuf), "q%d", i);
1861
1862 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1863 aprint_error_dev(dev, "could not create sysctl root\n");
1864 break;
1865 }
1866
1867 if (sysctl_createv(log, 0, &rnode, &rnode,
1868 0, CTLTYPE_NODE,
1869 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1870 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1871 break;
1872
1873 if (sysctl_createv(log, 0, &rnode, &cnode,
1874 CTLFLAG_READWRITE, CTLTYPE_INT,
1875 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1876 ixgbe_sysctl_interrupt_rate_handler, 0,
1877 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1878 break;
1879
1880 if (sysctl_createv(log, 0, &rnode, &cnode,
1881 CTLFLAG_READONLY, CTLTYPE_INT,
1882 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1883 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1884 0, CTL_CREATE, CTL_EOL) != 0)
1885 break;
1886
1887 if (sysctl_createv(log, 0, &rnode, &cnode,
1888 CTLFLAG_READONLY, CTLTYPE_INT,
1889 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1890 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1891 0, CTL_CREATE, CTL_EOL) != 0)
1892 break;
1893
1894 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1895 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1896 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1897 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1898 "Handled queue in softint");
1899 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1900 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1901 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1902 NULL, adapter->queues[i].evnamebuf, "TSO");
1903 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1904 NULL, adapter->queues[i].evnamebuf,
1905 "TX Queue No Descriptor Available");
1906 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1907 NULL, adapter->queues[i].evnamebuf,
1908 "Queue Packets Transmitted");
1909 #ifndef IXGBE_LEGACY_TX
1910 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1911 NULL, adapter->queues[i].evnamebuf,
1912 "Packets dropped in pcq");
1913 #endif
1914
1915 if (sysctl_createv(log, 0, &rnode, &cnode,
1916 CTLFLAG_READONLY,
1917 CTLTYPE_INT,
1918 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1919 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1920 CTL_CREATE, CTL_EOL) != 0)
1921 break;
1922
1923 if (sysctl_createv(log, 0, &rnode, &cnode,
1924 CTLFLAG_READONLY,
1925 CTLTYPE_INT,
1926 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1927 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1928 CTL_CREATE, CTL_EOL) != 0)
1929 break;
1930
1931 if (sysctl_createv(log, 0, &rnode, &cnode,
1932 CTLFLAG_READONLY,
1933 CTLTYPE_INT,
1934 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1935 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1936 CTL_CREATE, CTL_EOL) != 0)
1937 break;
1938
1939 if (i < __arraycount(stats->qprc)) {
1940 evcnt_attach_dynamic(&stats->qprc[i],
1941 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1942 "qprc");
1943 evcnt_attach_dynamic(&stats->qptc[i],
1944 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1945 "qptc");
1946 evcnt_attach_dynamic(&stats->qbrc[i],
1947 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1948 "qbrc");
1949 evcnt_attach_dynamic(&stats->qbtc[i],
1950 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1951 "qbtc");
1952 if (hw->mac.type >= ixgbe_mac_82599EB)
1953 evcnt_attach_dynamic(&stats->qprdc[i],
1954 EVCNT_TYPE_MISC, NULL,
1955 adapter->queues[i].evnamebuf, "qprdc");
1956 }
1957
1958 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1959 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1960 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1961 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1962 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1963 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1964 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1965 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1966 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1967 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1968 #ifdef LRO
1969 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1970 CTLFLAG_RD, &lro->lro_queued, 0,
1971 "LRO Queued");
1972 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1973 CTLFLAG_RD, &lro->lro_flushed, 0,
1974 "LRO Flushed");
1975 #endif /* LRO */
1976 }
1977
1978 /* MAC stats get their own sub node */
1979
1980 snprintf(stats->namebuf,
1981 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1982
1983 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "rx csum offload - IP");
1985 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "rx csum offload - L4");
1987 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "rx csum offload - IP bad");
1989 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "rx csum offload - L4 bad");
1991 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Interrupt conditions zero");
1993 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Legacy interrupts");
1995
1996 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "CRC Errors");
1998 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Illegal Byte Errors");
2000 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Byte Errors");
2002 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "MAC Short Packets Discarded");
2004 if (hw->mac.type >= ixgbe_mac_X550)
2005 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "Bad SFD");
2007 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "Total Packets Missed");
2009 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "MAC Local Faults");
2011 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "MAC Remote Faults");
2013 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Receive Length Errors");
2015 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Link XON Transmitted");
2017 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Link XON Received");
2019 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Link XOFF Transmitted");
2021 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Link XOFF Received");
2023
2024 /* Packet Reception Stats */
2025 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Total Octets Received");
2027 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Good Octets Received");
2029 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Total Packets Received");
2031 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Good Packets Received");
2033 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "Multicast Packets Received");
2035 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "Broadcast Packets Received");
2037 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2038 	    stats->namebuf, "64 byte frames received");
2039 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "65-127 byte frames received");
2041 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "128-255 byte frames received");
2043 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "256-511 byte frames received");
2045 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "512-1023 byte frames received");
2047 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2048 	    stats->namebuf, "1024-1522 byte frames received");
2049 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2050 stats->namebuf, "Receive Undersized");
2051 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2052 	    stats->namebuf, "Fragmented Packets Received");
2053 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2054 stats->namebuf, "Oversized Packets Received");
2055 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2056 stats->namebuf, "Received Jabber");
2057 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2058 stats->namebuf, "Management Packets Received");
2059 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2060 stats->namebuf, "Management Packets Dropped");
2061 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2062 stats->namebuf, "Checksum Errors");
2063
2064 /* Packet Transmission Stats */
2065 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2066 stats->namebuf, "Good Octets Transmitted");
2067 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2068 stats->namebuf, "Total Packets Transmitted");
2069 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2070 stats->namebuf, "Good Packets Transmitted");
2071 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2072 stats->namebuf, "Broadcast Packets Transmitted");
2073 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2074 stats->namebuf, "Multicast Packets Transmitted");
2075 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2076 stats->namebuf, "Management Packets Transmitted");
2077 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2078 	    stats->namebuf, "64 byte frames transmitted");
2079 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2080 stats->namebuf, "65-127 byte frames transmitted");
2081 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2082 stats->namebuf, "128-255 byte frames transmitted");
2083 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2084 stats->namebuf, "256-511 byte frames transmitted");
2085 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2086 stats->namebuf, "512-1023 byte frames transmitted");
2087 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2088 stats->namebuf, "1024-1522 byte frames transmitted");
2089 } /* ixgbe_add_hw_stats */
2090
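/************************************************************************
 * ixgbe_clear_evcnt
 *
 *   Reset every driver, per-queue, per-TC and MAC event counter to
 *   zero. This list must be kept in sync with the counters attached
 *   in ixgbe_add_hw_stats() above.
 ************************************************************************/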
2091 static void
2092 ixgbe_clear_evcnt(struct adapter *adapter)
2093 {
2094 struct tx_ring *txr = adapter->tx_rings;
2095 struct rx_ring *rxr = adapter->rx_rings;
2096 struct ixgbe_hw *hw = &adapter->hw;
2097 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2098 int i;
2099
2100 adapter->efbig_tx_dma_setup.ev_count = 0;
2101 adapter->mbuf_defrag_failed.ev_count = 0;
2102 adapter->efbig2_tx_dma_setup.ev_count = 0;
2103 adapter->einval_tx_dma_setup.ev_count = 0;
2104 adapter->other_tx_dma_setup.ev_count = 0;
2105 adapter->eagain_tx_dma_setup.ev_count = 0;
2106 adapter->enomem_tx_dma_setup.ev_count = 0;
2107 adapter->tso_err.ev_count = 0;
2108 adapter->watchdog_events.ev_count = 0;
2109 adapter->admin_irqev.ev_count = 0;
2110 adapter->link_workev.ev_count = 0;
2111 adapter->mod_workev.ev_count = 0;
2112 adapter->msf_workev.ev_count = 0;
2113 adapter->phy_workev.ev_count = 0;
2114
2115 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2116 if (i < __arraycount(stats->mpc)) {
2117 stats->mpc[i].ev_count = 0;
2118 if (hw->mac.type == ixgbe_mac_82598EB)
2119 stats->rnbc[i].ev_count = 0;
2120 }
2121 if (i < __arraycount(stats->pxontxc)) {
2122 stats->pxontxc[i].ev_count = 0;
2123 stats->pxonrxc[i].ev_count = 0;
2124 stats->pxofftxc[i].ev_count = 0;
2125 stats->pxoffrxc[i].ev_count = 0;
2126 if (hw->mac.type >= ixgbe_mac_82599EB)
2127 stats->pxon2offc[i].ev_count = 0;
2128 }
2129 }
2130
2131 txr = adapter->tx_rings;
2132 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2133 adapter->queues[i].irqs.ev_count = 0;
2134 adapter->queues[i].handleq.ev_count = 0;
2135 adapter->queues[i].req.ev_count = 0;
2136 txr->no_desc_avail.ev_count = 0;
2137 txr->total_packets.ev_count = 0;
2138 txr->tso_tx.ev_count = 0;
2139 #ifndef IXGBE_LEGACY_TX
2140 txr->pcq_drops.ev_count = 0;
2141 #endif
2142 txr->q_efbig_tx_dma_setup = 0;
2143 txr->q_mbuf_defrag_failed = 0;
2144 txr->q_efbig2_tx_dma_setup = 0;
2145 txr->q_einval_tx_dma_setup = 0;
2146 txr->q_other_tx_dma_setup = 0;
2147 txr->q_eagain_tx_dma_setup = 0;
2148 txr->q_enomem_tx_dma_setup = 0;
2149 txr->q_tso_err = 0;
2150
2151 if (i < __arraycount(stats->qprc)) {
2152 stats->qprc[i].ev_count = 0;
2153 stats->qptc[i].ev_count = 0;
2154 stats->qbrc[i].ev_count = 0;
2155 stats->qbtc[i].ev_count = 0;
2156 if (hw->mac.type >= ixgbe_mac_82599EB)
2157 stats->qprdc[i].ev_count = 0;
2158 }
2159
2160 rxr->rx_packets.ev_count = 0;
2161 rxr->rx_bytes.ev_count = 0;
2162 rxr->rx_copies.ev_count = 0;
2163 rxr->no_jmbuf.ev_count = 0;
2164 rxr->rx_discarded.ev_count = 0;
2165 }
2166 stats->ipcs.ev_count = 0;
2167 stats->l4cs.ev_count = 0;
2168 stats->ipcs_bad.ev_count = 0;
2169 stats->l4cs_bad.ev_count = 0;
2170 stats->intzero.ev_count = 0;
2171 stats->legint.ev_count = 0;
2172 stats->crcerrs.ev_count = 0;
2173 stats->illerrc.ev_count = 0;
2174 stats->errbc.ev_count = 0;
2175 stats->mspdc.ev_count = 0;
2176 if (hw->mac.type >= ixgbe_mac_X550)
2177 stats->mbsdc.ev_count = 0;
2178 stats->mpctotal.ev_count = 0;
2179 stats->mlfc.ev_count = 0;
2180 stats->mrfc.ev_count = 0;
2181 stats->rlec.ev_count = 0;
2182 stats->lxontxc.ev_count = 0;
2183 stats->lxonrxc.ev_count = 0;
2184 stats->lxofftxc.ev_count = 0;
2185 stats->lxoffrxc.ev_count = 0;
2186
2187 /* Packet Reception Stats */
2188 stats->tor.ev_count = 0;
2189 stats->gorc.ev_count = 0;
2190 stats->tpr.ev_count = 0;
2191 stats->gprc.ev_count = 0;
2192 stats->mprc.ev_count = 0;
2193 stats->bprc.ev_count = 0;
2194 stats->prc64.ev_count = 0;
2195 stats->prc127.ev_count = 0;
2196 stats->prc255.ev_count = 0;
2197 stats->prc511.ev_count = 0;
2198 stats->prc1023.ev_count = 0;
2199 stats->prc1522.ev_count = 0;
2200 stats->ruc.ev_count = 0;
2201 stats->rfc.ev_count = 0;
2202 stats->roc.ev_count = 0;
2203 stats->rjc.ev_count = 0;
2204 stats->mngprc.ev_count = 0;
2205 stats->mngpdc.ev_count = 0;
2206 stats->xec.ev_count = 0;
2207
2208 /* Packet Transmission Stats */
2209 stats->gotc.ev_count = 0;
2210 stats->tpt.ev_count = 0;
2211 stats->gptc.ev_count = 0;
2212 stats->bptc.ev_count = 0;
2213 stats->mptc.ev_count = 0;
2214 stats->mngptc.ev_count = 0;
2215 stats->ptc64.ev_count = 0;
2216 stats->ptc127.ev_count = 0;
2217 stats->ptc255.ev_count = 0;
2218 stats->ptc511.ev_count = 0;
2219 stats->ptc1023.ev_count = 0;
2220 stats->ptc1522.ev_count = 0;
2221 }
2222
2223 /************************************************************************
2224 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2225 *
2226 * Retrieves the TDH value from the hardware
2227 ************************************************************************/
2228 static int
2229 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2230 {
2231 struct sysctlnode node = *rnode;
2232 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2233 struct adapter *adapter;
2234 uint32_t val;
2235
2236 if (!txr)
2237 return (0);
2238
2239 adapter = txr->adapter;
2240 if (ixgbe_fw_recovery_mode_swflag(adapter))
2241 return (EPERM);
2242
2243 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2244 node.sysctl_data = &val;
2245 return sysctl_lookup(SYSCTLFN_CALL(&node));
2246 } /* ixgbe_sysctl_tdh_handler */
2247
2248 /************************************************************************
2249 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2250 *
2251 * Retrieves the TDT value from the hardware
2252 ************************************************************************/
2253 static int
2254 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2255 {
2256 struct sysctlnode node = *rnode;
2257 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2258 struct adapter *adapter;
2259 uint32_t val;
2260
2261 if (!txr)
2262 return (0);
2263
2264 adapter = txr->adapter;
2265 if (ixgbe_fw_recovery_mode_swflag(adapter))
2266 return (EPERM);
2267
2268 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2269 node.sysctl_data = &val;
2270 return sysctl_lookup(SYSCTLFN_CALL(&node));
2271 } /* ixgbe_sysctl_tdt_handler */
2272
2273 /************************************************************************
2274 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2275 * handler function
2276 *
2277 * Retrieves the next_to_check value
2278 ************************************************************************/
2279 static int
2280 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2281 {
2282 struct sysctlnode node = *rnode;
2283 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2284 struct adapter *adapter;
2285 uint32_t val;
2286
2287 if (!rxr)
2288 return (0);
2289
2290 adapter = rxr->adapter;
2291 if (ixgbe_fw_recovery_mode_swflag(adapter))
2292 return (EPERM);
2293
2294 val = rxr->next_to_check;
2295 node.sysctl_data = &val;
2296 return sysctl_lookup(SYSCTLFN_CALL(&node));
2297 } /* ixgbe_sysctl_next_to_check_handler */
2298
2299 /************************************************************************
2300 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2301 *
2302 * Retrieves the RDH value from the hardware
2303 ************************************************************************/
2304 static int
2305 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2306 {
2307 struct sysctlnode node = *rnode;
2308 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2309 struct adapter *adapter;
2310 uint32_t val;
2311
2312 if (!rxr)
2313 return (0);
2314
2315 adapter = rxr->adapter;
2316 if (ixgbe_fw_recovery_mode_swflag(adapter))
2317 return (EPERM);
2318
2319 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2320 node.sysctl_data = &val;
2321 return sysctl_lookup(SYSCTLFN_CALL(&node));
2322 } /* ixgbe_sysctl_rdh_handler */
2323
2324 /************************************************************************
2325 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2326 *
2327 * Retrieves the RDT value from the hardware
2328 ************************************************************************/
2329 static int
2330 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2331 {
2332 struct sysctlnode node = *rnode;
2333 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2334 struct adapter *adapter;
2335 uint32_t val;
2336
2337 if (!rxr)
2338 return (0);
2339
2340 adapter = rxr->adapter;
2341 if (ixgbe_fw_recovery_mode_swflag(adapter))
2342 return (EPERM);
2343
2344 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2345 node.sysctl_data = &val;
2346 return sysctl_lookup(SYSCTLFN_CALL(&node));
2347 } /* ixgbe_sysctl_rdt_handler */
2348
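/************************************************************************
 * ixgbe_vlan_cb
 *
 *   Ethercom callback invoked when a VLAN ID is added to or removed
 *   from the interface. Updates the HW filter table and, on the first
 *   or last VLAN, reprograms HW tag stripping.
 ************************************************************************/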
2349 static int
2350 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2351 {
2352 struct ifnet *ifp = &ec->ec_if;
2353 struct adapter *adapter = ifp->if_softc;
2354 int rv;
2355
2356 if (set)
2357 rv = ixgbe_register_vlan(adapter, vid);
2358 else
2359 rv = ixgbe_unregister_vlan(adapter, vid);
2360
2361 if (rv != 0)
2362 return rv;
2363
2364 	/*
2365 	 * Reconfigure VLAN HW tagging when ec_nvlans transitions between
2366 	 * 0 and 1 in either direction.
2367 	 */
2368 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2369 ixgbe_setup_vlan_hw_tagging(adapter);
2370
2371 return rv;
2372 }
2373
2374 /************************************************************************
2375 * ixgbe_register_vlan
2376 *
2377 * Run via vlan config EVENT, it enables us to use the
2378 * HW Filter table since we can get the vlan id. This
2379 * just creates the entry in the soft version of the
2380 * VFTA, init will repopulate the real table.
2381 ************************************************************************/
2382 static int
2383 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2384 {
2385 u16 index, bit;
2386 int error;
2387
2388 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2389 return EINVAL;
2390
2391 IXGBE_CORE_LOCK(adapter);
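	/*
	 * The VFTA is 128 32-bit words covering VLAN IDs 0-4095; bits
	 * 5..11 of the tag select the word and bits 0..4 the bit in it.
	 */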
2392 index = (vtag >> 5) & 0x7F;
2393 bit = vtag & 0x1F;
2394 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2395 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2396 true);
2397 IXGBE_CORE_UNLOCK(adapter);
2398 if (error != 0)
2399 error = EACCES;
2400
2401 return error;
2402 } /* ixgbe_register_vlan */
2403
2404 /************************************************************************
2405 * ixgbe_unregister_vlan
2406 *
2407 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2408 ************************************************************************/
2409 static int
2410 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2411 {
2412 u16 index, bit;
2413 int error;
2414
2415 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2416 return EINVAL;
2417
2418 IXGBE_CORE_LOCK(adapter);
2419 index = (vtag >> 5) & 0x7F;
2420 bit = vtag & 0x1F;
2421 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2422 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2423 true);
2424 IXGBE_CORE_UNLOCK(adapter);
2425 if (error != 0)
2426 error = EACCES;
2427
2428 return error;
2429 } /* ixgbe_unregister_vlan */
2430
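/************************************************************************
 * ixgbe_setup_vlan_hw_tagging
 *
 *   Enable or disable VLAN tag stripping in hardware: per queue via
 *   RXDCTL on 82599 and later, globally via VLNCTRL on 82598.
 ************************************************************************/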
2431 static void
2432 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2433 {
2434 struct ethercom *ec = &adapter->osdep.ec;
2435 struct ixgbe_hw *hw = &adapter->hw;
2436 struct rx_ring *rxr;
2437 u32 ctrl;
2438 int i;
2439 bool hwtagging;
2440
2441 /* Enable HW tagging only if any vlan is attached */
2442 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2443 && VLAN_ATTACHED(ec);
2444
2445 /* Setup the queues for vlans */
2446 for (i = 0; i < adapter->num_queues; i++) {
2447 rxr = &adapter->rx_rings[i];
2448 		/*
2449 		 * On 82599 and later, VLAN enable is per-queue in RXDCTL.
2450 		 */
2451 if (hw->mac.type != ixgbe_mac_82598EB) {
2452 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2453 if (hwtagging)
2454 ctrl |= IXGBE_RXDCTL_VME;
2455 else
2456 ctrl &= ~IXGBE_RXDCTL_VME;
2457 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2458 }
2459 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2460 }
2461
2462 /* VLAN hw tagging for 82598 */
2463 if (hw->mac.type == ixgbe_mac_82598EB) {
2464 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2465 if (hwtagging)
2466 ctrl |= IXGBE_VLNCTRL_VME;
2467 else
2468 ctrl &= ~IXGBE_VLNCTRL_VME;
2469 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2470 }
2471 } /* ixgbe_setup_vlan_hw_tagging */
2472
2473 static void
2474 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2475 {
2476 struct ethercom *ec = &adapter->osdep.ec;
2477 struct ixgbe_hw *hw = &adapter->hw;
2478 int i;
2479 u32 ctrl;
2480 struct vlanid_list *vlanidp;
2481
2482 /*
2483 * This function is called from both if_init and ifflags_cb()
2484 * on NetBSD.
2485 */
2486
2487 /*
2488 * Part 1:
2489 * Setup VLAN HW tagging
2490 */
2491 ixgbe_setup_vlan_hw_tagging(adapter);
2492
2493 /*
2494 * Part 2:
2495 * Setup VLAN HW filter
2496 */
2497 /* Cleanup shadow_vfta */
2498 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2499 adapter->shadow_vfta[i] = 0;
2500 /* Generate shadow_vfta from ec_vids */
2501 ETHER_LOCK(ec);
2502 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2503 uint32_t idx;
2504
2505 idx = vlanidp->vid / 32;
2506 KASSERT(idx < IXGBE_VFTA_SIZE);
2507 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2508 }
2509 ETHER_UNLOCK(ec);
2510 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2511 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2512
2513 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2514 /* Enable the Filter Table if enabled */
2515 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2516 ctrl |= IXGBE_VLNCTRL_VFE;
2517 else
2518 ctrl &= ~IXGBE_VLNCTRL_VFE;
2519 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2520 } /* ixgbe_setup_vlan_hw_support */
2521
2522 /************************************************************************
2523 * ixgbe_get_slot_info
2524 *
2525 * Get the width and transaction speed of
2526 * the slot this adapter is plugged into.
2527 ************************************************************************/
2528 static void
2529 ixgbe_get_slot_info(struct adapter *adapter)
2530 {
2531 device_t dev = adapter->dev;
2532 struct ixgbe_hw *hw = &adapter->hw;
2533 u32 offset;
2534 u16 link;
2535 int bus_info_valid = TRUE;
2536
2537 /* Some devices are behind an internal bridge */
2538 switch (hw->device_id) {
2539 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2540 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2541 goto get_parent_info;
2542 default:
2543 break;
2544 }
2545
2546 ixgbe_get_bus_info(hw);
2547
2548 	/*
2549 	 * Some devices don't use PCI-E; for those, return instead of
2550 	 * displaying "Unknown" for bus speed and width.
2551 	 */
2552 switch (hw->mac.type) {
2553 case ixgbe_mac_X550EM_x:
2554 case ixgbe_mac_X550EM_a:
2555 return;
2556 default:
2557 goto display;
2558 }
2559
2560 get_parent_info:
2561 /*
2562 * For the Quad port adapter we need to parse back
2563 * up the PCI tree to find the speed of the expansion
2564 * slot into which this adapter is plugged. A bit more work.
2565 */
2566 dev = device_parent(device_parent(dev));
2567 #if 0
2568 #ifdef IXGBE_DEBUG
2569 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2570 pci_get_slot(dev), pci_get_function(dev));
2571 #endif
2572 dev = device_parent(device_parent(dev));
2573 #ifdef IXGBE_DEBUG
2574 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2575 pci_get_slot(dev), pci_get_function(dev));
2576 #endif
2577 #endif
2578 /* Now get the PCI Express Capabilities offset */
2579 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2580 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2581 		/*
2582 		 * Can't get the PCI-Express capabilities;
2583 		 * fall back to the default method.
2584 		 */
2585 bus_info_valid = FALSE;
2586 ixgbe_get_bus_info(hw);
2587 goto display;
2588 }
2589 /* ...and read the Link Status Register */
2590 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2591 offset + PCIE_LCSR) >> 16;
2592 ixgbe_set_pci_config_data_generic(hw, link);
2593
2594 display:
2595 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2596 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2597 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2598 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2599 "Unknown"),
2600 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2601 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2602 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2603 "Unknown"));
2604
2605 if (bus_info_valid) {
2606 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2607 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2608 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2609 			device_printf(dev, "PCI-Express bandwidth available"
2610 			    " for this card is not sufficient for"
2611 			    " optimal performance.\n");
2612 device_printf(dev, "For optimal performance a x8 "
2613 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2614 }
2615 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2616 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2617 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2618 			device_printf(dev, "PCI-Express bandwidth available"
2619 			    " for this card is not sufficient for"
2620 			    " optimal performance.\n");
2621 device_printf(dev, "For optimal performance a x8 "
2622 "PCIE Gen3 slot is required.\n");
2623 }
2624 } else
2625 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
2626
2627 return;
2628 } /* ixgbe_get_slot_info */
2629
2630 /************************************************************************
2631 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2632 ************************************************************************/
2633 static inline void
2634 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2635 {
2636 struct ixgbe_hw *hw = &adapter->hw;
2637 struct ix_queue *que = &adapter->queues[vector];
2638 u64 queue = 1ULL << vector;
2639 u32 mask;
2640
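	/*
	 * disabled_count tracks nested ixgbe_disable_queue() calls; the
	 * interrupt is re-enabled only when the count drops back to zero.
	 */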
2641 mutex_enter(&que->dc_mtx);
2642 if (que->disabled_count > 0 && --que->disabled_count > 0)
2643 goto out;
2644
2645 if (hw->mac.type == ixgbe_mac_82598EB) {
2646 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2647 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2648 } else {
2649 mask = (queue & 0xFFFFFFFF);
2650 if (mask)
2651 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2652 mask = (queue >> 32);
2653 if (mask)
2654 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2655 }
2656 out:
2657 mutex_exit(&que->dc_mtx);
2658 } /* ixgbe_enable_queue */
2659
2660 /************************************************************************
2661 * ixgbe_disable_queue_internal
2662 ************************************************************************/
2663 static inline void
2664 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2665 {
2666 struct ixgbe_hw *hw = &adapter->hw;
2667 struct ix_queue *que = &adapter->queues[vector];
2668 u64 queue = 1ULL << vector;
2669 u32 mask;
2670
2671 mutex_enter(&que->dc_mtx);
2672
2673 if (que->disabled_count > 0) {
2674 if (nestok)
2675 que->disabled_count++;
2676 goto out;
2677 }
2678 que->disabled_count++;
2679
2680 if (hw->mac.type == ixgbe_mac_82598EB) {
2681 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2682 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2683 } else {
2684 mask = (queue & 0xFFFFFFFF);
2685 if (mask)
2686 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2687 mask = (queue >> 32);
2688 if (mask)
2689 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2690 }
2691 out:
2692 mutex_exit(&que->dc_mtx);
2693 } /* ixgbe_disable_queue_internal */
2694
2695 /************************************************************************
2696 * ixgbe_disable_queue
2697 ************************************************************************/
2698 static inline void
2699 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2700 {
2701
2702 ixgbe_disable_queue_internal(adapter, vector, true);
2703 } /* ixgbe_disable_queue */
2704
2705 /************************************************************************
2706 * ixgbe_sched_handle_que - schedule deferred packet processing
2707 ************************************************************************/
2708 static inline void
2709 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2710 {
2711
2712 if (que->txrx_use_workqueue) {
2713 		/*
2714 		 * adapter->que_wq is bound to each CPU instead of each
2715 		 * NIC queue to reduce the number of workqueue kthreads.
2716 		 * Because interrupt affinity must be considered in this
2717 		 * function, the workqueue kthread must be WQ_PERCPU.
2718 		 * Creating a WQ_PERCPU workqueue kthread for each NIC
2719 		 * queue instead would yield
2720 		 * (number of used NIC queues) * (number of CPUs) kthreads,
2721 		 * which is (number of CPUs) ^ 2 most often.
2722 		 *
2723 		 * Re-entry for the same NIC queue is prevented by masking
2724 		 * that queue's interrupt, and different NIC queues use
2725 		 * different struct work (que->wq_cookie), so an "enqueued"
2726 		 * flag to avoid calling workqueue_enqueue() twice is not
2727 		 * required.
2728 		 */
2729 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2730 } else {
2731 softint_schedule(que->que_si);
2732 }
2733 }
2734
2735 /************************************************************************
2736 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2737 ************************************************************************/
2738 static int
2739 ixgbe_msix_que(void *arg)
2740 {
2741 struct ix_queue *que = arg;
2742 struct adapter *adapter = que->adapter;
2743 struct ifnet *ifp = adapter->ifp;
2744 struct tx_ring *txr = que->txr;
2745 struct rx_ring *rxr = que->rxr;
2746 bool more;
2747 u32 newitr = 0;
2748
2749 /* Protect against spurious interrupts */
2750 if ((ifp->if_flags & IFF_RUNNING) == 0)
2751 return 0;
2752
2753 ixgbe_disable_queue(adapter, que->msix);
2754 ++que->irqs.ev_count;
2755
2756 	/*
2757 	 * Latch adapter->txrx_use_workqueue here so a single deferred
2758 	 * processing pass doesn't flip-flop between softint and workqueue mode.
2759 	 */
2760 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2761
2762 #ifdef __NetBSD__
2763 /* Don't run ixgbe_rxeof in interrupt context */
2764 more = true;
2765 #else
2766 more = ixgbe_rxeof(que);
2767 #endif
2768
2769 IXGBE_TX_LOCK(txr);
2770 ixgbe_txeof(txr);
2771 IXGBE_TX_UNLOCK(txr);
2772
2773 /* Do AIM now? */
2774
2775 if (adapter->enable_aim == false)
2776 goto no_calc;
2777 /*
2778 * Do Adaptive Interrupt Moderation:
2779 * - Write out last calculated setting
2780 * - Calculate based on average size over
2781 * the last interval.
2782 */
2783 if (que->eitr_setting)
2784 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2785
2786 que->eitr_setting = 0;
2787
2788 /* Idle, do nothing */
2789 if ((txr->bytes == 0) && (rxr->bytes == 0))
2790 goto no_calc;
2791
2792 if ((txr->bytes) && (txr->packets))
2793 newitr = txr->bytes/txr->packets;
2794 if ((rxr->bytes) && (rxr->packets))
2795 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2796 newitr += 24; /* account for hardware frame, crc */
2797
2798 /* set an upper boundary */
2799 newitr = uimin(newitr, 3000);
2800
2801 /* Be nice to the mid range */
2802 if ((newitr > 300) && (newitr < 1200))
2803 newitr = (newitr / 3);
2804 else
2805 newitr = (newitr / 2);
2806
2807 	/*
2808 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY;
2809 	 * we currently use 2us for RSC_DELAY. The minimum interval is always
2810 	 * greater than 2us on 100M (and presumably 10M, though this is not
2811 	 * documented), but not on 1G and higher.
2812 	 */
2813 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2814 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2815 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2816 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2817 }
2818
2819 /* save for next interrupt */
2820 que->eitr_setting = newitr;
2821
2822 /* Reset state */
2823 txr->bytes = 0;
2824 txr->packets = 0;
2825 rxr->bytes = 0;
2826 rxr->packets = 0;
2827
2828 no_calc:
2829 if (more)
2830 ixgbe_sched_handle_que(adapter, que);
2831 else
2832 ixgbe_enable_queue(adapter, que->msix);
2833
2834 return 1;
2835 } /* ixgbe_msix_que */
2836
2837 /************************************************************************
2838 * ixgbe_media_status - Media Ioctl callback
2839 *
2840 * Called whenever the user queries the status of
2841 * the interface using ifconfig.
2842 ************************************************************************/
2843 static void
2844 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2845 {
2846 struct adapter *adapter = ifp->if_softc;
2847 struct ixgbe_hw *hw = &adapter->hw;
2848 int layer;
2849
2850 INIT_DEBUGOUT("ixgbe_media_status: begin");
2851 ixgbe_update_link_status(adapter);
2852
2853 ifmr->ifm_status = IFM_AVALID;
2854 ifmr->ifm_active = IFM_ETHER;
2855
2856 if (adapter->link_active != LINK_STATE_UP) {
2857 ifmr->ifm_active |= IFM_NONE;
2858 return;
2859 }
2860
2861 ifmr->ifm_status |= IFM_ACTIVE;
2862 layer = adapter->phy_layer;
2863
2864 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2865 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2866 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2867 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2868 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2869 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2870 switch (adapter->link_speed) {
2871 case IXGBE_LINK_SPEED_10GB_FULL:
2872 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2873 break;
2874 case IXGBE_LINK_SPEED_5GB_FULL:
2875 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2876 break;
2877 case IXGBE_LINK_SPEED_2_5GB_FULL:
2878 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2879 break;
2880 case IXGBE_LINK_SPEED_1GB_FULL:
2881 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2882 break;
2883 case IXGBE_LINK_SPEED_100_FULL:
2884 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2885 break;
2886 case IXGBE_LINK_SPEED_10_FULL:
2887 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2888 break;
2889 }
2890 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2891 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2892 switch (adapter->link_speed) {
2893 case IXGBE_LINK_SPEED_10GB_FULL:
2894 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2895 break;
2896 }
2897 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2898 switch (adapter->link_speed) {
2899 case IXGBE_LINK_SPEED_10GB_FULL:
2900 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2901 break;
2902 case IXGBE_LINK_SPEED_1GB_FULL:
2903 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2904 break;
2905 }
2906 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2907 switch (adapter->link_speed) {
2908 case IXGBE_LINK_SPEED_10GB_FULL:
2909 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2910 break;
2911 case IXGBE_LINK_SPEED_1GB_FULL:
2912 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2913 break;
2914 }
2915 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2916 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2917 switch (adapter->link_speed) {
2918 case IXGBE_LINK_SPEED_10GB_FULL:
2919 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2920 break;
2921 case IXGBE_LINK_SPEED_1GB_FULL:
2922 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2923 break;
2924 }
2925 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2926 switch (adapter->link_speed) {
2927 case IXGBE_LINK_SPEED_10GB_FULL:
2928 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2929 break;
2930 }
2931 /*
2932 * XXX: These need to use the proper media types once
2933 * they're added.
2934 */
2935 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2936 switch (adapter->link_speed) {
2937 case IXGBE_LINK_SPEED_10GB_FULL:
2938 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2939 break;
2940 case IXGBE_LINK_SPEED_2_5GB_FULL:
2941 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2942 break;
2943 case IXGBE_LINK_SPEED_1GB_FULL:
2944 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2945 break;
2946 }
2947 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2948 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2949 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2950 switch (adapter->link_speed) {
2951 case IXGBE_LINK_SPEED_10GB_FULL:
2952 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2953 break;
2954 case IXGBE_LINK_SPEED_2_5GB_FULL:
2955 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2956 break;
2957 case IXGBE_LINK_SPEED_1GB_FULL:
2958 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2959 break;
2960 }
2961
2962 /* If nothing is recognized... */
2963 #if 0
2964 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2965 ifmr->ifm_active |= IFM_UNKNOWN;
2966 #endif
2967
2968 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2969
2970 /* Display current flow control setting used on link */
2971 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2972 hw->fc.current_mode == ixgbe_fc_full)
2973 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2974 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2975 hw->fc.current_mode == ixgbe_fc_full)
2976 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2977
2978 return;
2979 } /* ixgbe_media_status */
2980
2981 /************************************************************************
2982 * ixgbe_media_change - Media Ioctl callback
2983 *
2984  *   Called when the user changes speed/duplex using the
2985  *   media/mediaopt options with ifconfig.
2986 ************************************************************************/
2987 static int
2988 ixgbe_media_change(struct ifnet *ifp)
2989 {
2990 struct adapter *adapter = ifp->if_softc;
2991 struct ifmedia *ifm = &adapter->media;
2992 struct ixgbe_hw *hw = &adapter->hw;
2993 ixgbe_link_speed speed = 0;
2994 ixgbe_link_speed link_caps = 0;
2995 bool negotiate = false;
2996 s32 err = IXGBE_NOT_IMPLEMENTED;
2997
2998 INIT_DEBUGOUT("ixgbe_media_change: begin");
2999
3000 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3001 return (EINVAL);
3002
3003 if (hw->phy.media_type == ixgbe_media_type_backplane)
3004 return (EPERM);
3005
3006 /*
3007 * We don't actually need to check against the supported
3008 * media types of the adapter; ifmedia will take care of
3009 * that for us.
3010 */
3011 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3012 case IFM_AUTO:
3013 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3014 &negotiate);
3015 if (err != IXGBE_SUCCESS) {
3016 device_printf(adapter->dev, "Unable to determine "
3017 "supported advertise speeds\n");
3018 return (ENODEV);
3019 }
3020 speed |= link_caps;
3021 break;
3022 case IFM_10G_T:
3023 case IFM_10G_LRM:
3024 case IFM_10G_LR:
3025 case IFM_10G_TWINAX:
3026 case IFM_10G_SR:
3027 case IFM_10G_CX4:
3028 case IFM_10G_KR:
3029 case IFM_10G_KX4:
3030 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3031 break;
3032 case IFM_5000_T:
3033 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3034 break;
3035 case IFM_2500_T:
3036 case IFM_2500_KX:
3037 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3038 break;
3039 case IFM_1000_T:
3040 case IFM_1000_LX:
3041 case IFM_1000_SX:
3042 case IFM_1000_KX:
3043 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3044 break;
3045 case IFM_100_TX:
3046 speed |= IXGBE_LINK_SPEED_100_FULL;
3047 break;
3048 case IFM_10_T:
3049 speed |= IXGBE_LINK_SPEED_10_FULL;
3050 break;
3051 case IFM_NONE:
3052 break;
3053 default:
3054 goto invalid;
3055 }
3056
3057 hw->mac.autotry_restart = TRUE;
3058 hw->mac.ops.setup_link(hw, speed, TRUE);
3059 adapter->advertise = 0;
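	/*
	 * Record the selection in adapter->advertise, one bit per speed:
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G,
	 * 0x20 = 5G.
	 */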
3060 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3061 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3062 adapter->advertise |= 1 << 2;
3063 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3064 adapter->advertise |= 1 << 1;
3065 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3066 adapter->advertise |= 1 << 0;
3067 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3068 adapter->advertise |= 1 << 3;
3069 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3070 adapter->advertise |= 1 << 4;
3071 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3072 adapter->advertise |= 1 << 5;
3073 }
3074
3075 return (0);
3076
3077 invalid:
3078 device_printf(adapter->dev, "Invalid media type!\n");
3079
3080 return (EINVAL);
3081 } /* ixgbe_media_change */
3082
3083 /************************************************************************
3084 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3085 ************************************************************************/
3086 static int
3087 ixgbe_msix_admin(void *arg)
3088 {
3089 struct adapter *adapter = arg;
3090 struct ixgbe_hw *hw = &adapter->hw;
3091 u32 eicr;
3092 u32 eims_orig;
3093 u32 eims_disable = 0;
3094
3095 ++adapter->admin_irqev.ev_count;
3096
3097 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3098 /* Pause other interrupts */
3099 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3100
3101 /*
3102 * First get the cause.
3103 *
3104 	 * The specifications of 82598, 82599, X540 and X550 say the EICS
3105 	 * register is write only. However, Linux reads EICS instead of EICR
3106 	 * to get the interrupt cause, as a workaround for silicon errata.
3107 	 * At least, reading EICR clears the lower 16 bits of EIMS on 82598.
3108 */
3109 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3110 /* Be sure the queue bits are not cleared */
3111 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3112 /* Clear all OTHER interrupts with write */
3113 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3114
3115 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
3116
3117 /* Re-enable some OTHER interrupts */
3118 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3119
3120 return 1;
3121 } /* ixgbe_msix_admin */
3122
3123 static void
3124 ixgbe_intr_admin_common(struct adapter *adapter, u32 eicr, u32 *eims_disable)
3125 {
3126 struct ixgbe_hw *hw = &adapter->hw;
3127 u32 eicr_mask;
3128 u32 task_requests = 0;
3129 s32 retval;
3130
3131 /* Link status change */
3132 if (eicr & IXGBE_EICR_LSC) {
3133 task_requests |= IXGBE_REQUEST_TASK_LSC;
3134 *eims_disable |= IXGBE_EIMS_LSC;
3135 }
3136
3137 if (ixgbe_is_sfp(hw)) {
3138 /* Pluggable optics-related interrupt */
3139 if (hw->mac.type >= ixgbe_mac_X540)
3140 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3141 else
3142 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3143
3144 		/*
3145 		 * An interrupt might not arrive when a module is inserted.
3146 		 * When a link status change interrupt occurs while the driver
3147 		 * still regards the SFP as unplugged, issue the module softint
3148 		 * and then the LSC interrupt.
3149 		 */
3150 if ((eicr & eicr_mask)
3151 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3152 && (eicr & IXGBE_EICR_LSC))) {
3153 task_requests |= IXGBE_REQUEST_TASK_MOD;
3154 *eims_disable |= IXGBE_EIMS_LSC;
3155 }
3156
3157 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3158 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3159 task_requests |= IXGBE_REQUEST_TASK_MSF;
3160 *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3161 }
3162 }
3163
3164 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3165 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3166 (eicr & IXGBE_EICR_FLOW_DIR)) {
3167 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) {
3168 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3169 /* Disable the interrupt */
3170 *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3171 }
3172 }
3173
3174 if (eicr & IXGBE_EICR_ECC) {
3175 device_printf(adapter->dev,
3176 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3177 /* Disable interrupt to prevent log spam */
3178 *eims_disable |= IXGBE_EICR_ECC;
3179 }
3180
3181 /* Check for over temp condition */
3182 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3183 switch (adapter->hw.mac.type) {
3184 case ixgbe_mac_X550EM_a:
3185 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3186 break;
3187 /* Disable interrupt to prevent log spam */
3188 *eims_disable |= IXGBE_EICR_GPI_SDP0_X550EM_a;
3189
3190 retval = hw->phy.ops.check_overtemp(hw);
3191 if (retval != IXGBE_ERR_OVERTEMP)
3192 break;
3193 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3194 device_printf(adapter->dev, "System shutdown required!\n");
3195 break;
3196 default:
3197 if (!(eicr & IXGBE_EICR_TS))
3198 break;
3199 /* Disable interrupt to prevent log spam */
3200 *eims_disable |= IXGBE_EIMS_TS;
3201
3202 retval = hw->phy.ops.check_overtemp(hw);
3203 if (retval != IXGBE_ERR_OVERTEMP)
3204 break;
3205 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3206 device_printf(adapter->dev, "System shutdown required!\n");
3207 break;
3208 }
3209 }
3210
3211 /* Check for VF message */
3212 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3213 (eicr & IXGBE_EICR_MAILBOX)) {
3214 task_requests |= IXGBE_REQUEST_TASK_MBX;
3215 *eims_disable |= IXGBE_EIMS_MAILBOX;
3216 }
3217 }
3218
3219 /* Check for fan failure */
3220 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3221 retval = ixgbe_check_fan_failure(adapter, eicr, true);
3222 if (retval == IXGBE_ERR_FAN_FAILURE) {
3223 /* Disable interrupt to prevent log spam */
3224 *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3225 }
3226 }
3227
3228 /* External PHY interrupt */
3229 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3230 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3231 task_requests |= IXGBE_REQUEST_TASK_PHY;
3232 *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3233 }
3234
3235 if (task_requests != 0) {
3236 mutex_enter(&adapter->admin_mtx);
3237 adapter->task_requests |= task_requests;
3238 ixgbe_schedule_admin_tasklet(adapter);
3239 mutex_exit(&adapter->admin_mtx);
3240 }
3241
3242 }
3243
3244 static void
3245 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3246 {
3247
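	/*
	 * 82598 expects the interval in both halves of EITR so that the
	 * internal counter is reset; later MACs set CNT_WDIS instead so
	 * the write does not disturb the running counter.
	 */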
3248 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3249 itr |= itr << 16;
3250 else
3251 itr |= IXGBE_EITR_CNT_WDIS;
3252
3253 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3254 }
3255
3256
3257 /************************************************************************
3258 * ixgbe_sysctl_interrupt_rate_handler
3259 ************************************************************************/
3260 static int
3261 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3262 {
3263 struct sysctlnode node = *rnode;
3264 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3265 struct adapter *adapter;
3266 uint32_t reg, usec, rate;
3267 int error;
3268
3269 if (que == NULL)
3270 return 0;
3271
3272 adapter = que->adapter;
3273 if (ixgbe_fw_recovery_mode_swflag(adapter))
3274 return (EPERM);
3275
3276 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3277 usec = ((reg & 0x0FF8) >> 3);
3278 if (usec > 0)
3279 rate = 500000 / usec;
3280 else
3281 rate = 0;
3282 node.sysctl_data = &rate;
3283 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3284 if (error || newp == NULL)
3285 return error;
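	/*
	 * The interval lives in bits [11:3] of EITR: the write below
	 * stores (4000000 / rate) & 0xff8, which the read path above
	 * inverts as rate = 500000 / ((reg & 0x0ff8) >> 3).
	 */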
3286 reg &= ~0xfff; /* default, no limitation */
3287 if (rate > 0 && rate < 500000) {
3288 if (rate < 1000)
3289 rate = 1000;
3290 reg |= ((4000000 / rate) & 0xff8);
3291 		/*
3292 		 * When RSC is used, the ITR interval must be larger than
3293 		 * RSC_DELAY. Currently we use 2us for RSC_DELAY.
3294 		 * The minimum value is always greater than 2us on 100M
3295 		 * (and 10M? not documented), but not on 1G and higher.
3296 		 */
3297 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3298 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3299 if ((adapter->num_queues > 1)
3300 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3301 return EINVAL;
3302 }
3303 ixgbe_max_interrupt_rate = rate;
3304 } else
3305 ixgbe_max_interrupt_rate = 0;
3306 ixgbe_eitr_write(adapter, que->msix, reg);
3307
3308 return (0);
3309 } /* ixgbe_sysctl_interrupt_rate_handler */
3310
3311 const struct sysctlnode *
3312 ixgbe_sysctl_instance(struct adapter *adapter)
3313 {
3314 const char *dvname;
3315 struct sysctllog **log;
3316 int rc;
3317 const struct sysctlnode *rnode;
3318
3319 if (adapter->sysctltop != NULL)
3320 return adapter->sysctltop;
3321
3322 log = &adapter->sysctllog;
3323 dvname = device_xname(adapter->dev);
3324
3325 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3326 0, CTLTYPE_NODE, dvname,
3327 SYSCTL_DESCR("ixgbe information and settings"),
3328 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3329 goto err;
3330
3331 return rnode;
3332 err:
3333 device_printf(adapter->dev,
3334 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3335 return NULL;
3336 }
3337
3338 /************************************************************************
3339 * ixgbe_add_device_sysctls
3340 ************************************************************************/
3341 static void
3342 ixgbe_add_device_sysctls(struct adapter *adapter)
3343 {
3344 device_t dev = adapter->dev;
3345 struct ixgbe_hw *hw = &adapter->hw;
3346 struct sysctllog **log;
3347 const struct sysctlnode *rnode, *cnode;
3348
3349 log = &adapter->sysctllog;
3350
3351 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3352 aprint_error_dev(dev, "could not create sysctl root\n");
3353 return;
3354 }
3355
3356 if (sysctl_createv(log, 0, &rnode, &cnode,
3357 CTLFLAG_READWRITE, CTLTYPE_INT,
3358 "debug", SYSCTL_DESCR("Debug Info"),
3359 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3360 aprint_error_dev(dev, "could not create sysctl\n");
3361
3362 if (sysctl_createv(log, 0, &rnode, &cnode,
3363 CTLFLAG_READONLY, CTLTYPE_INT,
3364 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3365 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3366 aprint_error_dev(dev, "could not create sysctl\n");
3367
3368 if (sysctl_createv(log, 0, &rnode, &cnode,
3369 CTLFLAG_READONLY, CTLTYPE_INT,
3370 "num_queues", SYSCTL_DESCR("Number of queues"),
3371 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3372 aprint_error_dev(dev, "could not create sysctl\n");
3373
3374 /* Sysctls for all devices */
3375 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3376 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3377 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3378 CTL_EOL) != 0)
3379 aprint_error_dev(dev, "could not create sysctl\n");
3380
3381 adapter->enable_aim = ixgbe_enable_aim;
3382 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3383 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3384 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3385 aprint_error_dev(dev, "could not create sysctl\n");
3386
3387 if (sysctl_createv(log, 0, &rnode, &cnode,
3388 CTLFLAG_READWRITE, CTLTYPE_INT,
3389 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3390 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3391 CTL_EOL) != 0)
3392 aprint_error_dev(dev, "could not create sysctl\n");
3393
3394 /*
3395 	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3396 	 * handler, it would cause flip-flopping between softint and
3397 	 * workqueue mode within one deferred processing run, so
3398 	 * preempt_disable()/preempt_enable() would be required in
3399 	 * ixgbe_sched_handle_que() to avoid
3400 	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3401 	 * I think changing "que->txrx_use_workqueue" in the interrupt
3402 	 * handler is lighter than doing that in every ixgbe_sched_handle_que().
3403 */
3404 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3405 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3406 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3407 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3408 aprint_error_dev(dev, "could not create sysctl\n");
3409
3410 #ifdef IXGBE_DEBUG
3411 /* testing sysctls (for all devices) */
3412 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3413 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3414 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3415 CTL_EOL) != 0)
3416 aprint_error_dev(dev, "could not create sysctl\n");
3417
3418 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3419 CTLTYPE_STRING, "print_rss_config",
3420 SYSCTL_DESCR("Prints RSS Configuration"),
3421 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3422 CTL_EOL) != 0)
3423 aprint_error_dev(dev, "could not create sysctl\n");
3424 #endif
3425 /* for X550 series devices */
3426 if (hw->mac.type >= ixgbe_mac_X550)
3427 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3428 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3429 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3430 CTL_EOL) != 0)
3431 aprint_error_dev(dev, "could not create sysctl\n");
3432
3433 /* for WoL-capable devices */
3434 if (adapter->wol_support) {
3435 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3436 CTLTYPE_BOOL, "wol_enable",
3437 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3438 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3439 CTL_EOL) != 0)
3440 aprint_error_dev(dev, "could not create sysctl\n");
3441
3442 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3443 CTLTYPE_INT, "wufc",
3444 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3445 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3446 CTL_EOL) != 0)
3447 aprint_error_dev(dev, "could not create sysctl\n");
3448 }
3449
3450 /* for X552/X557-AT devices */
3451 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3452 const struct sysctlnode *phy_node;
3453
3454 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3455 "phy", SYSCTL_DESCR("External PHY sysctls"),
3456 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3457 aprint_error_dev(dev, "could not create sysctl\n");
3458 return;
3459 }
3460
3461 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3462 CTLTYPE_INT, "temp",
3463 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3464 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3465 CTL_EOL) != 0)
3466 aprint_error_dev(dev, "could not create sysctl\n");
3467
3468 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3469 CTLTYPE_INT, "overtemp_occurred",
3470 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3471 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3472 CTL_CREATE, CTL_EOL) != 0)
3473 aprint_error_dev(dev, "could not create sysctl\n");
3474 }
3475
3476 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3477 && (hw->phy.type == ixgbe_phy_fw))
3478 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3479 CTLTYPE_BOOL, "force_10_100_autonego",
3480 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3481 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3482 CTL_CREATE, CTL_EOL) != 0)
3483 aprint_error_dev(dev, "could not create sysctl\n");
3484
3485 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3486 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3487 CTLTYPE_INT, "eee_state",
3488 SYSCTL_DESCR("EEE Power Save State"),
3489 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3490 CTL_EOL) != 0)
3491 aprint_error_dev(dev, "could not create sysctl\n");
3492 }
3493 } /* ixgbe_add_device_sysctls */
3494
3495 /************************************************************************
3496 * ixgbe_allocate_pci_resources
3497 ************************************************************************/
3498 static int
3499 ixgbe_allocate_pci_resources(struct adapter *adapter,
3500 const struct pci_attach_args *pa)
3501 {
3502 pcireg_t memtype, csr;
3503 device_t dev = adapter->dev;
3504 bus_addr_t addr;
3505 int flags;
3506
3507 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3508 switch (memtype) {
3509 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3510 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3511 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3512 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3513 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3514 goto map_err;
3515 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3516 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3517 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3518 }
3519 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3520 adapter->osdep.mem_size, flags,
3521 &adapter->osdep.mem_bus_space_handle) != 0) {
3522 map_err:
3523 adapter->osdep.mem_size = 0;
3524 aprint_error_dev(dev, "unable to map BAR0\n");
3525 return ENXIO;
3526 }
3527 /*
3528 * Enable address decoding for memory range in case BIOS or
3529 * UEFI don't set it.
3530 */
3531 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3532 PCI_COMMAND_STATUS_REG);
3533 csr |= PCI_COMMAND_MEM_ENABLE;
3534 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3535 csr);
3536 break;
3537 default:
3538 aprint_error_dev(dev, "unexpected type on BAR0\n");
3539 return ENXIO;
3540 }
3541
3542 return (0);
3543 } /* ixgbe_allocate_pci_resources */
3544
3545 static void
3546 ixgbe_free_deferred_handlers(struct adapter *adapter)
3547 {
3548 struct ix_queue *que = adapter->queues;
3549 struct tx_ring *txr = adapter->tx_rings;
3550 int i;
3551
3552 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3553 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3554 if (txr->txr_si != NULL)
3555 softint_disestablish(txr->txr_si);
3556 }
3557 if (que->que_si != NULL)
3558 softint_disestablish(que->que_si);
3559 }
3560 if (adapter->txr_wq != NULL)
3561 workqueue_destroy(adapter->txr_wq);
3562 if (adapter->txr_wq_enqueued != NULL)
3563 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3564 if (adapter->que_wq != NULL)
3565 workqueue_destroy(adapter->que_wq);
3566
3567 if (adapter->admin_wq != NULL) {
3568 workqueue_destroy(adapter->admin_wq);
3569 adapter->admin_wq = NULL;
3570 }
3571 if (adapter->timer_wq != NULL) {
3572 workqueue_destroy(adapter->timer_wq);
3573 adapter->timer_wq = NULL;
3574 }
3575 if (adapter->recovery_mode_timer_wq != NULL) {
3576 /*
3577 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3578 * the recovery_mode_timer workqueue, so call it here.
3579 */
3580 workqueue_wait(adapter->recovery_mode_timer_wq,
3581 &adapter->recovery_mode_timer_wc);
3582 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3583 workqueue_destroy(adapter->recovery_mode_timer_wq);
3584 adapter->recovery_mode_timer_wq = NULL;
3585 }
3586 } /* ixgbe_free_deferred_handlers */
3587
3588 /************************************************************************
3589 * ixgbe_detach - Device removal routine
3590 *
3591 * Called when the driver is being removed.
3592 * Stops the adapter and deallocates all the resources
3593 * that were allocated for driver operation.
3594 *
3595 * return 0 on success, positive on failure
3596 ************************************************************************/
3597 static int
3598 ixgbe_detach(device_t dev, int flags)
3599 {
3600 struct adapter *adapter = device_private(dev);
3601 struct rx_ring *rxr = adapter->rx_rings;
3602 struct tx_ring *txr = adapter->tx_rings;
3603 struct ixgbe_hw *hw = &adapter->hw;
3604 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3605 u32 ctrl_ext;
3606 int i;
3607
3608 INIT_DEBUGOUT("ixgbe_detach: begin");
3609 if (adapter->osdep.attached == false)
3610 return 0;
3611
3612 if (ixgbe_pci_iov_detach(dev) != 0) {
3613 device_printf(dev, "SR-IOV in use; detach first.\n");
3614 return (EBUSY);
3615 }
3616
3617 #if NVLAN > 0
3618 /* Make sure VLANs are not using driver */
3619 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3620 ; /* nothing to do: no VLANs */
3621 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3622 vlan_ifdetach(adapter->ifp);
3623 else {
3624 aprint_error_dev(dev, "VLANs in use, detach first\n");
3625 return (EBUSY);
3626 }
3627 #endif
3628
3629 adapter->osdep.detaching = true;
3630 /*
3631 * Stop the interface. ixgbe_setup_low_power_mode() calls
3632 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3633 * directly.
3634 */
3635 ixgbe_setup_low_power_mode(adapter);
3636
3637 callout_halt(&adapter->timer, NULL);
3638 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3639 callout_halt(&adapter->recovery_mode_timer, NULL);
3640
3641 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3642 atomic_store_relaxed(&adapter->admin_pending, 0);
3643 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3644 atomic_store_relaxed(&adapter->timer_pending, 0);
3645
3646 pmf_device_deregister(dev);
3647
3648 ether_ifdetach(adapter->ifp);
3649
3650 ixgbe_free_deferred_handlers(adapter);
3651
3652 /* let hardware know driver is unloading */
3653 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3654 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3655 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3656
3657 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3658 netmap_detach(adapter->ifp);
3659
3660 ixgbe_free_pci_resources(adapter);
3661 #if 0 /* XXX the NetBSD port is probably missing something here */
3662 bus_generic_detach(dev);
3663 #endif
3664 if_detach(adapter->ifp);
3665 ifmedia_fini(&adapter->media);
3666 if_percpuq_destroy(adapter->ipq);
3667
3668 sysctl_teardown(&adapter->sysctllog);
3669 evcnt_detach(&adapter->efbig_tx_dma_setup);
3670 evcnt_detach(&adapter->mbuf_defrag_failed);
3671 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3672 evcnt_detach(&adapter->einval_tx_dma_setup);
3673 evcnt_detach(&adapter->other_tx_dma_setup);
3674 evcnt_detach(&adapter->eagain_tx_dma_setup);
3675 evcnt_detach(&adapter->enomem_tx_dma_setup);
3676 evcnt_detach(&adapter->watchdog_events);
3677 evcnt_detach(&adapter->tso_err);
3678 evcnt_detach(&adapter->admin_irqev);
3679 evcnt_detach(&adapter->link_workev);
3680 evcnt_detach(&adapter->mod_workev);
3681 evcnt_detach(&adapter->msf_workev);
3682 evcnt_detach(&adapter->phy_workev);
3683
3684 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3685 if (i < __arraycount(stats->mpc)) {
3686 evcnt_detach(&stats->mpc[i]);
3687 if (hw->mac.type == ixgbe_mac_82598EB)
3688 evcnt_detach(&stats->rnbc[i]);
3689 }
3690 if (i < __arraycount(stats->pxontxc)) {
3691 evcnt_detach(&stats->pxontxc[i]);
3692 evcnt_detach(&stats->pxonrxc[i]);
3693 evcnt_detach(&stats->pxofftxc[i]);
3694 evcnt_detach(&stats->pxoffrxc[i]);
3695 if (hw->mac.type >= ixgbe_mac_82599EB)
3696 evcnt_detach(&stats->pxon2offc[i]);
3697 }
3698 }
3699
3700 txr = adapter->tx_rings;
3701 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3702 evcnt_detach(&adapter->queues[i].irqs);
3703 evcnt_detach(&adapter->queues[i].handleq);
3704 evcnt_detach(&adapter->queues[i].req);
3705 evcnt_detach(&txr->no_desc_avail);
3706 evcnt_detach(&txr->total_packets);
3707 evcnt_detach(&txr->tso_tx);
3708 #ifndef IXGBE_LEGACY_TX
3709 evcnt_detach(&txr->pcq_drops);
3710 #endif
3711
3712 if (i < __arraycount(stats->qprc)) {
3713 evcnt_detach(&stats->qprc[i]);
3714 evcnt_detach(&stats->qptc[i]);
3715 evcnt_detach(&stats->qbrc[i]);
3716 evcnt_detach(&stats->qbtc[i]);
3717 if (hw->mac.type >= ixgbe_mac_82599EB)
3718 evcnt_detach(&stats->qprdc[i]);
3719 }
3720
3721 evcnt_detach(&rxr->rx_packets);
3722 evcnt_detach(&rxr->rx_bytes);
3723 evcnt_detach(&rxr->rx_copies);
3724 evcnt_detach(&rxr->no_jmbuf);
3725 evcnt_detach(&rxr->rx_discarded);
3726 }
3727 evcnt_detach(&stats->ipcs);
3728 evcnt_detach(&stats->l4cs);
3729 evcnt_detach(&stats->ipcs_bad);
3730 evcnt_detach(&stats->l4cs_bad);
3731 evcnt_detach(&stats->intzero);
3732 evcnt_detach(&stats->legint);
3733 evcnt_detach(&stats->crcerrs);
3734 evcnt_detach(&stats->illerrc);
3735 evcnt_detach(&stats->errbc);
3736 evcnt_detach(&stats->mspdc);
3737 if (hw->mac.type >= ixgbe_mac_X550)
3738 evcnt_detach(&stats->mbsdc);
3739 evcnt_detach(&stats->mpctotal);
3740 evcnt_detach(&stats->mlfc);
3741 evcnt_detach(&stats->mrfc);
3742 evcnt_detach(&stats->rlec);
3743 evcnt_detach(&stats->lxontxc);
3744 evcnt_detach(&stats->lxonrxc);
3745 evcnt_detach(&stats->lxofftxc);
3746 evcnt_detach(&stats->lxoffrxc);
3747
3748 /* Packet Reception Stats */
3749 evcnt_detach(&stats->tor);
3750 evcnt_detach(&stats->gorc);
3751 evcnt_detach(&stats->tpr);
3752 evcnt_detach(&stats->gprc);
3753 evcnt_detach(&stats->mprc);
3754 evcnt_detach(&stats->bprc);
3755 evcnt_detach(&stats->prc64);
3756 evcnt_detach(&stats->prc127);
3757 evcnt_detach(&stats->prc255);
3758 evcnt_detach(&stats->prc511);
3759 evcnt_detach(&stats->prc1023);
3760 evcnt_detach(&stats->prc1522);
3761 evcnt_detach(&stats->ruc);
3762 evcnt_detach(&stats->rfc);
3763 evcnt_detach(&stats->roc);
3764 evcnt_detach(&stats->rjc);
3765 evcnt_detach(&stats->mngprc);
3766 evcnt_detach(&stats->mngpdc);
3767 evcnt_detach(&stats->xec);
3768
3769 /* Packet Transmission Stats */
3770 evcnt_detach(&stats->gotc);
3771 evcnt_detach(&stats->tpt);
3772 evcnt_detach(&stats->gptc);
3773 evcnt_detach(&stats->bptc);
3774 evcnt_detach(&stats->mptc);
3775 evcnt_detach(&stats->mngptc);
3776 evcnt_detach(&stats->ptc64);
3777 evcnt_detach(&stats->ptc127);
3778 evcnt_detach(&stats->ptc255);
3779 evcnt_detach(&stats->ptc511);
3780 evcnt_detach(&stats->ptc1023);
3781 evcnt_detach(&stats->ptc1522);
3782
3783 ixgbe_free_queues(adapter);
3784 free(adapter->mta, M_DEVBUF);
3785
3786 mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
3787 IXGBE_CORE_LOCK_DESTROY(adapter);
3788
3789 return (0);
3790 } /* ixgbe_detach */
3791
3792 /************************************************************************
3793 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3794 *
3795 * Prepare the adapter/port for LPLU and/or WoL
3796 ************************************************************************/
3797 static int
3798 ixgbe_setup_low_power_mode(struct adapter *adapter)
3799 {
3800 struct ixgbe_hw *hw = &adapter->hw;
3801 device_t dev = adapter->dev;
3802 struct ifnet *ifp = adapter->ifp;
3803 s32 error = 0;
3804
3805 /* Limit power management flow to X550EM baseT */
3806 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3807 hw->phy.ops.enter_lplu) {
3808 /* X550EM baseT adapters need a special LPLU flow */
3809 hw->phy.reset_disable = true;
3810 ixgbe_ifstop(ifp, 1);
3811 error = hw->phy.ops.enter_lplu(hw);
3812 if (error)
3813 device_printf(dev,
3814 "Error entering LPLU: %d\n", error);
3815 hw->phy.reset_disable = false;
3816 } else {
3817 /* Just stop for other adapters */
3818 ixgbe_ifstop(ifp, 1);
3819 }
3820
3821 IXGBE_CORE_LOCK(adapter);
3822
3823 if (!hw->wol_enabled) {
3824 ixgbe_set_phy_power(hw, FALSE);
3825 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3826 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3827 } else {
3828 /* Turn off support for APM wakeup. (Using ACPI instead) */
3829 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3830 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3831
3832 /*
3833 * Clear Wake Up Status register to prevent any previous wakeup
3834 * events from waking us up immediately after we suspend.
3835 */
3836 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3837
3838 /*
3839 * Program the Wakeup Filter Control register with user filter
3840 * settings
3841 */
3842 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3843
3844 /* Enable wakeups and power management in Wakeup Control */
3845 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3846 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3847
3848 }
3849
3850 IXGBE_CORE_UNLOCK(adapter);
3851
3852 return error;
3853 } /* ixgbe_setup_low_power_mode */
3854
3855 /************************************************************************
3856 * ixgbe_shutdown - Shutdown entry point
3857 ************************************************************************/
3858 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3859 static int
3860 ixgbe_shutdown(device_t dev)
3861 {
3862 struct adapter *adapter = device_private(dev);
3863 int error = 0;
3864
3865 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3866
3867 error = ixgbe_setup_low_power_mode(adapter);
3868
3869 return (error);
3870 } /* ixgbe_shutdown */
3871 #endif
3872
3873 /************************************************************************
3874 * ixgbe_suspend
3875 *
3876 * From D0 to D3
3877 ************************************************************************/
3878 static bool
3879 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3880 {
3881 struct adapter *adapter = device_private(dev);
3882 int error = 0;
3883
3884 INIT_DEBUGOUT("ixgbe_suspend: begin");
3885
3886 	error = ixgbe_setup_low_power_mode(adapter);
3887 
3888 	return (error == 0); /* pmf(9) expects true on success */
3889 } /* ixgbe_suspend */
3890
3891 /************************************************************************
3892 * ixgbe_resume
3893 *
3894 * From D3 to D0
3895 ************************************************************************/
3896 static bool
3897 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3898 {
3899 struct adapter *adapter = device_private(dev);
3900 struct ifnet *ifp = adapter->ifp;
3901 struct ixgbe_hw *hw = &adapter->hw;
3902 u32 wus;
3903
3904 INIT_DEBUGOUT("ixgbe_resume: begin");
3905
3906 IXGBE_CORE_LOCK(adapter);
3907
3908 /* Read & clear WUS register */
3909 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3910 	if (wus)
3911 		device_printf(dev, "Woken up by (WUS): %#010x\n",
3912 		    wus);
3913 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3914 /* And clear WUFC until next low-power transition */
3915 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3916
3917 /*
3918 * Required after D3->D0 transition;
3919 * will re-advertise all previous advertised speeds
3920 */
3921 if (ifp->if_flags & IFF_UP)
3922 ixgbe_init_locked(adapter);
3923
3924 IXGBE_CORE_UNLOCK(adapter);
3925
3926 return true;
3927 } /* ixgbe_resume */
3928
3929 /*
3930 * Set the various hardware offload abilities.
3931 *
3932 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3933 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3934 * mbuf offload flags the driver will understand.
3935 */
3936 static void
3937 ixgbe_set_if_hwassist(struct adapter *adapter)
3938 {
3939 /* XXX */
3940 }
3941
3942 /************************************************************************
3943 * ixgbe_init_locked - Init entry point
3944 *
3945 * Used in two ways: It is used by the stack as an init
3946 * entry point in network interface structure. It is also
3947 * used by the driver as a hw/sw initialization routine to
3948 * get to a consistent state.
3949 *
3950 * return 0 on success, positive on failure
3951 ************************************************************************/
3952 static void
3953 ixgbe_init_locked(struct adapter *adapter)
3954 {
3955 struct ifnet *ifp = adapter->ifp;
3956 device_t dev = adapter->dev;
3957 struct ixgbe_hw *hw = &adapter->hw;
3958 struct ix_queue *que;
3959 struct tx_ring *txr;
3960 struct rx_ring *rxr;
3961 u32 txdctl, mhadd;
3962 u32 rxdctl, rxctrl;
3963 u32 ctrl_ext;
3964 bool unsupported_sfp = false;
3965 int i, j, err;
3966
3967 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3968
3969 KASSERT(mutex_owned(&adapter->core_mtx));
3970 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3971
3972 hw->need_unsupported_sfp_recovery = false;
3973 hw->adapter_stopped = FALSE;
3974 ixgbe_stop_adapter(hw);
3975 callout_stop(&adapter->timer);
3976 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3977 callout_stop(&adapter->recovery_mode_timer);
3978 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3979 que->disabled_count = 0;
3980
3981 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3982 adapter->max_frame_size =
3983 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3984
3985 /* Queue indices may change with IOV mode */
3986 ixgbe_align_all_queue_indices(adapter);
3987
3988 /* reprogram the RAR[0] in case user changed it. */
3989 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3990
3991 	/* Get the latest MAC address; the user may have set an LAA */
3992 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3993 IXGBE_ETH_LENGTH_OF_ADDRESS);
3994 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3995 hw->addr_ctrl.rar_used_count = 1;
3996
3997 /* Set hardware offload abilities from ifnet flags */
3998 ixgbe_set_if_hwassist(adapter);
3999
4000 /* Prepare transmit descriptors and buffers */
4001 if (ixgbe_setup_transmit_structures(adapter)) {
4002 device_printf(dev, "Could not setup transmit structures\n");
4003 ixgbe_stop_locked(adapter);
4004 return;
4005 }
4006
4007 ixgbe_init_hw(hw);
4008
4009 ixgbe_initialize_iov(adapter);
4010
4011 ixgbe_initialize_transmit_units(adapter);
4012
4013 /* Setup Multicast table */
4014 ixgbe_set_rxfilter(adapter);
4015
4016 /* Determine the correct mbuf pool, based on frame size */
4017 if (adapter->max_frame_size <= MCLBYTES)
4018 adapter->rx_mbuf_sz = MCLBYTES;
4019 else
4020 adapter->rx_mbuf_sz = MJUMPAGESIZE;
4021
4022 /* Prepare receive descriptors and buffers */
4023 if (ixgbe_setup_receive_structures(adapter)) {
4024 device_printf(dev, "Could not setup receive structures\n");
4025 ixgbe_stop_locked(adapter);
4026 return;
4027 }
4028
4029 /* Configure RX settings */
4030 ixgbe_initialize_receive_units(adapter);
4031
4032 	/* Initialize the variable holding deferred task enqueue requests */
4033 adapter->task_requests = 0;
4034
4035 /* Enable SDP & MSI-X interrupts based on adapter */
4036 ixgbe_config_gpie(adapter);
4037
4038 /* Set MTU size */
4039 if (ifp->if_mtu > ETHERMTU) {
4040 /* aka IXGBE_MAXFRS on 82599 and newer */
4041 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4042 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4043 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4044 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4045 }
4046
4047 /* Now enable all the queues */
4048 for (i = 0; i < adapter->num_queues; i++) {
4049 txr = &adapter->tx_rings[i];
4050 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4051 txdctl |= IXGBE_TXDCTL_ENABLE;
4052 /* Set WTHRESH to 8, burst writeback */
4053 txdctl |= (8 << 16);
4054 /*
4055 * When the internal queue falls below PTHRESH (32),
4056 * start prefetching as long as there are at least
4057 * HTHRESH (1) buffers ready. The values are taken
4058 * from the Intel linux driver 3.8.21.
4059 * Prefetching enables tx line rate even with 1 queue.
4060 */
4061 txdctl |= (32 << 0) | (1 << 8);
4062 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4063 }
4064
4065 for (i = 0; i < adapter->num_queues; i++) {
4066 rxr = &adapter->rx_rings[i];
4067 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4068 if (hw->mac.type == ixgbe_mac_82598EB) {
4069 /*
4070 * PTHRESH = 21
4071 * HTHRESH = 4
4072 * WTHRESH = 8
4073 */
4074 rxdctl &= ~0x3FFFFF;
4075 rxdctl |= 0x080420;
4076 }
4077 rxdctl |= IXGBE_RXDCTL_ENABLE;
4078 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
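		/*
		 * Wait up to 10ms for the hardware to reflect the ENABLE
		 * bit before posting the tail pointer below.
		 */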
4079 for (j = 0; j < 10; j++) {
4080 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4081 IXGBE_RXDCTL_ENABLE)
4082 break;
4083 else
4084 msec_delay(1);
4085 }
4086 IXGBE_WRITE_BARRIER(hw);
4087
4088 /*
4089 * In netmap mode, we must preserve the buffers made
4090 * available to userspace before the if_init()
4091 * (this is true by default on the TX side, because
4092 * init makes all buffers available to userspace).
4093 *
4094 * netmap_reset() and the device specific routines
4095 * (e.g. ixgbe_setup_receive_rings()) map these
4096 * buffers at the end of the NIC ring, so here we
4097 * must set the RDT (tail) register to make sure
4098 * they are not overwritten.
4099 *
4100 * In this driver the NIC ring starts at RDH = 0,
4101 * RDT points to the last slot available for reception (?),
4102 * so RDT = num_rx_desc - 1 means the whole ring is available.
4103 */
4104 #ifdef DEV_NETMAP
4105 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4106 (ifp->if_capenable & IFCAP_NETMAP)) {
4107 struct netmap_adapter *na = NA(adapter->ifp);
4108 struct netmap_kring *kring = na->rx_rings[i];
4109 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4110
4111 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4112 } else
4113 #endif /* DEV_NETMAP */
4114 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4115 adapter->num_rx_desc - 1);
4116 }
4117
4118 /* Enable Receive engine */
4119 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4120 if (hw->mac.type == ixgbe_mac_82598EB)
4121 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4122 rxctrl |= IXGBE_RXCTRL_RXEN;
4123 ixgbe_enable_rx_dma(hw, rxctrl);
4124
4125 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4126 atomic_store_relaxed(&adapter->timer_pending, 0);
4127 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4128 callout_reset(&adapter->recovery_mode_timer, hz,
4129 ixgbe_recovery_mode_timer, adapter);
4130
4131 /* Set up MSI/MSI-X routing */
4132 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4133 ixgbe_configure_ivars(adapter);
4134 /* Set up auto-mask */
4135 if (hw->mac.type == ixgbe_mac_82598EB)
4136 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4137 else {
4138 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4139 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4140 }
4141 } else { /* Simple settings for Legacy/MSI */
4142 ixgbe_set_ivar(adapter, 0, 0, 0);
4143 ixgbe_set_ivar(adapter, 0, 0, 1);
4144 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4145 }
4146
4147 ixgbe_init_fdir(adapter);
4148
4149 /*
4150 * Check on any SFP devices that
4151 * need to be kick-started
4152 */
4153 if (hw->phy.type == ixgbe_phy_none) {
4154 err = hw->phy.ops.identify(hw);
4155 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4156 unsupported_sfp = true;
4157 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4158 unsupported_sfp = true;
4159
4160 if (unsupported_sfp)
4161 device_printf(dev,
4162 "Unsupported SFP+ module type was detected.\n");
4163
4164 /* Set moderation on the Link interrupt */
4165 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4166
4167 /* Enable EEE power saving */
4168 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4169 hw->mac.ops.setup_eee(hw,
4170 adapter->feat_en & IXGBE_FEATURE_EEE);
4171
4172 /* Enable power to the phy. */
4173 if (!unsupported_sfp) {
4174 ixgbe_set_phy_power(hw, TRUE);
4175
4176 /* Config/Enable Link */
4177 ixgbe_config_link(adapter);
4178 }
4179
4180 /* Hardware Packet Buffer & Flow Control setup */
4181 ixgbe_config_delay_values(adapter);
4182
4183 /* Initialize the FC settings */
4184 ixgbe_start_hw(hw);
4185
4186 /* Set up VLAN support and filter */
4187 ixgbe_setup_vlan_hw_support(adapter);
4188
4189 /* Setup DMA Coalescing */
4190 ixgbe_config_dmac(adapter);
4191
4192 /* OK to schedule workqueues. */
4193 adapter->schedule_wqs_ok = true;
4194
4195 /* And now turn on interrupts */
4196 ixgbe_enable_intr(adapter);
4197
4198 /* Enable the use of the MBX by the VF's */
4199 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4200 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4201 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4202 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4203 }
4204
4205 /* Update saved flags. See ixgbe_ifflags_cb() */
4206 adapter->if_flags = ifp->if_flags;
4207 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4208
4209 /* Now inform the stack we're ready */
4210 ifp->if_flags |= IFF_RUNNING;
4211
4212 return;
4213 } /* ixgbe_init_locked */
4214
4215 /************************************************************************
4216 * ixgbe_init
4217 ************************************************************************/
4218 static int
4219 ixgbe_init(struct ifnet *ifp)
4220 {
4221 struct adapter *adapter = ifp->if_softc;
4222
4223 IXGBE_CORE_LOCK(adapter);
4224 ixgbe_init_locked(adapter);
4225 IXGBE_CORE_UNLOCK(adapter);
4226
4227 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4228 } /* ixgbe_init */
4229
4230 /************************************************************************
4231 * ixgbe_set_ivar
4232 *
4233 * Setup the correct IVAR register for a particular MSI-X interrupt
4234 * (yes this is all very magic and confusing :)
4235 * - entry is the register array entry
4236 * - vector is the MSI-X vector for this queue
4237 * - type is RX/TX/MISC
4238 ************************************************************************/
4239 static void
4240 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4241 {
4242 struct ixgbe_hw *hw = &adapter->hw;
4243 u32 ivar, index;
4244
4245 vector |= IXGBE_IVAR_ALLOC_VAL;
4246
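	/*
	 * Each 32-bit IVAR register holds four 8-bit vector entries.
	 * On 82598, RX queues use entries 0-63 and TX queues 64-127
	 * (hence entry += type * 64); entry >> 2 selects the register
	 * and entry & 3 the byte lane. On 82599 and later, each
	 * register covers one queue pair, with RX in bytes 0/2 and TX
	 * in bytes 1/3.
	 */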
4247 switch (hw->mac.type) {
4248 case ixgbe_mac_82598EB:
4249 if (type == -1)
4250 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4251 else
4252 entry += (type * 64);
4253 index = (entry >> 2) & 0x1F;
4254 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4255 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4256 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4257 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4258 break;
4259 case ixgbe_mac_82599EB:
4260 case ixgbe_mac_X540:
4261 case ixgbe_mac_X550:
4262 case ixgbe_mac_X550EM_x:
4263 case ixgbe_mac_X550EM_a:
4264 if (type == -1) { /* MISC IVAR */
4265 index = (entry & 1) * 8;
4266 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4267 ivar &= ~(0xffUL << index);
4268 ivar |= ((u32)vector << index);
4269 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4270 } else { /* RX/TX IVARS */
4271 index = (16 * (entry & 1)) + (8 * type);
4272 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4273 ivar &= ~(0xffUL << index);
4274 ivar |= ((u32)vector << index);
4275 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4276 }
4277 break;
4278 default:
4279 break;
4280 }
4281 } /* ixgbe_set_ivar */
4282
4283 /************************************************************************
4284 * ixgbe_configure_ivars
4285 ************************************************************************/
4286 static void
4287 ixgbe_configure_ivars(struct adapter *adapter)
4288 {
4289 struct ix_queue *que = adapter->queues;
4290 u32 newitr;
4291
4292 if (ixgbe_max_interrupt_rate > 0)
4293 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4294 else {
4295 /*
4296 * Disable DMA coalescing if interrupt moderation is
4297 * disabled.
4298 */
4299 adapter->dmac = 0;
4300 newitr = 0;
4301 }
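	/*
	 * newitr uses the same EITR encoding as the interrupt_rate
	 * sysctl handler: the interval field occupies bits [11:3].
	 */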
4302
4303 for (int i = 0; i < adapter->num_queues; i++, que++) {
4304 struct rx_ring *rxr = &adapter->rx_rings[i];
4305 struct tx_ring *txr = &adapter->tx_rings[i];
4306 /* First the RX queue entry */
4307 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4308 /* ... and the TX */
4309 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4310 /* Set an Initial EITR value */
4311 ixgbe_eitr_write(adapter, que->msix, newitr);
4312 /*
4313 		 * To eliminate the influence of the previous state.
4314 		 * At this point the Tx/Rx interrupt handler
4315 		 * (ixgbe_msix_que()) cannot be called, so neither
4316 		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4317 */
4318 que->eitr_setting = 0;
4319 }
4320
4321 /* For the Link interrupt */
4322 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4323 } /* ixgbe_configure_ivars */
4324
4325 /************************************************************************
4326 * ixgbe_config_gpie
4327 ************************************************************************/
4328 static void
4329 ixgbe_config_gpie(struct adapter *adapter)
4330 {
4331 struct ixgbe_hw *hw = &adapter->hw;
4332 u32 gpie;
4333
4334 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4335
4336 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4337 /* Enable Enhanced MSI-X mode */
4338 gpie |= IXGBE_GPIE_MSIX_MODE
4339 | IXGBE_GPIE_EIAME
4340 | IXGBE_GPIE_PBA_SUPPORT
4341 | IXGBE_GPIE_OCD;
4342 }
4343
4344 /* Fan Failure Interrupt */
4345 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4346 gpie |= IXGBE_SDP1_GPIEN;
4347
4348 /* Thermal Sensor Interrupt */
4349 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4350 gpie |= IXGBE_SDP0_GPIEN_X540;
4351
4352 /* Link detection */
4353 switch (hw->mac.type) {
4354 case ixgbe_mac_82599EB:
4355 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4356 break;
4357 case ixgbe_mac_X550EM_x:
4358 case ixgbe_mac_X550EM_a:
4359 gpie |= IXGBE_SDP0_GPIEN_X540;
4360 break;
4361 default:
4362 break;
4363 }
4364
4365 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4366
4367 } /* ixgbe_config_gpie */
4368
4369 /************************************************************************
4370 * ixgbe_config_delay_values
4371 *
4372 * Requires adapter->max_frame_size to be set.
4373 ************************************************************************/
4374 static void
4375 ixgbe_config_delay_values(struct adapter *adapter)
4376 {
4377 struct ixgbe_hw *hw = &adapter->hw;
4378 u32 rxpb, frame, size, tmp;
4379
4380 frame = adapter->max_frame_size;
4381
4382 /* Calculate High Water */
4383 switch (hw->mac.type) {
4384 case ixgbe_mac_X540:
4385 case ixgbe_mac_X550:
4386 case ixgbe_mac_X550EM_x:
4387 case ixgbe_mac_X550EM_a:
4388 tmp = IXGBE_DV_X540(frame, frame);
4389 break;
4390 default:
4391 tmp = IXGBE_DV(frame, frame);
4392 break;
4393 }
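	/*
	 * Work in KB: the delay value is converted from byte times and
	 * RXPBSIZE is shifted down to KB, so the high-water mark leaves
	 * just enough headroom to absorb in-flight data after XOFF.
	 */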
4394 size = IXGBE_BT2KB(tmp);
4395 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4396 hw->fc.high_water[0] = rxpb - size;
4397
4398 /* Now calculate Low Water */
4399 switch (hw->mac.type) {
4400 case ixgbe_mac_X540:
4401 case ixgbe_mac_X550:
4402 case ixgbe_mac_X550EM_x:
4403 case ixgbe_mac_X550EM_a:
4404 tmp = IXGBE_LOW_DV_X540(frame);
4405 break;
4406 default:
4407 tmp = IXGBE_LOW_DV(frame);
4408 break;
4409 }
4410 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4411
4412 hw->fc.pause_time = IXGBE_FC_PAUSE;
4413 hw->fc.send_xon = TRUE;
4414 } /* ixgbe_config_delay_values */
4415
4416 /************************************************************************
4417 * ixgbe_set_rxfilter - Multicast Update
4418 *
4419 * Called whenever multicast address list is updated.
4420 ************************************************************************/
4421 static void
4422 ixgbe_set_rxfilter(struct adapter *adapter)
4423 {
4424 struct ixgbe_mc_addr *mta;
4425 struct ifnet *ifp = adapter->ifp;
4426 u8 *update_ptr;
4427 int mcnt = 0;
4428 u32 fctrl;
4429 struct ethercom *ec = &adapter->osdep.ec;
4430 struct ether_multi *enm;
4431 struct ether_multistep step;
4432
4433 KASSERT(mutex_owned(&adapter->core_mtx));
4434 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4435
4436 mta = adapter->mta;
4437 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4438
4439 ETHER_LOCK(ec);
4440 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4441 ETHER_FIRST_MULTI(step, ec, enm);
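	/*
	 * A range (addrlo != addrhi) cannot be expressed in the MTA and
	 * the table holds at most MAX_NUM_MULTICAST_ADDRESSES entries;
	 * fall back to ALLMULTI in either case.
	 */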
4442 while (enm != NULL) {
4443 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4444 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4445 ETHER_ADDR_LEN) != 0)) {
4446 ec->ec_flags |= ETHER_F_ALLMULTI;
4447 break;
4448 }
4449 bcopy(enm->enm_addrlo,
4450 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4451 mta[mcnt].vmdq = adapter->pool;
4452 mcnt++;
4453 ETHER_NEXT_MULTI(step, enm);
4454 }
4455
4456 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4457 if (ifp->if_flags & IFF_PROMISC)
4458 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4459 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4460 fctrl |= IXGBE_FCTRL_MPE;
4461 fctrl &= ~IXGBE_FCTRL_UPE;
4462 } else
4463 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4464
4465 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4466
4467 /* Update multicast filter entries only when it's not ALLMULTI */
4468 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4469 ETHER_UNLOCK(ec);
4470 update_ptr = (u8 *)mta;
4471 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4472 ixgbe_mc_array_itr, TRUE);
4473 } else
4474 ETHER_UNLOCK(ec);
4475 } /* ixgbe_set_rxfilter */
4476
4477 /************************************************************************
4478 * ixgbe_mc_array_itr
4479 *
4480 * An iterator function needed by the multicast shared code.
4481 * It feeds the shared code routine the addresses in the
4482 * array of ixgbe_set_rxfilter() one by one.
4483 ************************************************************************/
4484 static u8 *
4485 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4486 {
4487 struct ixgbe_mc_addr *mta;
4488
4489 mta = (struct ixgbe_mc_addr *)*update_ptr;
4490 *vmdq = mta->vmdq;
4491
4492 *update_ptr = (u8*)(mta + 1);
4493
4494 return (mta->addr);
4495 } /* ixgbe_mc_array_itr */
4496
4497 /************************************************************************
4498 * ixgbe_local_timer - Timer routine
4499 *
4500 * Checks for link status, updates statistics,
4501 * and runs the watchdog check.
4502 ************************************************************************/
4503 static void
4504 ixgbe_local_timer(void *arg)
4505 {
4506 struct adapter *adapter = arg;
4507
4508 if (adapter->schedule_wqs_ok) {
4509 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4510 workqueue_enqueue(adapter->timer_wq,
4511 &adapter->timer_wc, NULL);
4512 }
4513 }
4514
4515 static void
4516 ixgbe_handle_timer(struct work *wk, void *context)
4517 {
4518 struct adapter *adapter = context;
4519 struct ixgbe_hw *hw = &adapter->hw;
4520 device_t dev = adapter->dev;
4521 struct ix_queue *que = adapter->queues;
4522 u64 queues = 0;
4523 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4524 int hung = 0;
4525 int i;
4526
4527 IXGBE_CORE_LOCK(adapter);
4528
4529 /* Check for pluggable optics */
4530 if (ixgbe_is_sfp(hw)) {
4531 bool sched_mod_task = false;
4532
4533 if (hw->mac.type == ixgbe_mac_82598EB) {
4534 /*
4535 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4536 * any GPIO(SDP). So just schedule TASK_MOD.
4537 */
4538 sched_mod_task = true;
4539 } else {
4540 bool was_full, is_full;
4541
4542 was_full =
4543 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4544 is_full = ixgbe_sfp_cage_full(hw);
4545
4546 /* Do probe if cage state changed */
4547 if (was_full ^ is_full)
4548 sched_mod_task = true;
4549 }
4550 if (sched_mod_task) {
4551 mutex_enter(&adapter->admin_mtx);
4552 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4553 ixgbe_schedule_admin_tasklet(adapter);
4554 mutex_exit(&adapter->admin_mtx);
4555 }
4556 }
4557
4558 ixgbe_update_link_status(adapter);
4559 ixgbe_update_stats_counters(adapter);
4560
4561 /* Update some event counters */
4562 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4563 que = adapter->queues;
4564 for (i = 0; i < adapter->num_queues; i++, que++) {
4565 struct tx_ring *txr = que->txr;
4566
4567 v0 += txr->q_efbig_tx_dma_setup;
4568 v1 += txr->q_mbuf_defrag_failed;
4569 v2 += txr->q_efbig2_tx_dma_setup;
4570 v3 += txr->q_einval_tx_dma_setup;
4571 v4 += txr->q_other_tx_dma_setup;
4572 v5 += txr->q_eagain_tx_dma_setup;
4573 v6 += txr->q_enomem_tx_dma_setup;
4574 v7 += txr->q_tso_err;
4575 }
4576 adapter->efbig_tx_dma_setup.ev_count = v0;
4577 adapter->mbuf_defrag_failed.ev_count = v1;
4578 adapter->efbig2_tx_dma_setup.ev_count = v2;
4579 adapter->einval_tx_dma_setup.ev_count = v3;
4580 adapter->other_tx_dma_setup.ev_count = v4;
4581 adapter->eagain_tx_dma_setup.ev_count = v5;
4582 adapter->enomem_tx_dma_setup.ev_count = v6;
4583 adapter->tso_err.ev_count = v7;
4584
4585 /*
4586 * Check the TX queues status
4587 * - mark hung queues so we don't schedule on them
4588 * - watchdog only if all queues show hung
4589 */
4590 que = adapter->queues;
4591 for (i = 0; i < adapter->num_queues; i++, que++) {
4592 /* Keep track of queues with work for soft irq */
4593 if (que->txr->busy)
4594 queues |= 1ULL << que->me;
4595 		/*
4596 		 * Each time txeof runs without cleaning while there
4597 		 * are uncleaned descriptors, it increments busy. If
4598 		 * we reach the MAX we declare the queue hung.
4599 		 */
4600 if (que->busy == IXGBE_QUEUE_HUNG) {
4601 ++hung;
4602 /* Mark the queue as inactive */
4603 adapter->active_queues &= ~(1ULL << que->me);
4604 continue;
4605 } else {
4606 /* Check if we've come back from hung */
4607 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4608 adapter->active_queues |= 1ULL << que->me;
4609 }
4610 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4611 device_printf(dev,
4612 "Warning queue %d appears to be hung!\n", i);
4613 que->txr->busy = IXGBE_QUEUE_HUNG;
4614 ++hung;
4615 }
4616 }
4617
4618 /* Only truly watchdog if all queues show hung */
4619 if (hung == adapter->num_queues)
4620 goto watchdog;
4621 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4622 else if (queues != 0) { /* Force an IRQ on queues with work */
4623 que = adapter->queues;
4624 for (i = 0; i < adapter->num_queues; i++, que++) {
4625 mutex_enter(&que->dc_mtx);
4626 if (que->disabled_count == 0)
4627 ixgbe_rearm_queues(adapter,
4628 queues & ((u64)1 << i));
4629 mutex_exit(&que->dc_mtx);
4630 }
4631 }
4632 #endif
4633
4634 atomic_store_relaxed(&adapter->timer_pending, 0);
4635 IXGBE_CORE_UNLOCK(adapter);
4636 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4637 return;
4638
4639 watchdog:
4640 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4641 adapter->ifp->if_flags &= ~IFF_RUNNING;
4642 adapter->watchdog_events.ev_count++;
4643 ixgbe_init_locked(adapter);
4644 IXGBE_CORE_UNLOCK(adapter);
4645 } /* ixgbe_handle_timer */
4646
4647 /************************************************************************
4648 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4649 ************************************************************************/
4650 static void
4651 ixgbe_recovery_mode_timer(void *arg)
4652 {
4653 struct adapter *adapter = arg;
4654
4655 if (__predict_true(adapter->osdep.detaching == false)) {
4656 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4657 0, 1) == 0) {
4658 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4659 &adapter->recovery_mode_timer_wc, NULL);
4660 }
4661 }
4662 }
4663
4664 static void
4665 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4666 {
4667 struct adapter *adapter = context;
4668 struct ixgbe_hw *hw = &adapter->hw;
4669
4670 IXGBE_CORE_LOCK(adapter);
4671 if (ixgbe_fw_recovery_mode(hw)) {
4672 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4673 /* Firmware error detected, entering recovery mode */
4674 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4675
4676 if (hw->adapter_stopped == FALSE)
4677 ixgbe_stop_locked(adapter);
4678 }
4679 } else
4680 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4681
4682 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4683 callout_reset(&adapter->recovery_mode_timer, hz,
4684 ixgbe_recovery_mode_timer, adapter);
4685 IXGBE_CORE_UNLOCK(adapter);
4686 } /* ixgbe_handle_recovery_mode_timer */
4687
4688 /************************************************************************
4689 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4690  * bool int_en: true if called while the interrupt is enabled.
4691 ************************************************************************/
4692 static void
4693 ixgbe_handle_mod(void *context, bool int_en)
4694 {
4695 struct adapter *adapter = context;
4696 struct ixgbe_hw *hw = &adapter->hw;
4697 device_t dev = adapter->dev;
4698 enum ixgbe_sfp_type last_sfp_type;
4699 u32 err;
4700 bool last_unsupported_sfp_recovery;
4701
4702 KASSERT(mutex_owned(&adapter->core_mtx));
4703
4704 last_sfp_type = hw->phy.sfp_type;
4705 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4706 ++adapter->mod_workev.ev_count;
4707 if (adapter->hw.need_crosstalk_fix) {
4708 if ((hw->mac.type != ixgbe_mac_82598EB) &&
4709 !ixgbe_sfp_cage_full(hw))
4710 goto out;
4711 }
4712
4713 err = hw->phy.ops.identify_sfp(hw);
4714 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4715 if (last_unsupported_sfp_recovery == false)
4716 device_printf(dev,
4717 "Unsupported SFP+ module type was detected.\n");
4718 goto out;
4719 }
4720
4721 if (hw->need_unsupported_sfp_recovery) {
4722 device_printf(dev, "Recovering from unsupported SFP\n");
4723 /*
4724 * We could recover the status by calling setup_sfp(),
4725 		 * setup_link() and some others. That's complex and might not
4726 		 * work correctly in some unknown cases. To avoid that kind of
4727 		 * problem, call ixgbe_init_locked(). It's a simple and safe
4728 		 * approach.
4729 */
4730 ixgbe_init_locked(adapter);
4731 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4732 (hw->phy.sfp_type != last_sfp_type)) {
4733 /* A module is inserted and changed. */
4734
4735 if (hw->mac.type == ixgbe_mac_82598EB)
4736 err = hw->phy.ops.reset(hw);
4737 else {
4738 err = hw->mac.ops.setup_sfp(hw);
4739 hw->phy.sfp_setup_needed = FALSE;
4740 }
4741 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4742 device_printf(dev,
4743 "Setup failure - unsupported SFP+ module type.\n");
4744 goto out;
4745 }
4746 }
4747
4748 out:
4749 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4750 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4751
4752 /* Adjust media types shown in ifconfig */
4753 IXGBE_CORE_UNLOCK(adapter);
4754 ifmedia_removeall(&adapter->media);
4755 ixgbe_add_media_types(adapter);
4756 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4757 IXGBE_CORE_LOCK(adapter);
4758
4759 /*
4760 	 * Don't schedule the MSF event if the chip is 82598; 82598 doesn't
4761 	 * support MSF. At least, calling ixgbe_handle_msf on 82598 DA makes
4762 	 * the link flap because the function calls setup_link().
4763 */
4764 if (hw->mac.type != ixgbe_mac_82598EB) {
4765 mutex_enter(&adapter->admin_mtx);
4766 if (int_en)
4767 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
4768 else
4769 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4770 mutex_exit(&adapter->admin_mtx);
4771 }
4772
4773 /*
4774 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4775 * the workqueue now.
4776 */
4777 } /* ixgbe_handle_mod */
4778
4779
4780 /************************************************************************
4781 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4782 ************************************************************************/
4783 static void
4784 ixgbe_handle_msf(void *context)
4785 {
4786 struct adapter *adapter = context;
4787 struct ixgbe_hw *hw = &adapter->hw;
4788 u32 autoneg;
4789 bool negotiate;
4790
4791 KASSERT(mutex_owned(&adapter->core_mtx));
4792
4793 ++adapter->msf_workev.ev_count;
4794
4795 autoneg = hw->phy.autoneg_advertised;
4796 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4797 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4798 if (hw->mac.ops.setup_link)
4799 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4800 } /* ixgbe_handle_msf */
4801
4802 /************************************************************************
4803 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4804 ************************************************************************/
4805 static void
4806 ixgbe_handle_phy(void *context)
4807 {
4808 struct adapter *adapter = context;
4809 struct ixgbe_hw *hw = &adapter->hw;
4810 int error;
4811
4812 KASSERT(mutex_owned(&adapter->core_mtx));
4813
4814 ++adapter->phy_workev.ev_count;
4815 error = hw->phy.ops.handle_lasi(hw);
4816 if (error == IXGBE_ERR_OVERTEMP)
4817 		device_printf(adapter->dev,
4818 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4819 		    "PHY will downshift to lower power state!\n");
4820 else if (error)
4821 device_printf(adapter->dev,
4822 "Error handling LASI interrupt: %d\n", error);
4823 } /* ixgbe_handle_phy */
4824
4825 static void
4826 ixgbe_handle_admin(struct work *wk, void *context)
4827 {
4828 struct adapter *adapter = context;
4829 struct ifnet *ifp = adapter->ifp;
4830 struct ixgbe_hw *hw = &adapter->hw;
4831 u32 task_requests;
4832 u32 eims_enable = 0;
4833
4834 mutex_enter(&adapter->admin_mtx);
4835 adapter->admin_pending = 0;
4836 task_requests = adapter->task_requests;
4837 adapter->task_requests = 0;
4838 mutex_exit(&adapter->admin_mtx);
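	/*
	 * The pending flag and the request bits were cleared under
	 * admin_mtx above, so a request posted from here on schedules a
	 * fresh pass instead of being lost.
	 */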
4839
4840 /*
4841 * Hold the IFNET_LOCK across this entire call. This will
4842 * prevent additional changes to adapter->phy_layer
4843 * and serialize calls to this tasklet. We cannot hold the
4844 * CORE_LOCK while calling into the ifmedia functions as
4845 * they call ifmedia_lock() and the lock is CORE_LOCK.
4846 */
4847 IFNET_LOCK(ifp);
4848 IXGBE_CORE_LOCK(adapter);
4849 if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4850 ixgbe_handle_link(adapter);
4851 eims_enable |= IXGBE_EIMS_LSC;
4852 }
4853 if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) {
4854 ixgbe_handle_mod(adapter, false);
4855 }
4856 if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4857 ixgbe_handle_mod(adapter, true);
4858 if (hw->mac.type >= ixgbe_mac_X540)
4859 eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4860 else
4861 eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4862 }
4863 if ((task_requests
4864 & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4865 ixgbe_handle_msf(adapter);
4866 if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4867 (hw->mac.type == ixgbe_mac_82599EB))
4868 eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4869 }
4870 if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4871 ixgbe_handle_phy(adapter);
4872 eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4873 }
4874 if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4875 ixgbe_reinit_fdir(adapter);
4876 eims_enable |= IXGBE_EIMS_FLOW_DIR;
4877 }
4878 #if 0 /* notyet */
4879 if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4880 ixgbe_handle_mbx(adapter);
4881 eims_enable |= IXGBE_EIMS_MAILBOX;
4882 }
4883 #endif
4884 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
4885
4886 IXGBE_CORE_UNLOCK(adapter);
4887 IFNET_UNLOCK(ifp);
4888 } /* ixgbe_handle_admin */
4889
4890 static void
4891 ixgbe_ifstop(struct ifnet *ifp, int disable)
4892 {
4893 struct adapter *adapter = ifp->if_softc;
4894
4895 IXGBE_CORE_LOCK(adapter);
4896 ixgbe_stop_locked(adapter);
4897 IXGBE_CORE_UNLOCK(adapter);
4898
4899 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4900 atomic_store_relaxed(&adapter->timer_pending, 0);
4901 }
4902
4903 /************************************************************************
4904 * ixgbe_stop_locked - Stop the hardware
4905 *
4906 * Disables all traffic on the adapter by issuing a
4907 * global reset on the MAC and deallocates TX/RX buffers.
4908 ************************************************************************/
4909 static void
4910 ixgbe_stop_locked(void *arg)
4911 {
4912 struct ifnet *ifp;
4913 struct adapter *adapter = arg;
4914 struct ixgbe_hw *hw = &adapter->hw;
4915
4916 ifp = adapter->ifp;
4917
4918 KASSERT(mutex_owned(&adapter->core_mtx));
4919
4920 INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
4921 ixgbe_disable_intr(adapter);
4922 callout_stop(&adapter->timer);
4923
4924 /* Don't schedule workqueues. */
4925 adapter->schedule_wqs_ok = false;
4926
4927 /* Let the stack know...*/
4928 ifp->if_flags &= ~IFF_RUNNING;
4929
4930 ixgbe_reset_hw(hw);
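	/*
	 * ixgbe_reset_hw() leaves adapter_stopped set (via its internal
	 * ixgbe_stop_adapter() call); clear it before running the
	 * explicit stop sequence below.
	 */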
4931 hw->adapter_stopped = FALSE;
4932 ixgbe_stop_adapter(hw);
4933 if (hw->mac.type == ixgbe_mac_82599EB)
4934 ixgbe_stop_mac_link_on_d3_82599(hw);
4935 /* Turn off the laser - noop with no optics */
4936 ixgbe_disable_tx_laser(hw);
4937
4938 /* Update the stack */
4939 adapter->link_up = FALSE;
4940 ixgbe_update_link_status(adapter);
4941
	/* Reprogram the RAR[0] in case the user changed it. */
4943 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4944
4945 return;
4946 } /* ixgbe_stop_locked */
4947
4948 /************************************************************************
4949 * ixgbe_update_link_status - Update OS on link state
4950 *
4951 * Note: Only updates the OS on the cached link state.
4952 * The real check of the hardware only happens with
4953 * a link interrupt.
4954 ************************************************************************/
4955 static void
4956 ixgbe_update_link_status(struct adapter *adapter)
4957 {
4958 struct ifnet *ifp = adapter->ifp;
4959 device_t dev = adapter->dev;
4960 struct ixgbe_hw *hw = &adapter->hw;
4961
4962 KASSERT(mutex_owned(&adapter->core_mtx));
4963
4964 if (adapter->link_up) {
4965 if (adapter->link_active != LINK_STATE_UP) {
4966 /*
4967 * To eliminate influence of the previous state
4968 * in the same way as ixgbe_init_locked().
4969 */
4970 struct ix_queue *que = adapter->queues;
4971 for (int i = 0; i < adapter->num_queues; i++, que++)
4972 que->eitr_setting = 0;
4973
4974 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4975 /*
4976 * Discard count for both MAC Local Fault and
4977 * Remote Fault because those registers are
4978 * valid only when the link speed is up and
4979 * 10Gbps.
4980 */
4981 IXGBE_READ_REG(hw, IXGBE_MLFC);
4982 IXGBE_READ_REG(hw, IXGBE_MRFC);
4983 }
4984
4985 if (bootverbose) {
4986 const char *bpsmsg;
4987
4988 switch (adapter->link_speed) {
4989 case IXGBE_LINK_SPEED_10GB_FULL:
4990 bpsmsg = "10 Gbps";
4991 break;
4992 case IXGBE_LINK_SPEED_5GB_FULL:
4993 bpsmsg = "5 Gbps";
4994 break;
4995 case IXGBE_LINK_SPEED_2_5GB_FULL:
4996 bpsmsg = "2.5 Gbps";
4997 break;
4998 case IXGBE_LINK_SPEED_1GB_FULL:
4999 bpsmsg = "1 Gbps";
5000 break;
5001 case IXGBE_LINK_SPEED_100_FULL:
5002 bpsmsg = "100 Mbps";
5003 break;
5004 case IXGBE_LINK_SPEED_10_FULL:
5005 bpsmsg = "10 Mbps";
5006 break;
5007 default:
5008 bpsmsg = "unknown speed";
5009 break;
5010 }
				device_printf(dev, "Link is up %s %s\n",
				    bpsmsg, "Full Duplex");
5013 }
5014 adapter->link_active = LINK_STATE_UP;
5015 /* Update any Flow Control changes */
5016 ixgbe_fc_enable(&adapter->hw);
5017 /* Update DMA coalescing config */
5018 ixgbe_config_dmac(adapter);
5019 if_link_state_change(ifp, LINK_STATE_UP);
5020
5021 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5022 ixgbe_ping_all_vfs(adapter);
5023 }
5024 } else {
		/*
		 * Do this only when the link state changes to DOWN, i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
5030 if (adapter->link_active != LINK_STATE_DOWN) {
5031 if (bootverbose)
5032 device_printf(dev, "Link is Down\n");
5033 if_link_state_change(ifp, LINK_STATE_DOWN);
5034 adapter->link_active = LINK_STATE_DOWN;
5035 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5036 ixgbe_ping_all_vfs(adapter);
5037 ixgbe_drain_all(adapter);
5038 }
5039 }
5040 } /* ixgbe_update_link_status */
5041
5042 /************************************************************************
5043 * ixgbe_config_dmac - Configure DMA Coalescing
5044 ************************************************************************/
5045 static void
5046 ixgbe_config_dmac(struct adapter *adapter)
5047 {
5048 struct ixgbe_hw *hw = &adapter->hw;
5049 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5050
5051 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5052 return;
5053
	if (dcfg->watchdog_timer != adapter->dmac ||
	    dcfg->link_speed != adapter->link_speed) {
5056 dcfg->watchdog_timer = adapter->dmac;
5057 dcfg->fcoe_en = false;
5058 dcfg->link_speed = adapter->link_speed;
5059 dcfg->num_tcs = 1;
5060
5061 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5062 dcfg->watchdog_timer, dcfg->link_speed);
5063
5064 hw->mac.ops.dmac_config(hw);
5065 }
5066 } /* ixgbe_config_dmac */
5067
5068 /************************************************************************
5069 * ixgbe_enable_intr
5070 ************************************************************************/
5071 static void
5072 ixgbe_enable_intr(struct adapter *adapter)
5073 {
5074 struct ixgbe_hw *hw = &adapter->hw;
5075 struct ix_queue *que = adapter->queues;
5076 u32 mask, fwsm;
5077
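	/*
	 * Start with all causes except the per-queue RTX bits; the
	 * queues are enabled individually at the end of this function.
	 */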
5078 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5079
5080 switch (adapter->hw.mac.type) {
5081 case ixgbe_mac_82599EB:
5082 mask |= IXGBE_EIMS_ECC;
5083 /* Temperature sensor on some adapters */
5084 mask |= IXGBE_EIMS_GPI_SDP0;
5085 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5086 mask |= IXGBE_EIMS_GPI_SDP1;
5087 mask |= IXGBE_EIMS_GPI_SDP2;
5088 break;
5089 case ixgbe_mac_X540:
5090 /* Detect if Thermal Sensor is enabled */
5091 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5092 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5093 mask |= IXGBE_EIMS_TS;
5094 mask |= IXGBE_EIMS_ECC;
5095 break;
5096 case ixgbe_mac_X550:
5097 /* MAC thermal sensor is automatically enabled */
5098 mask |= IXGBE_EIMS_TS;
5099 mask |= IXGBE_EIMS_ECC;
5100 break;
5101 case ixgbe_mac_X550EM_x:
5102 case ixgbe_mac_X550EM_a:
5103 /* Some devices use SDP0 for important information */
5104 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5105 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5106 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5107 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5108 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5109 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5110 mask |= IXGBE_EICR_GPI_SDP0_X540;
5111 mask |= IXGBE_EIMS_ECC;
5112 break;
5113 default:
5114 break;
5115 }
5116
5117 /* Enable Fan Failure detection */
5118 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5119 mask |= IXGBE_EIMS_GPI_SDP1;
5120 /* Enable SR-IOV */
5121 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5122 mask |= IXGBE_EIMS_MAILBOX;
5123 /* Enable Flow Director */
5124 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5125 mask |= IXGBE_EIMS_FLOW_DIR;
5126
5127 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5128
5129 /* With MSI-X we use auto clear */
5130 if (adapter->msix_mem) {
5131 /*
5132 * It's not required to set TCP_TIMER because we don't use
5133 * it.
5134 */
5135 IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5136 }
5137
5138 /*
5139 * Now enable all queues, this is done separately to
5140 * allow for handling the extended (beyond 32) MSI-X
5141 * vectors that can be used by 82599
5142 */
5143 for (int i = 0; i < adapter->num_queues; i++, que++)
5144 ixgbe_enable_queue(adapter, que->msix);
5145
5146 IXGBE_WRITE_FLUSH(hw);
5147
5148 } /* ixgbe_enable_intr */
5149
5150 /************************************************************************
5151 * ixgbe_disable_intr_internal
5152 ************************************************************************/
5153 static void
5154 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5155 {
5156 struct ix_queue *que = adapter->queues;
5157
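	/*
	 * "nestok" is passed through to ixgbe_disable_queue_internal()
	 * and indicates whether nested disabling of an already disabled
	 * queue is acceptable; ixgbe_ensure_disabled_intr() passes
	 * false.
	 */
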
5158 /* disable interrupts other than queues */
5159 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5160
5161 if (adapter->msix_mem)
5162 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5163
5164 for (int i = 0; i < adapter->num_queues; i++, que++)
5165 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5166
5167 IXGBE_WRITE_FLUSH(&adapter->hw);
5168
} /* ixgbe_disable_intr_internal */
5170
5171 /************************************************************************
5172 * ixgbe_disable_intr
5173 ************************************************************************/
5174 static void
5175 ixgbe_disable_intr(struct adapter *adapter)
5176 {
5177
5178 ixgbe_disable_intr_internal(adapter, true);
5179 } /* ixgbe_disable_intr */
5180
5181 /************************************************************************
5182 * ixgbe_ensure_disabled_intr
5183 ************************************************************************/
5184 void
5185 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5186 {
5187
5188 ixgbe_disable_intr_internal(adapter, false);
5189 } /* ixgbe_ensure_disabled_intr */
5190
5191 /************************************************************************
5192 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5193 ************************************************************************/
5194 static int
5195 ixgbe_legacy_irq(void *arg)
5196 {
5197 struct ix_queue *que = arg;
5198 struct adapter *adapter = que->adapter;
5199 struct ixgbe_hw *hw = &adapter->hw;
5200 struct tx_ring *txr = adapter->tx_rings;
5201 u32 eicr;
5202 u32 eims_orig;
5203 u32 eims_enable = 0;
5204 u32 eims_disable = 0;
5205
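	/*
	 * Save the currently enabled set so that everything not
	 * explicitly disabled below can be restored on the way out.
	 */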
5206 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5207 /*
5208 * Silicon errata #26 on 82598. Disable all interrupts before reading
5209 * EICR.
5210 */
5211 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5212
5213 /* Read and clear EICR */
5214 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5215
5216 adapter->stats.pf.legint.ev_count++;
5217 if (eicr == 0) {
5218 adapter->stats.pf.intzero.ev_count++;
5219 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5220 return 0;
5221 }
5222
5223 /* Queue (0) intr */
5224 if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5225 ++que->irqs.ev_count;
5226
5227 /*
5228 * The same as ixgbe_msix_que() about
5229 * "que->txrx_use_workqueue".
5230 */
5231 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5232
5233 IXGBE_TX_LOCK(txr);
5234 ixgbe_txeof(txr);
5235 #ifdef notyet
5236 if (!ixgbe_ring_empty(ifp, txr->br))
5237 ixgbe_start_locked(ifp, txr);
5238 #endif
5239 IXGBE_TX_UNLOCK(txr);
5240
5241 que->req.ev_count++;
5242 ixgbe_sched_handle_que(adapter, que);
5243 /* Disable queue 0 interrupt */
5244 eims_disable |= 1UL << 0;
5245
5246 } else
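		/* No queue interrupt was pending; re-enable queue
		 * interrupts on the way out. */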
5247 eims_enable |= IXGBE_EIMC_RTX_QUEUE;
5248
5249 ixgbe_intr_admin_common(adapter, eicr, &eims_disable);
5250
5251 /* Re-enable some interrupts */
5252 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5253 (eims_orig & ~eims_disable) | eims_enable);
5254
5255 return 1;
5256 } /* ixgbe_legacy_irq */
5257
5258 /************************************************************************
5259 * ixgbe_free_pciintr_resources
5260 ************************************************************************/
5261 static void
5262 ixgbe_free_pciintr_resources(struct adapter *adapter)
5263 {
5264 struct ix_queue *que = adapter->queues;
5265 int rid;
5266
5267 /*
5268 * Release all msix queue resources:
5269 */
5270 for (int i = 0; i < adapter->num_queues; i++, que++) {
5271 if (que->res != NULL) {
5272 pci_intr_disestablish(adapter->osdep.pc,
5273 adapter->osdep.ihs[i]);
5274 adapter->osdep.ihs[i] = NULL;
5275 }
5276 }
5277
5278 /* Clean the Legacy or Link interrupt last */
5279 if (adapter->vector) /* we are doing MSIX */
5280 rid = adapter->vector;
5281 else
5282 rid = 0;
5283
5284 if (adapter->osdep.ihs[rid] != NULL) {
5285 pci_intr_disestablish(adapter->osdep.pc,
5286 adapter->osdep.ihs[rid]);
5287 adapter->osdep.ihs[rid] = NULL;
5288 }
5289
5290 if (adapter->osdep.intrs != NULL) {
5291 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5292 adapter->osdep.nintrs);
5293 adapter->osdep.intrs = NULL;
5294 }
5295 } /* ixgbe_free_pciintr_resources */
5296
5297 /************************************************************************
5298 * ixgbe_free_pci_resources
5299 ************************************************************************/
5300 static void
5301 ixgbe_free_pci_resources(struct adapter *adapter)
5302 {
5303
5304 ixgbe_free_pciintr_resources(adapter);
5305
5306 if (adapter->osdep.mem_size != 0) {
5307 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5308 adapter->osdep.mem_bus_space_handle,
5309 adapter->osdep.mem_size);
5310 }
5311
5312 } /* ixgbe_free_pci_resources */
5313
5314 /************************************************************************
5315 * ixgbe_set_sysctl_value
5316 ************************************************************************/
5317 static void
5318 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5319 const char *description, int *limit, int value)
5320 {
5321 device_t dev = adapter->dev;
5322 struct sysctllog **log;
5323 const struct sysctlnode *rnode, *cnode;
5324
5325 /*
5326 * It's not required to check recovery mode because this function never
5327 * touches hardware.
5328 */
5329
5330 log = &adapter->sysctllog;
5331 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5332 aprint_error_dev(dev, "could not create sysctl root\n");
5333 return;
5334 }
5335 if (sysctl_createv(log, 0, &rnode, &cnode,
5336 CTLFLAG_READWRITE, CTLTYPE_INT,
5337 name, SYSCTL_DESCR(description),
5338 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5339 aprint_error_dev(dev, "could not create sysctl\n");
5340 *limit = value;
5341 } /* ixgbe_set_sysctl_value */
5342
5343 /************************************************************************
5344 * ixgbe_sysctl_flowcntl
5345 *
5346 * SYSCTL wrapper around setting Flow Control
5347 ************************************************************************/
5348 static int
5349 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5350 {
5351 struct sysctlnode node = *rnode;
5352 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5353 int error, fc;
5354
5355 if (ixgbe_fw_recovery_mode_swflag(adapter))
5356 return (EPERM);
5357
5358 fc = adapter->hw.fc.current_mode;
5359 node.sysctl_data = &fc;
5360 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5361 if (error != 0 || newp == NULL)
5362 return error;
5363
5364 /* Don't bother if it's not changed */
5365 if (fc == adapter->hw.fc.current_mode)
5366 return (0);
5367
5368 return ixgbe_set_flowcntl(adapter, fc);
5369 } /* ixgbe_sysctl_flowcntl */
5370
5371 /************************************************************************
5372 * ixgbe_set_flowcntl - Set flow control
5373 *
5374 * Flow control values:
5375 * 0 - off
5376 * 1 - rx pause
5377 * 2 - tx pause
5378 * 3 - full
5379 ************************************************************************/
5380 static int
5381 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5382 {
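	/*
	 * Rx drop-on-full and flow control are mutually exclusive;
	 * see the comment above ixgbe_enable_rx_drop() for the
	 * rationale.
	 */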
5383 switch (fc) {
5384 case ixgbe_fc_rx_pause:
5385 case ixgbe_fc_tx_pause:
5386 case ixgbe_fc_full:
5387 adapter->hw.fc.requested_mode = fc;
5388 if (adapter->num_queues > 1)
5389 ixgbe_disable_rx_drop(adapter);
5390 break;
5391 case ixgbe_fc_none:
5392 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5393 if (adapter->num_queues > 1)
5394 ixgbe_enable_rx_drop(adapter);
5395 break;
5396 default:
5397 return (EINVAL);
5398 }
5399
5400 #if 0 /* XXX NetBSD */
5401 /* Don't autoneg if forcing a value */
5402 adapter->hw.fc.disable_fc_autoneg = TRUE;
5403 #endif
5404 ixgbe_fc_enable(&adapter->hw);
5405
5406 return (0);
5407 } /* ixgbe_set_flowcntl */
5408
5409 /************************************************************************
5410 * ixgbe_enable_rx_drop
5411 *
5412 * Enable the hardware to drop packets when the buffer is
5413 * full. This is useful with multiqueue, so that no single
5414 * queue being full stalls the entire RX engine. We only
5415 * enable this when Multiqueue is enabled AND Flow Control
5416 * is disabled.
5417 ************************************************************************/
5418 static void
5419 ixgbe_enable_rx_drop(struct adapter *adapter)
5420 {
5421 struct ixgbe_hw *hw = &adapter->hw;
5422 struct rx_ring *rxr;
5423 u32 srrctl;
5424
5425 for (int i = 0; i < adapter->num_queues; i++) {
5426 rxr = &adapter->rx_rings[i];
5427 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5428 srrctl |= IXGBE_SRRCTL_DROP_EN;
5429 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5430 }
5431
5432 /* enable drop for each vf */
5433 for (int i = 0; i < adapter->num_vfs; i++) {
5434 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5435 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5436 IXGBE_QDE_ENABLE));
5437 }
5438 } /* ixgbe_enable_rx_drop */
5439
5440 /************************************************************************
5441 * ixgbe_disable_rx_drop
5442 ************************************************************************/
5443 static void
5444 ixgbe_disable_rx_drop(struct adapter *adapter)
5445 {
5446 struct ixgbe_hw *hw = &adapter->hw;
5447 struct rx_ring *rxr;
5448 u32 srrctl;
5449
5450 for (int i = 0; i < adapter->num_queues; i++) {
5451 rxr = &adapter->rx_rings[i];
5452 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5453 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5454 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5455 }
5456
5457 /* disable drop for each vf */
5458 for (int i = 0; i < adapter->num_vfs; i++) {
5459 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5460 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5461 }
5462 } /* ixgbe_disable_rx_drop */
5463
5464 /************************************************************************
5465 * ixgbe_sysctl_advertise
5466 *
5467 * SYSCTL wrapper around setting advertised speed
5468 ************************************************************************/
5469 static int
5470 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5471 {
5472 struct sysctlnode node = *rnode;
5473 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5474 int error = 0, advertise;
5475
5476 if (ixgbe_fw_recovery_mode_swflag(adapter))
5477 return (EPERM);
5478
5479 advertise = adapter->advertise;
5480 node.sysctl_data = &advertise;
5481 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5482 if (error != 0 || newp == NULL)
5483 return error;
5484
5485 return ixgbe_set_advertise(adapter, advertise);
5486 } /* ixgbe_sysctl_advertise */
5487
5488 /************************************************************************
5489 * ixgbe_set_advertise - Control advertised link speed
5490 *
5491 * Flags:
5492 * 0x00 - Default (all capable link speed)
5493 * 0x01 - advertise 100 Mb
5494 * 0x02 - advertise 1G
5495 * 0x04 - advertise 10G
5496 * 0x08 - advertise 10 Mb
5497 * 0x10 - advertise 2.5G
5498 * 0x20 - advertise 5G
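 *
 * The flags may be OR'd together; e.g. 0x06 advertises both 1G and 10G.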
5499 ************************************************************************/
5500 static int
5501 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5502 {
5503 device_t dev;
5504 struct ixgbe_hw *hw;
5505 ixgbe_link_speed speed = 0;
5506 ixgbe_link_speed link_caps = 0;
5507 s32 err = IXGBE_NOT_IMPLEMENTED;
5508 bool negotiate = FALSE;
5509
5510 /* Checks to validate new value */
5511 if (adapter->advertise == advertise) /* no change */
5512 return (0);
5513
5514 dev = adapter->dev;
5515 hw = &adapter->hw;
5516
5517 /* No speed changes for backplane media */
5518 if (hw->phy.media_type == ixgbe_media_type_backplane)
5519 return (ENODEV);
5520
5521 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5522 (hw->phy.multispeed_fiber))) {
5523 device_printf(dev,
5524 "Advertised speed can only be set on copper or "
5525 "multispeed fiber media types.\n");
5526 return (EINVAL);
5527 }
5528
5529 if (advertise < 0x0 || advertise > 0x3f) {
5530 device_printf(dev,
5531 "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5532 return (EINVAL);
5533 }
5534
5535 if (hw->mac.ops.get_link_capabilities) {
5536 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5537 &negotiate);
5538 if (err != IXGBE_SUCCESS) {
			device_printf(dev,
			    "Unable to determine supported advertised speeds\n");
5540 return (ENODEV);
5541 }
5542 }
5543
5544 /* Set new value and report new advertised mode */
5545 if (advertise & 0x1) {
5546 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5547 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5548 return (EINVAL);
5549 }
5550 speed |= IXGBE_LINK_SPEED_100_FULL;
5551 }
5552 if (advertise & 0x2) {
5553 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5554 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5555 return (EINVAL);
5556 }
5557 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5558 }
5559 if (advertise & 0x4) {
5560 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5561 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5562 return (EINVAL);
5563 }
5564 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5565 }
5566 if (advertise & 0x8) {
5567 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5568 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5569 return (EINVAL);
5570 }
5571 speed |= IXGBE_LINK_SPEED_10_FULL;
5572 }
5573 if (advertise & 0x10) {
5574 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5575 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5576 return (EINVAL);
5577 }
5578 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5579 }
5580 if (advertise & 0x20) {
5581 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5582 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5583 return (EINVAL);
5584 }
5585 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5586 }
5587 if (advertise == 0)
5588 speed = link_caps; /* All capable link speed */
5589
5590 hw->mac.autotry_restart = TRUE;
5591 hw->mac.ops.setup_link(hw, speed, TRUE);
5592 adapter->advertise = advertise;
5593
5594 return (0);
5595 } /* ixgbe_set_advertise */
5596
5597 /************************************************************************
5598 * ixgbe_get_advertise - Get current advertised speed settings
5599 *
5600 * Formatted for sysctl usage.
5601 * Flags:
5602 * 0x01 - advertise 100 Mb
5603 * 0x02 - advertise 1G
5604 * 0x04 - advertise 10G
5605 * 0x08 - advertise 10 Mb (yes, Mb)
5606 * 0x10 - advertise 2.5G
5607 * 0x20 - advertise 5G
5608 ************************************************************************/
5609 static int
5610 ixgbe_get_advertise(struct adapter *adapter)
5611 {
5612 struct ixgbe_hw *hw = &adapter->hw;
5613 int speed;
5614 ixgbe_link_speed link_caps = 0;
5615 s32 err;
5616 bool negotiate = FALSE;
5617
5618 /*
5619 * Advertised speed means nothing unless it's copper or
5620 * multi-speed fiber
5621 */
5622 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5623 !(hw->phy.multispeed_fiber))
5624 return (0);
5625
5626 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5627 if (err != IXGBE_SUCCESS)
5628 return (0);
5629
5630 speed =
5631 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5632 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5633 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5634 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5635 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5636 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5637
5638 return speed;
5639 } /* ixgbe_get_advertise */
5640
5641 /************************************************************************
5642 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5643 *
5644 * Control values:
5645 * 0/1 - off / on (use default value of 1000)
5646 *
5647 * Legal timer values are:
5648 * 50,100,250,500,1000,2000,5000,10000
5649 *
5650 * Turning off interrupt moderation will also turn this off.
5651 ************************************************************************/
5652 static int
5653 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5654 {
5655 struct sysctlnode node = *rnode;
5656 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5657 struct ifnet *ifp = adapter->ifp;
5658 int error;
5659 int newval;
5660
5661 if (ixgbe_fw_recovery_mode_swflag(adapter))
5662 return (EPERM);
5663
5664 newval = adapter->dmac;
5665 node.sysctl_data = &newval;
5666 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5667 if ((error) || (newp == NULL))
5668 return (error);
5669
5670 switch (newval) {
5671 case 0:
5672 /* Disabled */
5673 adapter->dmac = 0;
5674 break;
5675 case 1:
5676 /* Enable and use default */
5677 adapter->dmac = 1000;
5678 break;
5679 case 50:
5680 case 100:
5681 case 250:
5682 case 500:
5683 case 1000:
5684 case 2000:
5685 case 5000:
5686 case 10000:
5687 /* Legal values - allow */
5688 adapter->dmac = newval;
5689 break;
5690 default:
5691 /* Do nothing, illegal value */
5692 return (EINVAL);
5693 }
5694
5695 /* Re-initialize hardware if it's already running */
5696 if (ifp->if_flags & IFF_RUNNING)
5697 ifp->if_init(ifp);
5698
5699 return (0);
} /* ixgbe_sysctl_dmac */
5701
5702 #ifdef IXGBE_DEBUG
5703 /************************************************************************
5704 * ixgbe_sysctl_power_state
5705 *
5706 * Sysctl to test power states
5707 * Values:
5708 * 0 - set device to D0
5709 * 3 - set device to D3
5710 * (none) - get current device power state
5711 ************************************************************************/
5712 static int
5713 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5714 {
5715 #ifdef notyet
5716 struct sysctlnode node = *rnode;
5717 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5718 device_t dev = adapter->dev;
5719 int curr_ps, new_ps, error = 0;
5720
5721 if (ixgbe_fw_recovery_mode_swflag(adapter))
5722 return (EPERM);
5723
5724 curr_ps = new_ps = pci_get_powerstate(dev);
5725
5726 error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
5728 return (error);
5729
5730 if (new_ps == curr_ps)
5731 return (0);
5732
5733 if (new_ps == 3 && curr_ps == 0)
5734 error = DEVICE_SUSPEND(dev);
5735 else if (new_ps == 0 && curr_ps == 3)
5736 error = DEVICE_RESUME(dev);
5737 else
5738 return (EINVAL);
5739
5740 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5741
5742 return (error);
5743 #else
5744 return 0;
5745 #endif
5746 } /* ixgbe_sysctl_power_state */
5747 #endif
5748
5749 /************************************************************************
5750 * ixgbe_sysctl_wol_enable
5751 *
5752 * Sysctl to enable/disable the WoL capability,
5753 * if supported by the adapter.
5754 *
5755 * Values:
5756 * 0 - disabled
5757 * 1 - enabled
5758 ************************************************************************/
5759 static int
5760 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5761 {
5762 struct sysctlnode node = *rnode;
5763 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5764 struct ixgbe_hw *hw = &adapter->hw;
5765 bool new_wol_enabled;
5766 int error = 0;
5767
5768 /*
5769 * It's not required to check recovery mode because this function never
5770 * touches hardware.
5771 */
5772 new_wol_enabled = hw->wol_enabled;
5773 node.sysctl_data = &new_wol_enabled;
5774 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5775 if ((error) || (newp == NULL))
5776 return (error);
5777 if (new_wol_enabled == hw->wol_enabled)
5778 return (0);
5779
5780 if (new_wol_enabled && !adapter->wol_support)
5781 return (ENODEV);
5782 else
5783 hw->wol_enabled = new_wol_enabled;
5784
5785 return (0);
5786 } /* ixgbe_sysctl_wol_enable */
5787
5788 /************************************************************************
5789 * ixgbe_sysctl_wufc - Wake Up Filter Control
5790 *
5791 * Sysctl to enable/disable the types of packets that the
5792 * adapter will wake up on upon receipt.
5793 * Flags:
5794 * 0x1 - Link Status Change
5795 * 0x2 - Magic Packet
5796 * 0x4 - Direct Exact
5797 * 0x8 - Directed Multicast
5798 * 0x10 - Broadcast
5799 * 0x20 - ARP/IPv4 Request Packet
5800 * 0x40 - Direct IPv4 Packet
5801 * 0x80 - Direct IPv6 Packet
5802 *
5803 * Settings not listed above will cause the sysctl to return an error.
5804 ************************************************************************/
5805 static int
5806 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5807 {
5808 struct sysctlnode node = *rnode;
5809 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5810 int error = 0;
5811 u32 new_wufc;
5812
5813 /*
5814 * It's not required to check recovery mode because this function never
5815 * touches hardware.
5816 */
5817 new_wufc = adapter->wufc;
5818 node.sysctl_data = &new_wufc;
5819 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5820 if ((error) || (newp == NULL))
5821 return (error);
5822 if (new_wufc == adapter->wufc)
5823 return (0);
5824
5825 if (new_wufc & 0xffffff00)
5826 return (EINVAL);
5827
5828 new_wufc &= 0xff;
5829 new_wufc |= (0xffffff & adapter->wufc);
5830 adapter->wufc = new_wufc;
5831
5832 return (0);
5833 } /* ixgbe_sysctl_wufc */
5834
5835 #ifdef IXGBE_DEBUG
5836 /************************************************************************
5837 * ixgbe_sysctl_print_rss_config
5838 ************************************************************************/
5839 static int
5840 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5841 {
5842 #ifdef notyet
5843 struct sysctlnode node = *rnode;
5844 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5845 struct ixgbe_hw *hw = &adapter->hw;
5846 device_t dev = adapter->dev;
5847 struct sbuf *buf;
5848 int error = 0, reta_size;
5849 u32 reg;
5850
5851 if (ixgbe_fw_recovery_mode_swflag(adapter))
5852 return (EPERM);
5853
5854 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5855 if (!buf) {
5856 device_printf(dev, "Could not allocate sbuf for output.\n");
5857 return (ENOMEM);
5858 }
5859
5860 // TODO: use sbufs to make a string to print out
5861 /* Set multiplier for RETA setup and table size based on MAC */
5862 switch (adapter->hw.mac.type) {
5863 case ixgbe_mac_X550:
5864 case ixgbe_mac_X550EM_x:
5865 case ixgbe_mac_X550EM_a:
5866 reta_size = 128;
5867 break;
5868 default:
5869 reta_size = 32;
5870 break;
5871 }
5872
5873 /* Print out the redirection table */
5874 sbuf_cat(buf, "\n");
5875 for (int i = 0; i < reta_size; i++) {
5876 if (i < 32) {
5877 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5878 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5879 } else {
5880 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5881 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5882 }
5883 }
5884
5885 // TODO: print more config
5886
5887 error = sbuf_finish(buf);
5888 if (error)
5889 device_printf(dev, "Error finishing sbuf: %d\n", error);
5890
5891 sbuf_delete(buf);
5892 #endif
5893 return (0);
5894 } /* ixgbe_sysctl_print_rss_config */
5895 #endif /* IXGBE_DEBUG */
5896
5897 /************************************************************************
5898 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5899 *
5900 * For X552/X557-AT devices using an external PHY
5901 ************************************************************************/
5902 static int
5903 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5904 {
5905 struct sysctlnode node = *rnode;
5906 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5907 struct ixgbe_hw *hw = &adapter->hw;
5908 int val;
5909 u16 reg;
5910 int error;
5911
5912 if (ixgbe_fw_recovery_mode_swflag(adapter))
5913 return (EPERM);
5914
5915 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5916 device_printf(adapter->dev,
5917 "Device has no supported external thermal sensor.\n");
5918 return (ENODEV);
5919 }
5920
5921 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5923 device_printf(adapter->dev,
5924 "Error reading from PHY's current temperature register\n");
5925 return (EAGAIN);
5926 }
5927
5928 node.sysctl_data = &val;
5929
	/* The temperature is reported in the high byte; shift for output */
5931 val = reg >> 8;
5932
5933 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5934 if ((error) || (newp == NULL))
5935 return (error);
5936
5937 return (0);
5938 } /* ixgbe_sysctl_phy_temp */
5939
5940 /************************************************************************
5941 * ixgbe_sysctl_phy_overtemp_occurred
5942 *
5943 * Reports (directly from the PHY) whether the current PHY
5944 * temperature is over the overtemp threshold.
5945 ************************************************************************/
5946 static int
5947 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5948 {
5949 struct sysctlnode node = *rnode;
5950 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5951 struct ixgbe_hw *hw = &adapter->hw;
5952 int val, error;
5953 u16 reg;
5954
5955 if (ixgbe_fw_recovery_mode_swflag(adapter))
5956 return (EPERM);
5957
5958 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5959 device_printf(adapter->dev,
5960 "Device has no supported external thermal sensor.\n");
5961 return (ENODEV);
5962 }
5963
5964 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5966 device_printf(adapter->dev,
5967 "Error reading from PHY's temperature status register\n");
5968 return (EAGAIN);
5969 }
5970
5971 node.sysctl_data = &val;
5972
	/* Get the occurrence bit (bit 14 of the status register) */
5974 val = !!(reg & 0x4000);
5975
5976 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5977 if ((error) || (newp == NULL))
5978 return (error);
5979
5980 return (0);
5981 } /* ixgbe_sysctl_phy_overtemp_occurred */
5982
5983 /************************************************************************
5984 * ixgbe_sysctl_eee_state
5985 *
5986 * Sysctl to set EEE power saving feature
5987 * Values:
5988 * 0 - disable EEE
5989 * 1 - enable EEE
5990 * (none) - get current device EEE state
5991 ************************************************************************/
5992 static int
5993 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5994 {
5995 struct sysctlnode node = *rnode;
5996 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5997 struct ifnet *ifp = adapter->ifp;
5998 device_t dev = adapter->dev;
5999 int curr_eee, new_eee, error = 0;
6000 s32 retval;
6001
6002 if (ixgbe_fw_recovery_mode_swflag(adapter))
6003 return (EPERM);
6004
6005 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6006 node.sysctl_data = &new_eee;
6007 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6008 if ((error) || (newp == NULL))
6009 return (error);
6010
6011 /* Nothing to do */
6012 if (new_eee == curr_eee)
6013 return (0);
6014
6015 /* Not supported */
6016 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6017 return (EINVAL);
6018
6019 /* Bounds checking */
6020 if ((new_eee < 0) || (new_eee > 1))
6021 return (EINVAL);
6022
6023 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6024 if (retval) {
6025 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6026 return (EINVAL);
6027 }
6028
6029 /* Restart auto-neg */
6030 ifp->if_init(ifp);
6031
6032 device_printf(dev, "New EEE state: %d\n", new_eee);
6033
6034 /* Cache new value */
6035 if (new_eee)
6036 adapter->feat_en |= IXGBE_FEATURE_EEE;
6037 else
6038 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6039
6040 return (error);
6041 } /* ixgbe_sysctl_eee_state */
6042
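/*
 * Print one per-queue register across all queues on a single line.
 */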
6043 #define PRINTQS(adapter, regname) \
6044 do { \
6045 struct ixgbe_hw *_hw = &(adapter)->hw; \
6046 int _i; \
6047 \
6048 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6049 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6050 printf((_i == 0) ? "\t" : " "); \
6051 printf("%08x", IXGBE_READ_REG(_hw, \
6052 IXGBE_##regname(_i))); \
6053 } \
6054 printf("\n"); \
6055 } while (0)
6056
6057 /************************************************************************
6058 * ixgbe_print_debug_info
6059 *
 * Called from the "debug" sysctl handler, ixgbe_sysctl_debug().
6061 * Provides a way to take a look at important statistics
6062 * maintained by the driver and hardware.
6063 ************************************************************************/
6064 static void
6065 ixgbe_print_debug_info(struct adapter *adapter)
6066 {
6067 device_t dev = adapter->dev;
6068 struct ixgbe_hw *hw = &adapter->hw;
6069 int table_size;
6070 int i;
6071
6072 switch (adapter->hw.mac.type) {
6073 case ixgbe_mac_X550:
6074 case ixgbe_mac_X550EM_x:
6075 case ixgbe_mac_X550EM_a:
6076 table_size = 128;
6077 break;
6078 default:
6079 table_size = 32;
6080 break;
6081 }
6082
6083 device_printf(dev, "[E]RETA:\n");
6084 for (i = 0; i < table_size; i++) {
6085 if (i < 32)
6086 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6087 IXGBE_RETA(i)));
6088 else
6089 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6090 IXGBE_ERETA(i - 32)));
6091 }
6092
6093 device_printf(dev, "queue:");
6094 for (i = 0; i < adapter->num_queues; i++) {
6095 printf((i == 0) ? "\t" : " ");
6096 printf("%8d", i);
6097 }
6098 printf("\n");
6099 PRINTQS(adapter, RDBAL);
6100 PRINTQS(adapter, RDBAH);
6101 PRINTQS(adapter, RDLEN);
6102 PRINTQS(adapter, SRRCTL);
6103 PRINTQS(adapter, RDH);
6104 PRINTQS(adapter, RDT);
6105 PRINTQS(adapter, RXDCTL);
6106
6107 device_printf(dev, "RQSMR:");
6108 for (i = 0; i < adapter->num_queues / 4; i++) {
6109 printf((i == 0) ? "\t" : " ");
6110 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6111 }
6112 printf("\n");
6113
6114 device_printf(dev, "disabled_count:");
6115 for (i = 0; i < adapter->num_queues; i++) {
6116 printf((i == 0) ? "\t" : " ");
6117 printf("%8d", adapter->queues[i].disabled_count);
6118 }
6119 printf("\n");
6120
6121 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6122 if (hw->mac.type != ixgbe_mac_82598EB) {
6123 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6124 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6125 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6126 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6127 }
6128 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6129 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6130 } /* ixgbe_print_debug_info */
6131
6132 /************************************************************************
6133 * ixgbe_sysctl_debug
6134 ************************************************************************/
6135 static int
6136 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6137 {
6138 struct sysctlnode node = *rnode;
6139 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6140 int error, result = 0;
6141
6142 if (ixgbe_fw_recovery_mode_swflag(adapter))
6143 return (EPERM);
6144
6145 node.sysctl_data = &result;
6146 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6147
6148 if (error || newp == NULL)
6149 return error;
6150
6151 if (result == 1)
6152 ixgbe_print_debug_info(adapter);
6153
6154 return 0;
6155 } /* ixgbe_sysctl_debug */
6156
6157 /************************************************************************
6158 * ixgbe_init_device_features
6159 ************************************************************************/
6160 static void
6161 ixgbe_init_device_features(struct adapter *adapter)
6162 {
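	/*
	 * feat_cap records what the hardware supports; feat_en records
	 * what is actually enabled.  Capabilities are filled in first,
	 * then features are enabled from that set.
	 */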
6163 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6164 | IXGBE_FEATURE_RSS
6165 | IXGBE_FEATURE_MSI
6166 | IXGBE_FEATURE_MSIX
6167 | IXGBE_FEATURE_LEGACY_IRQ
6168 | IXGBE_FEATURE_LEGACY_TX;
6169
6170 /* Set capabilities first... */
6171 switch (adapter->hw.mac.type) {
6172 case ixgbe_mac_82598EB:
6173 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6174 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6175 break;
6176 case ixgbe_mac_X540:
6177 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6178 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6179 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6180 (adapter->hw.bus.func == 0))
6181 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6182 break;
6183 case ixgbe_mac_X550:
6184 /*
6185 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6186 * NVM Image version.
6187 */
6188 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6189 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6190 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6191 break;
6192 case ixgbe_mac_X550EM_x:
6193 /*
6194 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6195 * NVM Image version.
6196 */
6197 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6198 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6199 break;
6200 case ixgbe_mac_X550EM_a:
6201 /*
6202 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6203 * NVM Image version.
6204 */
6205 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6206 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6207 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6208 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6209 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6210 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6211 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6212 }
6213 break;
6214 case ixgbe_mac_82599EB:
6215 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6216 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6217 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6218 (adapter->hw.bus.func == 0))
6219 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6220 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6221 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6222 break;
6223 default:
6224 break;
6225 }
6226
6227 /* Enabled by default... */
6228 /* Fan failure detection */
6229 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6230 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6231 /* Netmap */
6232 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6233 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6234 /* EEE */
6235 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6236 adapter->feat_en |= IXGBE_FEATURE_EEE;
6237 /* Thermal Sensor */
6238 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6239 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6240 /*
6241 * Recovery mode:
6242 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6243 * NVM Image version.
6244 */
6245
6246 /* Enabled via global sysctl... */
6247 /* Flow Director */
6248 if (ixgbe_enable_fdir) {
6249 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6250 adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev,
			    "Device does not support Flow Director. Leaving disabled.\n");
6253 }
6254 /* Legacy (single queue) transmit */
6255 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6256 ixgbe_enable_legacy_tx)
6257 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6258 /*
6259 * Message Signal Interrupts - Extended (MSI-X)
6260 * Normal MSI is only enabled if MSI-X calls fail.
6261 */
6262 if (!ixgbe_enable_msix)
6263 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6264 /* Receive-Side Scaling (RSS) */
6265 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6266 adapter->feat_en |= IXGBE_FEATURE_RSS;
6267
6268 /* Disable features with unmet dependencies... */
6269 /* No MSI-X */
6270 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6271 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6272 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6273 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6274 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6275 }
6276 } /* ixgbe_init_device_features */
6277
6278 /************************************************************************
6279 * ixgbe_probe - Device identification routine
6280 *
6281 * Determines if the driver should be loaded on
6282 * adapter based on its PCI vendor/device ID.
6283 *
 * return 1 on match, 0 otherwise
6285 ************************************************************************/
6286 static int
6287 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6288 {
6289 const struct pci_attach_args *pa = aux;
6290
6291 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6292 }
6293
6294 static const ixgbe_vendor_info_t *
6295 ixgbe_lookup(const struct pci_attach_args *pa)
6296 {
6297 const ixgbe_vendor_info_t *ent;
6298 pcireg_t subid;
6299
6300 INIT_DEBUGOUT("ixgbe_lookup: begin");
6301
6302 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6303 return NULL;
6304
6305 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6306
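	/* A zero subvendor/subdevice ID in the table acts as a wildcard */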
6307 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6308 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6309 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6310 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6311 (ent->subvendor_id == 0)) &&
6312 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6313 (ent->subdevice_id == 0))) {
6314 return ent;
6315 }
6316 }
6317 return NULL;
6318 }
6319
6320 static int
6321 ixgbe_ifflags_cb(struct ethercom *ec)
6322 {
6323 struct ifnet *ifp = &ec->ec_if;
6324 struct adapter *adapter = ifp->if_softc;
6325 u_short change;
6326 int rv = 0;
6327
6328 IXGBE_CORE_LOCK(adapter);
6329
6330 change = ifp->if_flags ^ adapter->if_flags;
6331 if (change != 0)
6332 adapter->if_flags = ifp->if_flags;
6333
6334 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
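		/* A flag we cannot handle here requires a full reinit */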
6335 rv = ENETRESET;
6336 goto out;
6337 } else if ((change & IFF_PROMISC) != 0)
6338 ixgbe_set_rxfilter(adapter);
6339
6340 /* Check for ec_capenable. */
6341 change = ec->ec_capenable ^ adapter->ec_capenable;
6342 adapter->ec_capenable = ec->ec_capenable;
6343 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6344 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6345 rv = ENETRESET;
6346 goto out;
6347 }
6348
6349 /*
6350 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6352 */
6353
6354 /* Set up VLAN support and filter */
6355 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6356 ixgbe_setup_vlan_hw_support(adapter);
6357
6358 out:
6359 IXGBE_CORE_UNLOCK(adapter);
6360
6361 return rv;
6362 }
6363
6364 /************************************************************************
6365 * ixgbe_ioctl - Ioctl entry point
6366 *
6367 * Called when the user wants to configure the interface.
6368 *
6369 * return 0 on success, positive on failure
6370 ************************************************************************/
6371 static int
6372 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6373 {
6374 struct adapter *adapter = ifp->if_softc;
6375 struct ixgbe_hw *hw = &adapter->hw;
6376 struct ifcapreq *ifcr = data;
6377 struct ifreq *ifr = data;
6378 int error = 0;
6379 int l4csum_en;
6380 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6381 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6382
6383 if (ixgbe_fw_recovery_mode_swflag(adapter))
6384 return (EPERM);
6385
6386 switch (command) {
6387 case SIOCSIFFLAGS:
6388 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6389 break;
6390 case SIOCADDMULTI:
6391 case SIOCDELMULTI:
6392 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6393 break;
6394 case SIOCSIFMEDIA:
6395 case SIOCGIFMEDIA:
6396 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6397 break;
6398 case SIOCSIFCAP:
6399 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6400 break;
6401 case SIOCSIFMTU:
6402 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6403 break;
6404 #ifdef __NetBSD__
6405 case SIOCINITIFADDR:
6406 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6407 break;
6408 case SIOCGIFFLAGS:
6409 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6410 break;
6411 case SIOCGIFAFLAG_IN:
6412 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6413 break;
6414 case SIOCGIFADDR:
6415 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6416 break;
6417 case SIOCGIFMTU:
6418 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6419 break;
6420 case SIOCGIFCAP:
6421 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6422 break;
6423 case SIOCGETHERCAP:
6424 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6425 break;
6426 case SIOCGLIFADDR:
6427 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6428 break;
6429 case SIOCZIFDATA:
6430 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6431 hw->mac.ops.clear_hw_cntrs(hw);
6432 ixgbe_clear_evcnt(adapter);
6433 break;
6434 case SIOCAIFADDR:
6435 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6436 break;
6437 #endif
6438 default:
6439 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6440 break;
6441 }
6442
6443 switch (command) {
6444 case SIOCGI2C:
6445 {
6446 struct ixgbe_i2c_req i2c;
6447
6448 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6449 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6450 if (error != 0)
6451 break;
6452 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6453 error = EINVAL;
6454 break;
6455 }
6456 if (i2c.len > sizeof(i2c.data)) {
6457 error = EINVAL;
6458 break;
6459 }
6460
6461 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6462 i2c.dev_addr, i2c.data);
6463 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6464 break;
6465 }
6466 case SIOCSIFCAP:
		/*
		 * Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
6470 l4csum_en = ifcr->ifcr_capenable & l4csum;
6471 if (l4csum_en != l4csum && l4csum_en != 0)
6472 return EINVAL;
6473 /*FALLTHROUGH*/
6474 case SIOCADDMULTI:
6475 case SIOCDELMULTI:
6476 case SIOCSIFFLAGS:
6477 case SIOCSIFMTU:
6478 default:
6479 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6480 return error;
6481 if ((ifp->if_flags & IFF_RUNNING) == 0)
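			/* Nothing to do if the interface is not running */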
6482 ;
6483 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6484 IXGBE_CORE_LOCK(adapter);
6485 if ((ifp->if_flags & IFF_RUNNING) != 0)
6486 ixgbe_init_locked(adapter);
6487 ixgbe_recalculate_max_frame(adapter);
6488 IXGBE_CORE_UNLOCK(adapter);
6489 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6490 /*
6491 * Multicast list has changed; set the hardware filter
6492 * accordingly.
6493 */
6494 IXGBE_CORE_LOCK(adapter);
6495 ixgbe_disable_intr(adapter);
6496 ixgbe_set_rxfilter(adapter);
6497 ixgbe_enable_intr(adapter);
6498 IXGBE_CORE_UNLOCK(adapter);
6499 }
6500 return 0;
6501 }
6502
6503 return error;
6504 } /* ixgbe_ioctl */
6505
6506 /************************************************************************
6507 * ixgbe_check_fan_failure
6508 ************************************************************************/
6509 static int
6510 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6511 {
6512 u32 mask;
6513
6514 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6515 IXGBE_ESDP_SDP1;
6516
6517 if (reg & mask) {
6518 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6519 return IXGBE_ERR_FAN_FAILURE;
6520 }
6521
6522 return IXGBE_SUCCESS;
6523 } /* ixgbe_check_fan_failure */
6524
6525 /************************************************************************
6526 * ixgbe_handle_que
6527 ************************************************************************/
6528 static void
6529 ixgbe_handle_que(void *context)
6530 {
6531 struct ix_queue *que = context;
6532 struct adapter *adapter = que->adapter;
6533 struct tx_ring *txr = que->txr;
6534 struct ifnet *ifp = adapter->ifp;
6535 bool more = false;
6536
6537 que->handleq.ev_count++;
6538
6539 if (ifp->if_flags & IFF_RUNNING) {
6540 more = ixgbe_rxeof(que);
6541 IXGBE_TX_LOCK(txr);
6542 more |= ixgbe_txeof(txr);
6543 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6544 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6545 ixgbe_mq_start_locked(ifp, txr);
6546 /* Only for queue 0 */
6547 /* NetBSD still needs this for CBQ */
6548 if ((&adapter->queues[0] == que)
6549 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6550 ixgbe_legacy_start_locked(ifp, txr);
6551 IXGBE_TX_UNLOCK(txr);
6552 }
6553
6554 if (more) {
6555 que->req.ev_count++;
6556 ixgbe_sched_handle_que(adapter, que);
6557 } else if (que->res != NULL) {
6558 /* MSIX: Re-enable this interrupt */
6559 ixgbe_enable_queue(adapter, que->msix);
6560 } else {
6561 /* INTx or MSI */
6562 ixgbe_enable_queue(adapter, 0);
6563 }
6564
6565 return;
6566 } /* ixgbe_handle_que */
6567
6568 /************************************************************************
6569 * ixgbe_handle_que_work
6570 ************************************************************************/
6571 static void
6572 ixgbe_handle_que_work(struct work *wk, void *context)
6573 {
6574 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6575
6576 /*
6577 * "enqueued flag" is not required here.
6578 * See ixgbe_msix_que().
6579 */
6580 ixgbe_handle_que(que);
6581 }
6582
6583 /************************************************************************
6584 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6585 ************************************************************************/
6586 static int
6587 ixgbe_allocate_legacy(struct adapter *adapter,
6588 const struct pci_attach_args *pa)
6589 {
6590 device_t dev = adapter->dev;
6591 struct ix_queue *que = adapter->queues;
6592 struct tx_ring *txr = adapter->tx_rings;
6593 int counts[PCI_INTR_TYPE_SIZE];
6594 pci_intr_type_t intr_type, max_type;
6595 char intrbuf[PCI_INTRSTR_LEN];
6596 char wqname[MAXCOMLEN];
6597 const char *intrstr = NULL;
6598 int defertx_error = 0, error;
6599
6600 /* We allocate a single interrupt resource */
6601 max_type = PCI_INTR_TYPE_MSI;
6602 counts[PCI_INTR_TYPE_MSIX] = 0;
6603 counts[PCI_INTR_TYPE_MSI] =
6604 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check feat_cap, not feat_en, so that we can fall back to INTx */
6606 counts[PCI_INTR_TYPE_INTX] =
6607 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6608
6609 alloc_retry:
6610 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6611 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6612 return ENXIO;
6613 }
6614 adapter->osdep.nintrs = 1;
6615 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6616 intrbuf, sizeof(intrbuf));
6617 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6618 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6619 device_xname(dev));
6620 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6621 if (adapter->osdep.ihs[0] == NULL) {
6622 aprint_error_dev(dev,"unable to establish %s\n",
6623 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6624 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6625 adapter->osdep.intrs = NULL;
6626 switch (intr_type) {
6627 case PCI_INTR_TYPE_MSI:
6628 /* The next try is for INTx: Disable MSI */
6629 max_type = PCI_INTR_TYPE_INTX;
6630 counts[PCI_INTR_TYPE_INTX] = 1;
6631 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6632 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6633 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6634 goto alloc_retry;
6635 } else
6636 break;
6637 case PCI_INTR_TYPE_INTX:
6638 default:
6639 /* See below */
6640 break;
6641 }
6642 }
6643 if (intr_type == PCI_INTR_TYPE_INTX) {
6644 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6645 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6646 }
6647 if (adapter->osdep.ihs[0] == NULL) {
6648 aprint_error_dev(dev,
6649 "couldn't establish interrupt%s%s\n",
6650 intrstr ? " at " : "", intrstr ? intrstr : "");
6651 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6652 adapter->osdep.intrs = NULL;
6653 return ENXIO;
6654 }
6655 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6656 /*
6657 * Try allocating a fast interrupt and the associated deferred
6658 * processing contexts.
6659 */
6660 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6661 txr->txr_si =
6662 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6663 ixgbe_deferred_mq_start, txr);
6664
6665 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6666 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6667 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6668 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6669 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6670 }
6671 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6672 ixgbe_handle_que, que);
6673 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6674 error = workqueue_create(&adapter->que_wq, wqname,
6675 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6676 IXGBE_WORKQUEUE_FLAGS);
6677
6678 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6679 && ((txr->txr_si == NULL) || defertx_error != 0))
6680 || (que->que_si == NULL) || error != 0) {
6681 aprint_error_dev(dev,
6682 "could not establish software interrupts\n");
6683
6684 return ENXIO;
6685 }
6686 /* For simplicity in the handlers */
6687 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6688
6689 return (0);
6690 } /* ixgbe_allocate_legacy */
6691
6692 /************************************************************************
6693 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6694 ************************************************************************/
6695 static int
6696 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6697 {
6698 device_t dev = adapter->dev;
6699 struct ix_queue *que = adapter->queues;
6700 struct tx_ring *txr = adapter->tx_rings;
6701 pci_chipset_tag_t pc;
6702 char intrbuf[PCI_INTRSTR_LEN];
6703 char intr_xname[32];
6704 char wqname[MAXCOMLEN];
6705 const char *intrstr = NULL;
6706 int error, vector = 0;
6707 int cpu_id = 0;
6708 kcpuset_t *affinity;
6709 #ifdef RSS
6710 unsigned int rss_buckets = 0;
6711 kcpuset_t cpu_mask;
6712 #endif
6713
6714 pc = adapter->osdep.pc;
6715 #ifdef RSS
6716 /*
6717 * If we're doing RSS, the number of queues needs to
6718 * match the number of RSS buckets that are configured.
6719 *
6720 	 * + If there are more queues than RSS buckets, we'll end
6721 	 *    up with queues that get no traffic.
6722 	 *
6723 	 * + If there are more RSS buckets than queues, we'll end
6724 	 *    up having multiple RSS buckets map to the same queue,
6725 	 *    so there'll be some contention.
6726 */
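	/*
	 * For example: 8 RSS buckets across 4 queues means two buckets
	 * share each queue, while 2 buckets across 4 queues leaves two
	 * queues with no traffic at all.
	 */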
6727 rss_buckets = rss_getnumbuckets();
6728 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6729 (adapter->num_queues != rss_buckets)) {
6730 device_printf(dev,
6731 "%s: number of queues (%d) != number of RSS buckets (%d)"
6732 "; performance will be impacted.\n",
6733 __func__, adapter->num_queues, rss_buckets);
6734 }
6735 #endif
6736
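	/* One vector per queue (TX/RX pair) plus one for link/admin events */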
6737 adapter->osdep.nintrs = adapter->num_queues + 1;
6738 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6739 adapter->osdep.nintrs) != 0) {
6740 aprint_error_dev(dev,
6741 "failed to allocate MSI-X interrupt\n");
6742 return (ENXIO);
6743 }
6744
6745 kcpuset_create(&affinity, false);
6746 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6747 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6748 device_xname(dev), i);
6749 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6750 sizeof(intrbuf));
6751 #ifdef IXGBE_MPSAFE
6752 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6753 true);
6754 #endif
6755 /* Set the handler function */
6756 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6757 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6758 intr_xname);
6759 if (que->res == NULL) {
6760 aprint_error_dev(dev,
6761 "Failed to register QUE handler\n");
6762 error = ENXIO;
6763 goto err_out;
6764 }
6765 que->msix = vector;
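		/* Each queue vector contributes one bit to the mask the handlers consult */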
6766 adapter->active_queues |= 1ULL << que->msix;
6767
6768 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6769 #ifdef RSS
6770 /*
6771 * The queue ID is used as the RSS layer bucket ID.
6772 * We look up the queue ID -> RSS CPU ID and select
6773 * that.
6774 */
6775 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6776 CPU_SETOF(cpu_id, &cpu_mask);
6777 #endif
6778 } else {
6779 /*
6780 * Bind the MSI-X vector, and thus the
6781 * rings to the corresponding CPU.
6782 *
6783 * This just happens to match the default RSS
6784 * round-robin bucket -> queue -> CPU allocation.
6785 */
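			/*
			 * E.g. queue 0 -> CPU 0, queue 1 -> CPU 1, ...,
			 * wrapping modulo ncpu via the kcpuset below.
			 */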
6786 if (adapter->num_queues > 1)
6787 cpu_id = i;
6788 }
6789 /* Round-robin affinity */
6790 kcpuset_zero(affinity);
6791 kcpuset_set(affinity, cpu_id % ncpu);
6792 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6793 NULL);
6794 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6795 intrstr);
6796 if (error == 0) {
6797 #if 1 /* def IXGBE_DEBUG */
6798 #ifdef RSS
6799 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
6800 cpu_id % ncpu);
6801 #else
6802 aprint_normal(", bound queue %d to cpu %d", i,
6803 cpu_id % ncpu);
6804 #endif
6805 #endif /* IXGBE_DEBUG */
6806 }
6807 aprint_normal("\n");
6808
6809 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6810 txr->txr_si = softint_establish(
6811 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6812 ixgbe_deferred_mq_start, txr);
6813 if (txr->txr_si == NULL) {
6814 aprint_error_dev(dev,
6815 "couldn't establish software interrupt\n");
6816 error = ENXIO;
6817 goto err_out;
6818 }
6819 }
6820 		que->que_si = softint_establish(
6821 		    SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6822 		    ixgbe_handle_que, que);
6823 if (que->que_si == NULL) {
6824 aprint_error_dev(dev,
6825 "couldn't establish software interrupt\n");
6826 error = ENXIO;
6827 goto err_out;
6828 }
6829 }
6830 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6831 error = workqueue_create(&adapter->txr_wq, wqname,
6832 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6833 IXGBE_WORKQUEUE_FLAGS);
6834 if (error) {
6835 		aprint_error_dev(dev,
		    "couldn't create workqueue for deferred Tx\n");
6836 goto err_out;
6837 }
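	/*
	 * Per-CPU count of work enqueued to the deferred-Tx workqueue
	 * (presumably used to avoid enqueueing the same work twice).
	 */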
6838 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6839
6840 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6841 error = workqueue_create(&adapter->que_wq, wqname,
6842 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6843 IXGBE_WORKQUEUE_FLAGS);
6844 if (error) {
6845 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6846 goto err_out;
6847 }
6848
6849 	/* ...and the Link (admin) vector */
6850 cpu_id++;
6851 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6852 adapter->vector = vector;
6853 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6854 sizeof(intrbuf));
6855 #ifdef IXGBE_MPSAFE
6856 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6857 true);
6858 #endif
6859 /* Set the link handler function */
6860 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6861 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6862 intr_xname);
6863 if (adapter->osdep.ihs[vector] == NULL) {
6864 aprint_error_dev(dev, "Failed to register LINK handler\n");
6865 error = ENXIO;
6866 goto err_out;
6867 }
6868 /* Round-robin affinity */
6869 kcpuset_zero(affinity);
6870 kcpuset_set(affinity, cpu_id % ncpu);
6871 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6872 NULL);
6873
6874 aprint_normal_dev(dev,
6875 "for link, interrupting at %s", intrstr);
6876 if (error == 0)
6877 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6878 else
6879 aprint_normal("\n");
6880
6881 kcpuset_destroy(affinity);
6882 aprint_normal_dev(dev,
6883 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6884
6885 return (0);
6886
6887 err_out:
6888 kcpuset_destroy(affinity);
6889 ixgbe_free_deferred_handlers(adapter);
6890 ixgbe_free_pciintr_resources(adapter);
6891 return (error);
6892 } /* ixgbe_allocate_msix */
6893
6894 /************************************************************************
6895 * ixgbe_configure_interrupts
6896 *
6897  * Set up MSI-X, MSI, or legacy (INTx) interrupts, in that order
6898  * of preference. The choice can also be influenced by user settings.
6899 ************************************************************************/
6900 static int
6901 ixgbe_configure_interrupts(struct adapter *adapter)
6902 {
6903 device_t dev = adapter->dev;
6904 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6905 int want, queues, msgs;
6906
6907 /* Default to 1 queue if MSI-X setup fails */
6908 adapter->num_queues = 1;
6909
6910 	/* Override by tunable */
6911 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6912 goto msi;
6913
6914 /*
6915  * NetBSD only: Use single-vector MSI when the number of CPUs is 1,
6916  * to save an interrupt slot.
6917 */
6918 if (ncpu == 1)
6919 goto msi;
6920
6921 /* First try MSI-X */
6922 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6923 msgs = MIN(msgs, IXG_MAX_NINTR);
6924 if (msgs < 2)
6925 goto msi;
6926
6927 adapter->msix_mem = (void *)1; /* XXX */
6928
6929 /* Figure out a reasonable auto config value */
6930 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
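	/*
	 * E.g. msgs = 16 with ncpu = 8 gives queues = 8; msgs = 4 with
	 * ncpu = 8 gives queues = 3, holding one message back for link.
	 */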
6931
6932 #ifdef RSS
6933 /* If we're doing RSS, clamp at the number of RSS buckets */
6934 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6935 queues = uimin(queues, rss_getnumbuckets());
6936 #endif
6937 if (ixgbe_num_queues > queues) {
6938 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is "
		    "too large, using reduced amount (%d).\n",
		    ixgbe_num_queues, queues);
6939 ixgbe_num_queues = queues;
6940 }
6941
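	/*
	 * A non-zero ixgbe_num_queues tunable takes precedence; otherwise
	 * clamp the auto value to the MAC's hardware TX/RX queue limits.
	 */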
6942 if (ixgbe_num_queues != 0)
6943 queues = ixgbe_num_queues;
6944 else
6945 queues = uimin(queues,
6946 uimin(mac->max_tx_queues, mac->max_rx_queues));
6947
6948 /* reflect correct sysctl value */
6949 ixgbe_num_queues = queues;
6950
6951 /*
6952 * Want one vector (RX/TX pair) per queue
6953 * plus an additional for Link.
6954 */
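	/*
	 * E.g. 8 queues want 9 vectors; with fewer messages than that we
	 * fall back to MSI rather than run with too few vectors.
	 */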
6955 want = queues + 1;
6956 if (msgs >= want)
6957 msgs = want;
6958 else {
6959 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6960 		    "%d vectors available but %d wanted!\n",
6961 msgs, want);
6962 goto msi;
6963 }
6964 adapter->num_queues = queues;
6965 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6966 return (0);
6967
6968 /*
6969 	 * MSI-X allocation failed or provided fewer
6970 	 * vectors than needed. Free the MSI-X resources
6971 	 * and try enabling MSI instead.
6972 */
6973 msi:
6974 /* Without MSI-X, some features are no longer supported */
6975 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6976 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6977 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6978 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6979
6980 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6981 adapter->msix_mem = NULL; /* XXX */
6982 	if (msgs != 0) {
6983 		msgs = 1;	/* only a single MSI vector is ever used */
6986 adapter->feat_en |= IXGBE_FEATURE_MSI;
6987 return (0);
6988 }
6989
6990 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6991 aprint_error_dev(dev,
6992 "Device does not support legacy interrupts.\n");
6993 return 1;
6994 }
6995
6996 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6997
6998 return (0);
6999 } /* ixgbe_configure_interrupts */
7000
7001
7002 /************************************************************************
7003  * ixgbe_handle_link - Deferred work for MSI-X link interrupts
7004 *
7005 * Done outside of interrupt context since the driver might sleep
7006 ************************************************************************/
7007 static void
7008 ixgbe_handle_link(void *context)
7009 {
7010 struct adapter *adapter = context;
7011 struct ixgbe_hw *hw = &adapter->hw;
7012
7013 KASSERT(mutex_owned(&adapter->core_mtx));
7014
7015 ++adapter->link_workev.ev_count;
7016 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7017 ixgbe_update_link_status(adapter);
7018
7019 /* Re-enable link interrupts */
7020 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7021 } /* ixgbe_handle_link */
7022
7023 #if 0
7024 /************************************************************************
7025 * ixgbe_rearm_queues
7026 ************************************************************************/
7027 static __inline void
7028 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
7029 {
7030 u32 mask;
7031
7032 switch (adapter->hw.mac.type) {
7033 case ixgbe_mac_82598EB:
7034 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7035 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
7036 break;
7037 case ixgbe_mac_82599EB:
7038 case ixgbe_mac_X540:
7039 case ixgbe_mac_X550:
7040 case ixgbe_mac_X550EM_x:
7041 case ixgbe_mac_X550EM_a:
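		/*
		 * The 64-bit queue mask spans two 32-bit EICS registers;
		 * e.g. queues = (1ULL << 33) sets bit 1 of EICS_EX(1).
		 */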
7042 mask = (queues & 0xFFFFFFFF);
7043 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
7044 mask = (queues >> 32);
7045 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
7046 break;
7047 default:
7048 break;
7049 }
7050 } /* ixgbe_rearm_queues */
7051 #endif
7052