/* $NetBSD: ixgbe.c,v 1.243 2020/08/24 18:42:17 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
147 /* required last entry */
148 {0, 0, 0, 0, 0}
149 };
150
/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
157
158 /************************************************************************
159 * Function prototypes
160 ************************************************************************/
161 static int ixgbe_probe(device_t, cfdata_t, void *);
162 static void ixgbe_attach(device_t, device_t, void *);
163 static int ixgbe_detach(device_t, int);
164 #if 0
165 static int ixgbe_shutdown(device_t);
166 #endif
167 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
168 static bool ixgbe_resume(device_t, const pmf_qual_t *);
169 static int ixgbe_ifflags_cb(struct ethercom *);
170 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
171 static int ixgbe_init(struct ifnet *);
172 static void ixgbe_init_locked(struct adapter *);
173 static void ixgbe_ifstop(struct ifnet *, int);
174 static void ixgbe_stop(void *);
175 static void ixgbe_init_device_features(struct adapter *);
176 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
177 static void ixgbe_add_media_types(struct adapter *);
178 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
179 static int ixgbe_media_change(struct ifnet *);
180 static int ixgbe_allocate_pci_resources(struct adapter *,
181 const struct pci_attach_args *);
182 static void ixgbe_free_workqueue(struct adapter *);
183 static void ixgbe_get_slot_info(struct adapter *);
184 static int ixgbe_allocate_msix(struct adapter *,
185 const struct pci_attach_args *);
186 static int ixgbe_allocate_legacy(struct adapter *,
187 const struct pci_attach_args *);
188 static int ixgbe_configure_interrupts(struct adapter *);
189 static void ixgbe_free_pciintr_resources(struct adapter *);
190 static void ixgbe_free_pci_resources(struct adapter *);
191 static void ixgbe_local_timer(void *);
192 static void ixgbe_handle_timer(struct work *, void *);
193 static void ixgbe_recovery_mode_timer(void *);
194 static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
195 static int ixgbe_setup_interface(device_t, struct adapter *);
196 static void ixgbe_config_gpie(struct adapter *);
197 static void ixgbe_config_dmac(struct adapter *);
198 static void ixgbe_config_delay_values(struct adapter *);
199 static void ixgbe_schedule_admin_tasklet(struct adapter *);
200 static void ixgbe_config_link(struct adapter *);
201 static void ixgbe_check_wol_support(struct adapter *);
202 static int ixgbe_setup_low_power_mode(struct adapter *);
203 #if 0
204 static void ixgbe_rearm_queues(struct adapter *, u64);
205 #endif
206
207 static void ixgbe_initialize_transmit_units(struct adapter *);
208 static void ixgbe_initialize_receive_units(struct adapter *);
209 static void ixgbe_enable_rx_drop(struct adapter *);
210 static void ixgbe_disable_rx_drop(struct adapter *);
211 static void ixgbe_initialize_rss_mapping(struct adapter *);
212
213 static void ixgbe_enable_intr(struct adapter *);
214 static void ixgbe_disable_intr(struct adapter *);
215 static void ixgbe_update_stats_counters(struct adapter *);
216 static void ixgbe_set_rxfilter(struct adapter *);
217 static void ixgbe_update_link_status(struct adapter *);
218 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
219 static void ixgbe_configure_ivars(struct adapter *);
220 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
221 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
222
223 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
224 static void ixgbe_setup_vlan_hw_support(struct adapter *);
225 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
226 static int ixgbe_register_vlan(struct adapter *, u16);
227 static int ixgbe_unregister_vlan(struct adapter *, u16);
228
229 static void ixgbe_add_device_sysctls(struct adapter *);
230 static void ixgbe_add_hw_stats(struct adapter *);
231 static void ixgbe_clear_evcnt(struct adapter *);
232 static int ixgbe_set_flowcntl(struct adapter *, int);
233 static int ixgbe_set_advertise(struct adapter *, int);
234 static int ixgbe_get_advertise(struct adapter *);
235
236 /* Sysctl handlers */
237 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
238 const char *, int *, int);
239 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
245 #ifdef IXGBE_DEBUG
246 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
248 #endif
249 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
251 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
252 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
254 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
255 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
256 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
257 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
258
259 /* Support for pluggable optic modules */
260 static bool ixgbe_sfp_cage_full(struct ixgbe_hw *);
261
262 /* Legacy (single vector) interrupt handler */
263 static int ixgbe_legacy_irq(void *);
264
265 /* The MSI/MSI-X Interrupt handlers */
266 static int ixgbe_msix_que(void *);
267 static int ixgbe_msix_admin(void *);
268
269 /* Event handlers running on workqueue */
270 static void ixgbe_handle_que(void *);
271 static void ixgbe_handle_link(void *);
272 static void ixgbe_handle_msf(void *);
273 static void ixgbe_handle_mod(void *);
274 static void ixgbe_handle_phy(void *);
275
276 /* Deferred workqueue handlers */
277 static void ixgbe_handle_admin(struct work *, void *);
278 static void ixgbe_handle_que_work(struct work *, void *);
279
280 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
281
282 /************************************************************************
283 * NetBSD Device Interface Entry Points
284 ************************************************************************/
285 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
286 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
287 DVF_DETACH_SHUTDOWN);
288
289 #if 0
290 devclass_t ix_devclass;
291 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
292
293 MODULE_DEPEND(ix, pci, 1, 1, 1);
294 MODULE_DEPEND(ix, ether, 1, 1, 1);
295 #ifdef DEV_NETMAP
296 MODULE_DEPEND(ix, netmap, 1, 1, 1);
297 #endif
298 #endif
299
300 /*
301 * TUNEABLE PARAMETERS:
302 */
303
304 /*
305 * AIM: Adaptive Interrupt Moderation
306 * which means that the interrupt rate
307 * is varied over time based on the
308 * traffic for that interrupt vector
309 */
310 static bool ixgbe_enable_aim = true;
311 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
312 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
313 "Enable adaptive interrupt moderation");
314
315 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
316 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
317 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
318
319 /* How many packets rxeof tries to clean at a time */
320 static int ixgbe_rx_process_limit = 256;
321 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
322 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
323
324 /* How many packets txeof tries to clean at a time */
325 static int ixgbe_tx_process_limit = 256;
326 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
327 &ixgbe_tx_process_limit, 0,
328 "Maximum number of sent packets to process at a time, -1 means unlimited");
329
330 /* Flow control setting, default to full */
331 static int ixgbe_flow_control = ixgbe_fc_full;
332 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
333 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
334
335 /* Which packet processing uses workqueue or softint */
336 static bool ixgbe_txrx_workqueue = false;
337
338 /*
339 * Smart speed setting, default to on
340 * this only works as a compile option
341 * right now as its during attach, set
342 * this to 'ixgbe_smart_speed_off' to
343 * disable.
344 */
345 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
346
347 /*
348 * MSI-X should be the default for best performance,
349 * but this allows it to be forced off for testing.
350 */
351 static int ixgbe_enable_msix = 1;
352 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
353 "Enable MSI-X interrupts");
354
355 /*
356 * Number of Queues, can be set to 0,
357 * it then autoconfigures based on the
358 * number of cpus with a max of 8. This
359 * can be overridden manually here.
360 */
361 static int ixgbe_num_queues = 0;
362 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
363 "Number of queues to configure, 0 indicates autoconfigure");
364
365 /*
366 * Number of TX descriptors per ring,
367 * setting higher than RX as this seems
368 * the better performing choice.
369 */
370 static int ixgbe_txd = PERFORM_TXD;
371 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
372 "Number of transmit descriptors per queue");
373
374 /* Number of RX descriptors per ring */
375 static int ixgbe_rxd = PERFORM_RXD;
376 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
377 "Number of receive descriptors per queue");
378
379 /*
380 * Defining this on will allow the use
381 * of unsupported SFP+ modules, note that
382 * doing so you are on your own :)
383 */
384 static int allow_unsupported_sfp = false;
385 #define TUNABLE_INT(__x, __y)
386 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
387
388 /*
389 * Not sure if Flow Director is fully baked,
390 * so we'll default to turning it off.
391 */
392 static int ixgbe_enable_fdir = 0;
393 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
394 "Enable Flow Director");
395
396 /* Legacy Transmit (single queue) */
397 static int ixgbe_enable_legacy_tx = 0;
398 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
399 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
400
401 /* Receive-Side Scaling */
402 static int ixgbe_enable_rss = 1;
403 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
404 "Enable Receive-Side Scaling (RSS)");
405
406 #if 0
407 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
408 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
409 #endif
410
411 #ifdef NET_MPSAFE
412 #define IXGBE_MPSAFE 1
413 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
414 #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
415 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
416 #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
417 #else
418 #define IXGBE_CALLOUT_FLAGS 0
419 #define IXGBE_SOFTINT_FLAGS 0
420 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
421 #define IXGBE_TASKLET_WQ_FLAGS 0
422 #endif
423 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
424
425 /************************************************************************
426 * ixgbe_initialize_rss_mapping
427 ************************************************************************/
428 static void
429 ixgbe_initialize_rss_mapping(struct adapter *adapter)
430 {
431 struct ixgbe_hw *hw = &adapter->hw;
432 u32 reta = 0, mrqc, rss_key[10];
433 int queue_id, table_size, index_mult;
434 int i, j;
435 u32 rss_hash_config;
436
437 /* force use default RSS key. */
438 #ifdef __NetBSD__
439 rss_getkey((uint8_t *) &rss_key);
440 #else
441 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
442 /* Fetch the configured RSS key */
443 rss_getkey((uint8_t *) &rss_key);
444 } else {
445 /* set up random bits */
446 cprng_fast(&rss_key, sizeof(rss_key));
447 }
448 #endif
449
450 /* Set multiplier for RETA setup and table size based on MAC */
451 index_mult = 0x1;
452 table_size = 128;
453 switch (adapter->hw.mac.type) {
454 case ixgbe_mac_82598EB:
455 index_mult = 0x11;
456 break;
457 case ixgbe_mac_X550:
458 case ixgbe_mac_X550EM_x:
459 case ixgbe_mac_X550EM_a:
460 table_size = 512;
461 break;
462 default:
463 break;
464 }
465
466 /* Set up the redirection table */
467 for (i = 0, j = 0; i < table_size; i++, j++) {
468 if (j == adapter->num_queues)
469 j = 0;
470
471 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
472 /*
473 * Fetch the RSS bucket id for the given indirection
474 * entry. Cap it at the number of configured buckets
475 * (which is num_queues.)
476 */
477 queue_id = rss_get_indirection_to_bucket(i);
478 queue_id = queue_id % adapter->num_queues;
479 } else
480 queue_id = (j * index_mult);
481
482 /*
483 * The low 8 bits are for hash value (n+0);
484 * The next 8 bits are for hash value (n+1), etc.
485 */
486 reta = reta >> 8;
487 reta = reta | (((uint32_t) queue_id) << 24);
488 if ((i & 3) == 3) {
489 if (i < 128)
490 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
491 else
492 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
493 reta);
494 reta = 0;
495 }
496 }
497
498 /* Now fill our hash function seeds */
499 for (i = 0; i < 10; i++)
500 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
501
502 /* Perform hash on these packet types */
503 if (adapter->feat_en & IXGBE_FEATURE_RSS)
504 rss_hash_config = rss_gethashconfig();
505 else {
506 /*
507 * Disable UDP - IP fragments aren't currently being handled
508 * and so we end up with a mix of 2-tuple and 4-tuple
509 * traffic.
510 */
511 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
512 | RSS_HASHTYPE_RSS_TCP_IPV4
513 | RSS_HASHTYPE_RSS_IPV6
514 | RSS_HASHTYPE_RSS_TCP_IPV6
515 | RSS_HASHTYPE_RSS_IPV6_EX
516 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
517 }
518
519 mrqc = IXGBE_MRQC_RSSEN;
520 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
522 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
524 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
526 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
528 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
530 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
532 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
534 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
535 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
536 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
537 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
538 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
539 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
540 } /* ixgbe_initialize_rss_mapping */
541
542 /************************************************************************
543 * ixgbe_initialize_receive_units - Setup receive registers and features.
544 ************************************************************************/
545 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
546
547 static void
548 ixgbe_initialize_receive_units(struct adapter *adapter)
549 {
550 struct rx_ring *rxr = adapter->rx_rings;
551 struct ixgbe_hw *hw = &adapter->hw;
552 struct ifnet *ifp = adapter->ifp;
553 int i, j;
554 u32 bufsz, fctrl, srrctl, rxcsum;
555 u32 hlreg;
556
557 /*
558 * Make sure receives are disabled while
559 * setting up the descriptor ring
560 */
561 ixgbe_disable_rx(hw);
562
563 /* Enable broadcasts */
564 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
565 fctrl |= IXGBE_FCTRL_BAM;
566 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
567 fctrl |= IXGBE_FCTRL_DPF;
568 fctrl |= IXGBE_FCTRL_PMCF;
569 }
570 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
571
572 /* Set for Jumbo Frames? */
573 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
574 if (ifp->if_mtu > ETHERMTU)
575 hlreg |= IXGBE_HLREG0_JUMBOEN;
576 else
577 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
578
579 #ifdef DEV_NETMAP
580 /* CRC stripping is conditional in Netmap */
581 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
582 (ifp->if_capenable & IFCAP_NETMAP) &&
583 !ix_crcstrip)
584 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
585 else
586 #endif /* DEV_NETMAP */
587 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
588
589 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
590
591 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
592 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
593
594 for (i = 0; i < adapter->num_queues; i++, rxr++) {
595 u64 rdba = rxr->rxdma.dma_paddr;
596 u32 reg;
597 int regnum = i / 4; /* 1 register per 4 queues */
598 int regshift = i % 4; /* 4 bits per 1 queue */
599 j = rxr->me;
600
601 /* Setup the Base and Length of the Rx Descriptor Ring */
602 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
603 (rdba & 0x00000000ffffffffULL));
604 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
605 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
606 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
607
608 /* Set up the SRRCTL register */
609 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
610 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
611 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
612 srrctl |= bufsz;
613 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
614
615 /* Set RQSMR (Receive Queue Statistic Mapping) register */
616 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
617 reg &= ~(0x000000ffUL << (regshift * 8));
618 reg |= i << (regshift * 8);
619 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
620
621 /*
622 * Set DROP_EN iff we have no flow control and >1 queue.
623 * Note that srrctl was cleared shortly before during reset,
624 * so we do not need to clear the bit, but do it just in case
625 * this code is moved elsewhere.
626 */
627 if (adapter->num_queues > 1 &&
628 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
629 srrctl |= IXGBE_SRRCTL_DROP_EN;
630 } else {
631 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
632 }
633
634 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
635
636 /* Setup the HW Rx Head and Tail Descriptor Pointers */
637 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
638 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
639
640 /* Set the driver rx tail address */
641 rxr->tail = IXGBE_RDT(rxr->me);
642 }
643
644 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
645 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
646 | IXGBE_PSRTYPE_UDPHDR
647 | IXGBE_PSRTYPE_IPV4HDR
648 | IXGBE_PSRTYPE_IPV6HDR;
649 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
650 }
651
652 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
653
654 ixgbe_initialize_rss_mapping(adapter);
655
656 if (adapter->num_queues > 1) {
657 /* RSS and RX IPP Checksum are mutually exclusive */
658 rxcsum |= IXGBE_RXCSUM_PCSD;
659 }
660
661 if (ifp->if_capenable & IFCAP_RXCSUM)
662 rxcsum |= IXGBE_RXCSUM_PCSD;
663
664 /* This is useful for calculating UDP/IP fragment checksums */
665 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
666 rxcsum |= IXGBE_RXCSUM_IPPCSE;
667
668 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
669
670 } /* ixgbe_initialize_receive_units */
671
672 /************************************************************************
673 * ixgbe_initialize_transmit_units - Enable transmit units.
674 ************************************************************************/
675 static void
676 ixgbe_initialize_transmit_units(struct adapter *adapter)
677 {
678 struct tx_ring *txr = adapter->tx_rings;
679 struct ixgbe_hw *hw = &adapter->hw;
680 int i;
681
682 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
683
684 /* Setup the Base and Length of the Tx Descriptor Ring */
685 for (i = 0; i < adapter->num_queues; i++, txr++) {
686 u64 tdba = txr->txdma.dma_paddr;
687 u32 txctrl = 0;
688 u32 tqsmreg, reg;
689 int regnum = i / 4; /* 1 register per 4 queues */
690 int regshift = i % 4; /* 4 bits per 1 queue */
691 int j = txr->me;
692
693 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
694 (tdba & 0x00000000ffffffffULL));
695 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
696 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
697 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
698
699 /*
700 * Set TQSMR (Transmit Queue Statistic Mapping) register.
701 * Register location is different between 82598 and others.
702 */
703 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
704 tqsmreg = IXGBE_TQSMR(regnum);
705 else
706 tqsmreg = IXGBE_TQSM(regnum);
707 reg = IXGBE_READ_REG(hw, tqsmreg);
708 reg &= ~(0x000000ffUL << (regshift * 8));
709 reg |= i << (regshift * 8);
710 IXGBE_WRITE_REG(hw, tqsmreg, reg);
711
712 /* Setup the HW Tx Head and Tail descriptor pointers */
713 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
714 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
715
716 /* Cache the tail address */
717 txr->tail = IXGBE_TDT(j);
718
719 txr->txr_no_space = false;
720
721 /* Disable Head Writeback */
722 /*
723 * Note: for X550 series devices, these registers are actually
724 * prefixed with TPH_ isntead of DCA_, but the addresses and
725 * fields remain the same.
726 */
727 switch (hw->mac.type) {
728 case ixgbe_mac_82598EB:
729 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
730 break;
731 default:
732 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
733 break;
734 }
735 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
736 switch (hw->mac.type) {
737 case ixgbe_mac_82598EB:
738 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
739 break;
740 default:
741 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
742 break;
743 }
744
745 }
746
747 if (hw->mac.type != ixgbe_mac_82598EB) {
748 u32 dmatxctl, rttdcs;
749
750 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
751 dmatxctl |= IXGBE_DMATXCTL_TE;
752 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
753 /* Disable arbiter to set MTQC */
754 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
755 rttdcs |= IXGBE_RTTDCS_ARBDIS;
756 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
757 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
758 ixgbe_get_mtqc(adapter->iov_mode));
759 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
760 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
761 }
762
763 return;
764 } /* ixgbe_initialize_transmit_units */
765
766 /************************************************************************
767 * ixgbe_attach - Device initialization routine
768 *
769 * Called when the driver is being loaded.
770 * Identifies the type of hardware, allocates all resources
771 * and initializes the hardware.
772 *
773 * return 0 on success, positive on failure
774 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = -1;
	u32 ctrl_ext;
	u16 high, low, nvmreg;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool unsupported_sfp = false;
	const char *str;
	char wqname[MAXCOMLEN];
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixgbe_lookup(pa);

	/* Match was already confirmed at probe time, so this cannot fail. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		/*
		 * NOTE(review): err_out reads IXGBE_CTRL_EXT, but BAR0 is
		 * not mapped until ixgbe_allocate_pci_resources() below —
		 * verify this early failure path is safe.
		 */
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Pretty-print the MAC type identified by the shared code. */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	/* Initialize the PF<->VF mailbox parameters when present. */
	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	/* Decide MSI-X/MSI/INTx and queue count before allocating queues. */
	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	 * Reset the hardware; allow the PHY to be reset if it reports
	 * over-temperature only during this initial reset.
	 */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		/* No module plugged in the cage; not fatal at attach time. */
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		/* Continue attaching, but skip laser/PHY power-up below. */
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/*
	 * NVM Image Version. 0xffff reads are treated as "no version";
	 * NOTE(review): the eeprom.ops.read() status is not checked, so
	 * nvmreg relies on the 0xffff sentinel alone — confirm reads
	 * cannot fail without setting it.
	 */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		/* X540 prints the minor in bare hex, newer parts zero-pad. */
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/*
	 * Recovery mode: only meaningful on X550 family with NVM image
	 * version >= 2.00 (read above).
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	/* Legacy/MSI path: feat_en MSIX was cleared by the fallback above. */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the enabled/capable feature bits in human-readable form. */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	/* Tell the hardware the driver is unloading again. */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_workqueue(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
1260
1261 /************************************************************************
1262 * ixgbe_check_wol_support
1263 *
1264 * Checks whether the adapter's ports are capable of
1265 * Wake On LAN by reading the adapter's NVM.
1266 *
1267 * Sets each port's hw->wol_enabled value depending
1268 * on the value read here.
1269 ************************************************************************/
1270 static void
1271 ixgbe_check_wol_support(struct adapter *adapter)
1272 {
1273 struct ixgbe_hw *hw = &adapter->hw;
1274 u16 dev_caps = 0;
1275
1276 /* Find out WoL support for port */
1277 adapter->wol_support = hw->wol_enabled = 0;
1278 ixgbe_get_device_caps(hw, &dev_caps);
1279 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1280 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1281 hw->bus.func == 0))
1282 adapter->wol_support = hw->wol_enabled = 1;
1283
1284 /* Save initial wake up filter configuration */
1285 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1286
1287 return;
1288 } /* ixgbe_check_wol_support */
1289
1290 /************************************************************************
1291 * ixgbe_setup_interface
1292 *
1293 * Setup networking device structure and register an interface.
1294 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only; never compiled on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless legacy TX was requested. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * if_initialize() must come before ether_ifattach(); the pair is
	 * completed by if_register() at the bottom of this function.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Checksum/TSO capabilities start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1400
1401 /************************************************************************
1402 * ixgbe_add_media_types
1403 ************************************************************************/
1404 static void
1405 ixgbe_add_media_types(struct adapter *adapter)
1406 {
1407 struct ixgbe_hw *hw = &adapter->hw;
1408 u64 layer;
1409
1410 layer = adapter->phy_layer;
1411
1412 #define ADD(mm, dd) \
1413 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1414
1415 ADD(IFM_NONE, 0);
1416
1417 /* Media types with matching NetBSD media defines */
1418 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1419 ADD(IFM_10G_T | IFM_FDX, 0);
1420 }
1421 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1422 ADD(IFM_1000_T | IFM_FDX, 0);
1423 }
1424 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1425 ADD(IFM_100_TX | IFM_FDX, 0);
1426 }
1427 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1428 ADD(IFM_10_T | IFM_FDX, 0);
1429 }
1430
1431 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1432 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1433 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1434 }
1435
1436 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1437 ADD(IFM_10G_LR | IFM_FDX, 0);
1438 if (hw->phy.multispeed_fiber) {
1439 ADD(IFM_1000_LX | IFM_FDX, 0);
1440 }
1441 }
1442 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1443 ADD(IFM_10G_SR | IFM_FDX, 0);
1444 if (hw->phy.multispeed_fiber) {
1445 ADD(IFM_1000_SX | IFM_FDX, 0);
1446 }
1447 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1448 ADD(IFM_1000_SX | IFM_FDX, 0);
1449 }
1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1451 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1452 }
1453
1454 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1455 ADD(IFM_10G_KR | IFM_FDX, 0);
1456 }
1457 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1458 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1459 }
1460 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1461 ADD(IFM_1000_KX | IFM_FDX, 0);
1462 }
1463 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1464 ADD(IFM_2500_KX | IFM_FDX, 0);
1465 }
1466 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1467 ADD(IFM_2500_T | IFM_FDX, 0);
1468 }
1469 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1470 ADD(IFM_5000_T | IFM_FDX, 0);
1471 }
1472 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1473 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1474 /* XXX no ifmedia_set? */
1475
1476 ADD(IFM_AUTO, 0);
1477
1478 #undef ADD
1479 } /* ixgbe_add_media_types */
1480
1481 /************************************************************************
1482 * ixgbe_is_sfp
1483 ************************************************************************/
1484 static inline bool
1485 ixgbe_is_sfp(struct ixgbe_hw *hw)
1486 {
1487 switch (hw->mac.type) {
1488 case ixgbe_mac_82598EB:
1489 if (hw->phy.type == ixgbe_phy_nl)
1490 return (TRUE);
1491 return (FALSE);
1492 case ixgbe_mac_82599EB:
1493 case ixgbe_mac_X550EM_x:
1494 case ixgbe_mac_X550EM_a:
1495 switch (hw->mac.ops.get_media_type(hw)) {
1496 case ixgbe_media_type_fiber:
1497 case ixgbe_media_type_fiber_qsfp:
1498 return (TRUE);
1499 default:
1500 return (FALSE);
1501 }
1502 default:
1503 return (FALSE);
1504 }
1505 } /* ixgbe_is_sfp */
1506
/*
 * Enqueue the admin work exactly once: admin_pending is a 0 -> 1 latch,
 * so concurrent callers cannot queue duplicate work items.
 * NOTE(review): presumably ixgbe_handle_admin() resets admin_pending when
 * it runs — confirm in the handler.
 */
static void
ixgbe_schedule_admin_tasklet(struct adapter *adapter)
{

	if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
		workqueue_enqueue(adapter->admin_wq,
		    &adapter->admin_wc, NULL);
}
1515
1516 /************************************************************************
1517 * ixgbe_config_link
1518 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	u32 task_requests = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP path: defer module (MOD) — and, for multispeed
		 * fiber, MSF — handling to the admin workqueue.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
		task_requests |= IXGBE_REQUEST_TASK_MOD;
		atomic_or_32(&adapter->task_requests, task_requests);
		ixgbe_schedule_admin_tasklet(adapter);
	} else {
		/* Non-SFP path: set up the link synchronously. */
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1563
1564 /************************************************************************
1565 * ixgbe_update_stats_counters - Update board statistics counters.
1566 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	unsigned int queue_counters;
	int i;

	/* Most hardware counters below are clear-on-read; accumulate. */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		/* Per-TC priority flow control counters. */
		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx is initialized to 0 and never
	 * accumulated in this function, so nothing is actually
	 * subtracted here — confirm whether total_missed_rx was intended.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	/* total pause frames sent; used to adjust TX good counts below */
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit counters are split across low/high registers. */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet-size histogram counters. */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX good counts exclude the pause frames counted in 'total'. */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/*
	 * Fill out the OS statistics structure. Only RX errors are required
	 * here because all TX counters are incremented in the TX path and
	 * normal RX counters are prepared in ether_input().
	 */
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
	if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
	IF_STAT_PUTREF(ifp);
} /* ixgbe_update_stats_counters */
1726
1727 /************************************************************************
1728 * ixgbe_add_hw_stats
1729 *
1730 * Add sysctl variables, one per statistic, to the system.
1731 ************************************************************************/
1732 static void
1733 ixgbe_add_hw_stats(struct adapter *adapter)
1734 {
1735 device_t dev = adapter->dev;
1736 const struct sysctlnode *rnode, *cnode;
1737 struct sysctllog **log = &adapter->sysctllog;
1738 struct tx_ring *txr = adapter->tx_rings;
1739 struct rx_ring *rxr = adapter->rx_rings;
1740 struct ixgbe_hw *hw = &adapter->hw;
1741 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1742 const char *xname = device_xname(dev);
1743 int i;
1744
1745 /* Driver Statistics */
1746 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1747 NULL, xname, "Driver tx dma soft fail EFBIG");
1748 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1749 NULL, xname, "m_defrag() failed");
1750 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1751 NULL, xname, "Driver tx dma hard fail EFBIG");
1752 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1753 NULL, xname, "Driver tx dma hard fail EINVAL");
1754 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1755 NULL, xname, "Driver tx dma hard fail other");
1756 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1757 NULL, xname, "Driver tx dma soft fail EAGAIN");
1758 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1759 NULL, xname, "Driver tx dma soft fail ENOMEM");
1760 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1761 NULL, xname, "Watchdog timeouts");
1762 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1763 NULL, xname, "TSO errors");
1764 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1765 NULL, xname, "Admin MSI-X IRQ Handled");
1766 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1767 NULL, xname, "Link event");
1768 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1769 NULL, xname, "SFP+ module event");
1770 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1771 NULL, xname, "Multispeed event");
1772 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1773 NULL, xname, "External PHY event");
1774
1775 /* Max number of traffic class is 8 */
1776 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1777 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1778 snprintf(adapter->tcs[i].evnamebuf,
1779 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1780 xname, i);
1781 if (i < __arraycount(stats->mpc)) {
1782 evcnt_attach_dynamic(&stats->mpc[i],
1783 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1784 "RX Missed Packet Count");
1785 if (hw->mac.type == ixgbe_mac_82598EB)
1786 evcnt_attach_dynamic(&stats->rnbc[i],
1787 EVCNT_TYPE_MISC, NULL,
1788 adapter->tcs[i].evnamebuf,
1789 "Receive No Buffers");
1790 }
1791 if (i < __arraycount(stats->pxontxc)) {
1792 evcnt_attach_dynamic(&stats->pxontxc[i],
1793 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1794 "pxontxc");
1795 evcnt_attach_dynamic(&stats->pxonrxc[i],
1796 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1797 "pxonrxc");
1798 evcnt_attach_dynamic(&stats->pxofftxc[i],
1799 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1800 "pxofftxc");
1801 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1802 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1803 "pxoffrxc");
1804 if (hw->mac.type >= ixgbe_mac_82599EB)
1805 evcnt_attach_dynamic(&stats->pxon2offc[i],
1806 EVCNT_TYPE_MISC, NULL,
1807 adapter->tcs[i].evnamebuf,
1808 "pxon2offc");
1809 }
1810 }
1811
1812 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1813 #ifdef LRO
1814 struct lro_ctrl *lro = &rxr->lro;
1815 #endif /* LRO */
1816
1817 snprintf(adapter->queues[i].evnamebuf,
1818 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1819 xname, i);
1820 snprintf(adapter->queues[i].namebuf,
1821 sizeof(adapter->queues[i].namebuf), "q%d", i);
1822
1823 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1824 aprint_error_dev(dev, "could not create sysctl root\n");
1825 break;
1826 }
1827
1828 if (sysctl_createv(log, 0, &rnode, &rnode,
1829 0, CTLTYPE_NODE,
1830 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1831 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1832 break;
1833
1834 if (sysctl_createv(log, 0, &rnode, &cnode,
1835 CTLFLAG_READWRITE, CTLTYPE_INT,
1836 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1837 ixgbe_sysctl_interrupt_rate_handler, 0,
1838 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1839 break;
1840
1841 if (sysctl_createv(log, 0, &rnode, &cnode,
1842 CTLFLAG_READONLY, CTLTYPE_INT,
1843 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1844 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1845 0, CTL_CREATE, CTL_EOL) != 0)
1846 break;
1847
1848 if (sysctl_createv(log, 0, &rnode, &cnode,
1849 CTLFLAG_READONLY, CTLTYPE_INT,
1850 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1851 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1852 0, CTL_CREATE, CTL_EOL) != 0)
1853 break;
1854
1855 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1856 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1857 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1858 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1859 "Handled queue in softint");
1860 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1861 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1862 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1863 NULL, adapter->queues[i].evnamebuf, "TSO");
1864 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1865 NULL, adapter->queues[i].evnamebuf,
1866 "Queue No Descriptor Available");
1867 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1868 NULL, adapter->queues[i].evnamebuf,
1869 "Queue Packets Transmitted");
1870 #ifndef IXGBE_LEGACY_TX
1871 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1872 NULL, adapter->queues[i].evnamebuf,
1873 "Packets dropped in pcq");
1874 #endif
1875
1876 if (sysctl_createv(log, 0, &rnode, &cnode,
1877 CTLFLAG_READONLY,
1878 CTLTYPE_INT,
1879 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1880 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1881 CTL_CREATE, CTL_EOL) != 0)
1882 break;
1883
1884 if (sysctl_createv(log, 0, &rnode, &cnode,
1885 CTLFLAG_READONLY,
1886 CTLTYPE_INT,
1887 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1888 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1889 CTL_CREATE, CTL_EOL) != 0)
1890 break;
1891
1892 if (sysctl_createv(log, 0, &rnode, &cnode,
1893 CTLFLAG_READONLY,
1894 CTLTYPE_INT,
1895 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1896 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1897 CTL_CREATE, CTL_EOL) != 0)
1898 break;
1899
1900 if (i < __arraycount(stats->qprc)) {
1901 evcnt_attach_dynamic(&stats->qprc[i],
1902 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1903 "qprc");
1904 evcnt_attach_dynamic(&stats->qptc[i],
1905 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1906 "qptc");
1907 evcnt_attach_dynamic(&stats->qbrc[i],
1908 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1909 "qbrc");
1910 evcnt_attach_dynamic(&stats->qbtc[i],
1911 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1912 "qbtc");
1913 if (hw->mac.type >= ixgbe_mac_82599EB)
1914 evcnt_attach_dynamic(&stats->qprdc[i],
1915 EVCNT_TYPE_MISC, NULL,
1916 adapter->queues[i].evnamebuf, "qprdc");
1917 }
1918
1919 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1920 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1921 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1922 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1923 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1924 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1925 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1926 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1927 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1928 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1929 #ifdef LRO
1930 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1931 CTLFLAG_RD, &lro->lro_queued, 0,
1932 "LRO Queued");
1933 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1934 CTLFLAG_RD, &lro->lro_flushed, 0,
1935 "LRO Flushed");
1936 #endif /* LRO */
1937 }
1938
1939 /* MAC stats get their own sub node */
1940
1941 snprintf(stats->namebuf,
1942 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1943
1944 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "rx csum offload - IP");
1946 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "rx csum offload - L4");
1948 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "rx csum offload - IP bad");
1950 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "rx csum offload - L4 bad");
1952 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Interrupt conditions zero");
1954 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Legacy interrupts");
1956
1957 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "CRC Errors");
1959 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "Illegal Byte Errors");
1961 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "Byte Errors");
1963 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "MAC Short Packets Discarded");
1965 if (hw->mac.type >= ixgbe_mac_X550)
1966 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "Bad SFD");
1968 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "Total Packets Missed");
1970 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "MAC Local Faults");
1972 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "MAC Remote Faults");
1974 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Receive Length Errors");
1976 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Link XON Transmitted");
1978 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Link XON Received");
1980 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "Link XOFF Transmitted");
1982 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "Link XOFF Received");
1984
1985 /* Packet Reception Stats */
1986 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Total Octets Received");
1988 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Good Octets Received");
1990 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Total Packets Received");
1992 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Good Packets Received");
1994 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Multicast Packets Received");
1996 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Broadcast Packets Received");
1998 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "64 byte frames received ");
2000 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "65-127 byte frames received");
2002 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "128-255 byte frames received");
2004 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "256-511 byte frames received");
2006 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "512-1023 byte frames received");
2008 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "1023-1522 byte frames received");
2010 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Receive Undersized");
2012 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Fragmented Packets Received ");
2014 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Oversized Packets Received");
2016 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Received Jabber");
2018 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Management Packets Received");
2020 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Management Packets Dropped");
2022 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Checksum Errors");
2024
2025 /* Packet Transmission Stats */
2026 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Good Octets Transmitted");
2028 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Total Packets Transmitted");
2030 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Good Packets Transmitted");
2032 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Broadcast Packets Transmitted");
2034 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Multicast Packets Transmitted");
2036 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Management Packets Transmitted");
2038 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "64 byte frames transmitted ");
2040 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "65-127 byte frames transmitted");
2042 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "128-255 byte frames transmitted");
2044 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "256-511 byte frames transmitted");
2046 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "512-1023 byte frames transmitted");
2048 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "1024-1522 byte frames transmitted");
2050 } /* ixgbe_add_hw_stats */
2051
2052 static void
2053 ixgbe_clear_evcnt(struct adapter *adapter)
2054 {
2055 struct tx_ring *txr = adapter->tx_rings;
2056 struct rx_ring *rxr = adapter->rx_rings;
2057 struct ixgbe_hw *hw = &adapter->hw;
2058 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2059 int i;
2060
2061 adapter->efbig_tx_dma_setup.ev_count = 0;
2062 adapter->mbuf_defrag_failed.ev_count = 0;
2063 adapter->efbig2_tx_dma_setup.ev_count = 0;
2064 adapter->einval_tx_dma_setup.ev_count = 0;
2065 adapter->other_tx_dma_setup.ev_count = 0;
2066 adapter->eagain_tx_dma_setup.ev_count = 0;
2067 adapter->enomem_tx_dma_setup.ev_count = 0;
2068 adapter->tso_err.ev_count = 0;
2069 adapter->watchdog_events.ev_count = 0;
2070 adapter->admin_irqev.ev_count = 0;
2071 adapter->link_workev.ev_count = 0;
2072 adapter->mod_workev.ev_count = 0;
2073 adapter->msf_workev.ev_count = 0;
2074 adapter->phy_workev.ev_count = 0;
2075
2076 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2077 if (i < __arraycount(stats->mpc)) {
2078 stats->mpc[i].ev_count = 0;
2079 if (hw->mac.type == ixgbe_mac_82598EB)
2080 stats->rnbc[i].ev_count = 0;
2081 }
2082 if (i < __arraycount(stats->pxontxc)) {
2083 stats->pxontxc[i].ev_count = 0;
2084 stats->pxonrxc[i].ev_count = 0;
2085 stats->pxofftxc[i].ev_count = 0;
2086 stats->pxoffrxc[i].ev_count = 0;
2087 if (hw->mac.type >= ixgbe_mac_82599EB)
2088 stats->pxon2offc[i].ev_count = 0;
2089 }
2090 }
2091
2092 txr = adapter->tx_rings;
2093 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2094 adapter->queues[i].irqs.ev_count = 0;
2095 adapter->queues[i].handleq.ev_count = 0;
2096 adapter->queues[i].req.ev_count = 0;
2097 txr->no_desc_avail.ev_count = 0;
2098 txr->total_packets.ev_count = 0;
2099 txr->tso_tx.ev_count = 0;
2100 #ifndef IXGBE_LEGACY_TX
2101 txr->pcq_drops.ev_count = 0;
2102 #endif
2103 txr->q_efbig_tx_dma_setup = 0;
2104 txr->q_mbuf_defrag_failed = 0;
2105 txr->q_efbig2_tx_dma_setup = 0;
2106 txr->q_einval_tx_dma_setup = 0;
2107 txr->q_other_tx_dma_setup = 0;
2108 txr->q_eagain_tx_dma_setup = 0;
2109 txr->q_enomem_tx_dma_setup = 0;
2110 txr->q_tso_err = 0;
2111
2112 if (i < __arraycount(stats->qprc)) {
2113 stats->qprc[i].ev_count = 0;
2114 stats->qptc[i].ev_count = 0;
2115 stats->qbrc[i].ev_count = 0;
2116 stats->qbtc[i].ev_count = 0;
2117 if (hw->mac.type >= ixgbe_mac_82599EB)
2118 stats->qprdc[i].ev_count = 0;
2119 }
2120
2121 rxr->rx_packets.ev_count = 0;
2122 rxr->rx_bytes.ev_count = 0;
2123 rxr->rx_copies.ev_count = 0;
2124 rxr->no_jmbuf.ev_count = 0;
2125 rxr->rx_discarded.ev_count = 0;
2126 }
2127 stats->ipcs.ev_count = 0;
2128 stats->l4cs.ev_count = 0;
2129 stats->ipcs_bad.ev_count = 0;
2130 stats->l4cs_bad.ev_count = 0;
2131 stats->intzero.ev_count = 0;
2132 stats->legint.ev_count = 0;
2133 stats->crcerrs.ev_count = 0;
2134 stats->illerrc.ev_count = 0;
2135 stats->errbc.ev_count = 0;
2136 stats->mspdc.ev_count = 0;
2137 if (hw->mac.type >= ixgbe_mac_X550)
2138 stats->mbsdc.ev_count = 0;
2139 stats->mpctotal.ev_count = 0;
2140 stats->mlfc.ev_count = 0;
2141 stats->mrfc.ev_count = 0;
2142 stats->rlec.ev_count = 0;
2143 stats->lxontxc.ev_count = 0;
2144 stats->lxonrxc.ev_count = 0;
2145 stats->lxofftxc.ev_count = 0;
2146 stats->lxoffrxc.ev_count = 0;
2147
2148 /* Packet Reception Stats */
2149 stats->tor.ev_count = 0;
2150 stats->gorc.ev_count = 0;
2151 stats->tpr.ev_count = 0;
2152 stats->gprc.ev_count = 0;
2153 stats->mprc.ev_count = 0;
2154 stats->bprc.ev_count = 0;
2155 stats->prc64.ev_count = 0;
2156 stats->prc127.ev_count = 0;
2157 stats->prc255.ev_count = 0;
2158 stats->prc511.ev_count = 0;
2159 stats->prc1023.ev_count = 0;
2160 stats->prc1522.ev_count = 0;
2161 stats->ruc.ev_count = 0;
2162 stats->rfc.ev_count = 0;
2163 stats->roc.ev_count = 0;
2164 stats->rjc.ev_count = 0;
2165 stats->mngprc.ev_count = 0;
2166 stats->mngpdc.ev_count = 0;
2167 stats->xec.ev_count = 0;
2168
2169 /* Packet Transmission Stats */
2170 stats->gotc.ev_count = 0;
2171 stats->tpt.ev_count = 0;
2172 stats->gptc.ev_count = 0;
2173 stats->bptc.ev_count = 0;
2174 stats->mptc.ev_count = 0;
2175 stats->mngptc.ev_count = 0;
2176 stats->ptc64.ev_count = 0;
2177 stats->ptc127.ev_count = 0;
2178 stats->ptc255.ev_count = 0;
2179 stats->ptc511.ev_count = 0;
2180 stats->ptc1023.ev_count = 0;
2181 stats->ptc1522.ev_count = 0;
2182 }
2183
2184 /************************************************************************
2185 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2186 *
2187 * Retrieves the TDH value from the hardware
2188 ************************************************************************/
2189 static int
2190 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2191 {
2192 struct sysctlnode node = *rnode;
2193 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2194 struct adapter *adapter;
2195 uint32_t val;
2196
2197 if (!txr)
2198 return (0);
2199
2200 adapter = txr->adapter;
2201 if (ixgbe_fw_recovery_mode_swflag(adapter))
2202 return (EPERM);
2203
2204 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2205 node.sysctl_data = &val;
2206 return sysctl_lookup(SYSCTLFN_CALL(&node));
2207 } /* ixgbe_sysctl_tdh_handler */
2208
2209 /************************************************************************
2210 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2211 *
2212 * Retrieves the TDT value from the hardware
2213 ************************************************************************/
2214 static int
2215 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2216 {
2217 struct sysctlnode node = *rnode;
2218 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2219 struct adapter *adapter;
2220 uint32_t val;
2221
2222 if (!txr)
2223 return (0);
2224
2225 adapter = txr->adapter;
2226 if (ixgbe_fw_recovery_mode_swflag(adapter))
2227 return (EPERM);
2228
2229 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2230 node.sysctl_data = &val;
2231 return sysctl_lookup(SYSCTLFN_CALL(&node));
2232 } /* ixgbe_sysctl_tdt_handler */
2233
2234 /************************************************************************
2235 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2236 * handler function
2237 *
2238 * Retrieves the next_to_check value
2239 ************************************************************************/
2240 static int
2241 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2242 {
2243 struct sysctlnode node = *rnode;
2244 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2245 struct adapter *adapter;
2246 uint32_t val;
2247
2248 if (!rxr)
2249 return (0);
2250
2251 adapter = rxr->adapter;
2252 if (ixgbe_fw_recovery_mode_swflag(adapter))
2253 return (EPERM);
2254
2255 val = rxr->next_to_check;
2256 node.sysctl_data = &val;
2257 return sysctl_lookup(SYSCTLFN_CALL(&node));
2258 } /* ixgbe_sysctl_next_to_check_handler */
2259
2260 /************************************************************************
2261 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2262 *
2263 * Retrieves the RDH value from the hardware
2264 ************************************************************************/
2265 static int
2266 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2267 {
2268 struct sysctlnode node = *rnode;
2269 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2270 struct adapter *adapter;
2271 uint32_t val;
2272
2273 if (!rxr)
2274 return (0);
2275
2276 adapter = rxr->adapter;
2277 if (ixgbe_fw_recovery_mode_swflag(adapter))
2278 return (EPERM);
2279
2280 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2281 node.sysctl_data = &val;
2282 return sysctl_lookup(SYSCTLFN_CALL(&node));
2283 } /* ixgbe_sysctl_rdh_handler */
2284
2285 /************************************************************************
2286 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2287 *
2288 * Retrieves the RDT value from the hardware
2289 ************************************************************************/
2290 static int
2291 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2292 {
2293 struct sysctlnode node = *rnode;
2294 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2295 struct adapter *adapter;
2296 uint32_t val;
2297
2298 if (!rxr)
2299 return (0);
2300
2301 adapter = rxr->adapter;
2302 if (ixgbe_fw_recovery_mode_swflag(adapter))
2303 return (EPERM);
2304
2305 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2306 node.sysctl_data = &val;
2307 return sysctl_lookup(SYSCTLFN_CALL(&node));
2308 } /* ixgbe_sysctl_rdt_handler */
2309
2310 static int
2311 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2312 {
2313 struct ifnet *ifp = &ec->ec_if;
2314 struct adapter *adapter = ifp->if_softc;
2315 int rv;
2316
2317 if (set)
2318 rv = ixgbe_register_vlan(adapter, vid);
2319 else
2320 rv = ixgbe_unregister_vlan(adapter, vid);
2321
2322 if (rv != 0)
2323 return rv;
2324
2325 /*
2326 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2327 * or 0 to 1.
2328 */
2329 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2330 ixgbe_setup_vlan_hw_tagging(adapter);
2331
2332 return rv;
2333 }
2334
2335 /************************************************************************
2336 * ixgbe_register_vlan
2337 *
2338 * Run via vlan config EVENT, it enables us to use the
2339 * HW Filter table since we can get the vlan id. This
2340 * just creates the entry in the soft version of the
2341 * VFTA, init will repopulate the real table.
2342 ************************************************************************/
2343 static int
2344 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2345 {
2346 u16 index, bit;
2347 int error;
2348
2349 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2350 return EINVAL;
2351
2352 IXGBE_CORE_LOCK(adapter);
2353 index = (vtag >> 5) & 0x7F;
2354 bit = vtag & 0x1F;
2355 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2356 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2357 true);
2358 IXGBE_CORE_UNLOCK(adapter);
2359 if (error != 0)
2360 error = EACCES;
2361
2362 return error;
2363 } /* ixgbe_register_vlan */
2364
2365 /************************************************************************
2366 * ixgbe_unregister_vlan
2367 *
2368 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2369 ************************************************************************/
2370 static int
2371 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2372 {
2373 u16 index, bit;
2374 int error;
2375
2376 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2377 return EINVAL;
2378
2379 IXGBE_CORE_LOCK(adapter);
2380 index = (vtag >> 5) & 0x7F;
2381 bit = vtag & 0x1F;
2382 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2383 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2384 true);
2385 IXGBE_CORE_UNLOCK(adapter);
2386 if (error != 0)
2387 error = EACCES;
2388
2389 return error;
2390 } /* ixgbe_unregister_vlan */
2391
2392 static void
2393 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2394 {
2395 struct ethercom *ec = &adapter->osdep.ec;
2396 struct ixgbe_hw *hw = &adapter->hw;
2397 struct rx_ring *rxr;
2398 u32 ctrl;
2399 int i;
2400 bool hwtagging;
2401
2402 /* Enable HW tagging only if any vlan is attached */
2403 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2404 && VLAN_ATTACHED(ec);
2405
2406 /* Setup the queues for vlans */
2407 for (i = 0; i < adapter->num_queues; i++) {
2408 rxr = &adapter->rx_rings[i];
2409 /*
2410 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2411 */
2412 if (hw->mac.type != ixgbe_mac_82598EB) {
2413 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2414 if (hwtagging)
2415 ctrl |= IXGBE_RXDCTL_VME;
2416 else
2417 ctrl &= ~IXGBE_RXDCTL_VME;
2418 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2419 }
2420 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2421 }
2422
2423 /* VLAN hw tagging for 82598 */
2424 if (hw->mac.type == ixgbe_mac_82598EB) {
2425 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2426 if (hwtagging)
2427 ctrl |= IXGBE_VLNCTRL_VME;
2428 else
2429 ctrl &= ~IXGBE_VLNCTRL_VME;
2430 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2431 }
2432 } /* ixgbe_setup_vlan_hw_tagging */
2433
2434 static void
2435 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2436 {
2437 struct ethercom *ec = &adapter->osdep.ec;
2438 struct ixgbe_hw *hw = &adapter->hw;
2439 int i;
2440 u32 ctrl;
2441 struct vlanid_list *vlanidp;
2442
2443 /*
2444 * This function is called from both if_init and ifflags_cb()
2445 * on NetBSD.
2446 */
2447
2448 /*
2449 * Part 1:
2450 * Setup VLAN HW tagging
2451 */
2452 ixgbe_setup_vlan_hw_tagging(adapter);
2453
2454 /*
2455 * Part 2:
2456 * Setup VLAN HW filter
2457 */
2458 /* Cleanup shadow_vfta */
2459 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2460 adapter->shadow_vfta[i] = 0;
2461 /* Generate shadow_vfta from ec_vids */
2462 ETHER_LOCK(ec);
2463 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2464 uint32_t idx;
2465
2466 idx = vlanidp->vid / 32;
2467 KASSERT(idx < IXGBE_VFTA_SIZE);
2468 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2469 }
2470 ETHER_UNLOCK(ec);
2471 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2472 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2473
2474 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2475 /* Enable the Filter Table if enabled */
2476 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2477 ctrl |= IXGBE_VLNCTRL_VFE;
2478 else
2479 ctrl &= ~IXGBE_VLNCTRL_VFE;
2480 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2481 } /* ixgbe_setup_vlan_hw_support */
2482
/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/*
	 * Some devices are behind an internal bridge; for those the
	 * link status of this function is not the slot's, so walk up
	 * the PCI tree instead.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	/* Decode the link status into hw->bus speed/width */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot is too slow for full throughput */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		     (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		     (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2590
2591 /************************************************************************
2592 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2593 ************************************************************************/
2594 static inline void
2595 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2596 {
2597 struct ixgbe_hw *hw = &adapter->hw;
2598 struct ix_queue *que = &adapter->queues[vector];
2599 u64 queue = 1ULL << vector;
2600 u32 mask;
2601
2602 mutex_enter(&que->dc_mtx);
2603 if (que->disabled_count > 0 && --que->disabled_count > 0)
2604 goto out;
2605
2606 if (hw->mac.type == ixgbe_mac_82598EB) {
2607 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2608 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2609 } else {
2610 mask = (queue & 0xFFFFFFFF);
2611 if (mask)
2612 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2613 mask = (queue >> 32);
2614 if (mask)
2615 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2616 }
2617 out:
2618 mutex_exit(&que->dc_mtx);
2619 } /* ixgbe_enable_queue */
2620
2621 /************************************************************************
2622 * ixgbe_disable_queue_internal
2623 ************************************************************************/
2624 static inline void
2625 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2626 {
2627 struct ixgbe_hw *hw = &adapter->hw;
2628 struct ix_queue *que = &adapter->queues[vector];
2629 u64 queue = 1ULL << vector;
2630 u32 mask;
2631
2632 mutex_enter(&que->dc_mtx);
2633
2634 if (que->disabled_count > 0) {
2635 if (nestok)
2636 que->disabled_count++;
2637 goto out;
2638 }
2639 que->disabled_count++;
2640
2641 if (hw->mac.type == ixgbe_mac_82598EB) {
2642 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2643 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2644 } else {
2645 mask = (queue & 0xFFFFFFFF);
2646 if (mask)
2647 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2648 mask = (queue >> 32);
2649 if (mask)
2650 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2651 }
2652 out:
2653 mutex_exit(&que->dc_mtx);
2654 } /* ixgbe_disable_queue_internal */
2655
/************************************************************************
 * ixgbe_disable_queue
 *
 * Mask a queue's interrupt. Nesting is allowed: repeated disables
 * bump the reference count in ixgbe_disable_queue_internal(), and
 * the interrupt stays masked until the matching enables occur.
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2665
2666 /************************************************************************
2667 * ixgbe_sched_handle_que - schedule deferred packet processing
2668 ************************************************************************/
2669 static inline void
2670 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2671 {
2672
2673 if (que->txrx_use_workqueue) {
2674 /*
2675 * adapter->que_wq is bound to each CPU instead of
2676 * each NIC queue to reduce workqueue kthread. As we
2677 * should consider about interrupt affinity in this
2678 * function, the workqueue kthread must be WQ_PERCPU.
2679 * If create WQ_PERCPU workqueue kthread for each NIC
2680 * queue, that number of created workqueue kthread is
2681 * (number of used NIC queue) * (number of CPUs) =
2682 * (number of CPUs) ^ 2 most often.
2683 *
2684 * The same NIC queue's interrupts are avoided by
2685 * masking the queue's interrupt. And different
2686 * NIC queue's interrupts use different struct work
2687 * (que->wq_cookie). So, "enqueued flag" to avoid
2688 * twice workqueue_enqueue() is not required .
2689 */
2690 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2691 } else {
2692 softint_schedule(que->que_si);
2693 }
2694 }
2695
2696 /************************************************************************
2697 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2698 ************************************************************************/
2699 static int
2700 ixgbe_msix_que(void *arg)
2701 {
2702 struct ix_queue *que = arg;
2703 struct adapter *adapter = que->adapter;
2704 struct ifnet *ifp = adapter->ifp;
2705 struct tx_ring *txr = que->txr;
2706 struct rx_ring *rxr = que->rxr;
2707 bool more;
2708 u32 newitr = 0;
2709
2710 /* Protect against spurious interrupts */
2711 if ((ifp->if_flags & IFF_RUNNING) == 0)
2712 return 0;
2713
2714 ixgbe_disable_queue(adapter, que->msix);
2715 ++que->irqs.ev_count;
2716
2717 /*
2718 * Don't change "que->txrx_use_workqueue" from this point to avoid
2719 * flip-flopping softint/workqueue mode in one deferred processing.
2720 */
2721 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2722
2723 #ifdef __NetBSD__
2724 /* Don't run ixgbe_rxeof in interrupt context */
2725 more = true;
2726 #else
2727 more = ixgbe_rxeof(que);
2728 #endif
2729
2730 IXGBE_TX_LOCK(txr);
2731 ixgbe_txeof(txr);
2732 IXGBE_TX_UNLOCK(txr);
2733
2734 /* Do AIM now? */
2735
2736 if (adapter->enable_aim == false)
2737 goto no_calc;
2738 /*
2739 * Do Adaptive Interrupt Moderation:
2740 * - Write out last calculated setting
2741 * - Calculate based on average size over
2742 * the last interval.
2743 */
2744 if (que->eitr_setting)
2745 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2746
2747 que->eitr_setting = 0;
2748
2749 /* Idle, do nothing */
2750 if ((txr->bytes == 0) && (rxr->bytes == 0))
2751 goto no_calc;
2752
2753 if ((txr->bytes) && (txr->packets))
2754 newitr = txr->bytes/txr->packets;
2755 if ((rxr->bytes) && (rxr->packets))
2756 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2757 newitr += 24; /* account for hardware frame, crc */
2758
2759 /* set an upper boundary */
2760 newitr = uimin(newitr, 3000);
2761
2762 /* Be nice to the mid range */
2763 if ((newitr > 300) && (newitr < 1200))
2764 newitr = (newitr / 3);
2765 else
2766 newitr = (newitr / 2);
2767
2768 /*
2769 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2770 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2771 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2772 * on 1G and higher.
2773 */
2774 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2775 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2776 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2777 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2778 }
2779
2780 /* save for next interrupt */
2781 que->eitr_setting = newitr;
2782
2783 /* Reset state */
2784 txr->bytes = 0;
2785 txr->packets = 0;
2786 rxr->bytes = 0;
2787 rxr->packets = 0;
2788
2789 no_calc:
2790 if (more)
2791 ixgbe_sched_handle_que(adapter, que);
2792 else
2793 ixgbe_enable_queue(adapter, que->msix);
2794
2795 return 1;
2796 } /* ixgbe_msix_que */
2797
2798 /************************************************************************
2799 * ixgbe_media_status - Media Ioctl callback
2800 *
2801 * Called whenever the user queries the status of
2802 * the interface using ifconfig.
2803 ************************************************************************/
2804 static void
2805 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2806 {
2807 struct adapter *adapter = ifp->if_softc;
2808 struct ixgbe_hw *hw = &adapter->hw;
2809 int layer;
2810
2811 INIT_DEBUGOUT("ixgbe_media_status: begin");
2812 ixgbe_update_link_status(adapter);
2813
2814 ifmr->ifm_status = IFM_AVALID;
2815 ifmr->ifm_active = IFM_ETHER;
2816
2817 if (adapter->link_active != LINK_STATE_UP) {
2818 ifmr->ifm_active |= IFM_NONE;
2819 return;
2820 }
2821
2822 ifmr->ifm_status |= IFM_ACTIVE;
2823 layer = adapter->phy_layer;
2824
2825 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2826 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2827 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2828 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2829 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2830 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2831 switch (adapter->link_speed) {
2832 case IXGBE_LINK_SPEED_10GB_FULL:
2833 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2834 break;
2835 case IXGBE_LINK_SPEED_5GB_FULL:
2836 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2837 break;
2838 case IXGBE_LINK_SPEED_2_5GB_FULL:
2839 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2840 break;
2841 case IXGBE_LINK_SPEED_1GB_FULL:
2842 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2843 break;
2844 case IXGBE_LINK_SPEED_100_FULL:
2845 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2846 break;
2847 case IXGBE_LINK_SPEED_10_FULL:
2848 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2849 break;
2850 }
2851 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2852 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2853 switch (adapter->link_speed) {
2854 case IXGBE_LINK_SPEED_10GB_FULL:
2855 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2856 break;
2857 }
2858 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2859 switch (adapter->link_speed) {
2860 case IXGBE_LINK_SPEED_10GB_FULL:
2861 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2862 break;
2863 case IXGBE_LINK_SPEED_1GB_FULL:
2864 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2865 break;
2866 }
2867 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2868 switch (adapter->link_speed) {
2869 case IXGBE_LINK_SPEED_10GB_FULL:
2870 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2871 break;
2872 case IXGBE_LINK_SPEED_1GB_FULL:
2873 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2874 break;
2875 }
2876 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2877 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2878 switch (adapter->link_speed) {
2879 case IXGBE_LINK_SPEED_10GB_FULL:
2880 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2881 break;
2882 case IXGBE_LINK_SPEED_1GB_FULL:
2883 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2884 break;
2885 }
2886 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2887 switch (adapter->link_speed) {
2888 case IXGBE_LINK_SPEED_10GB_FULL:
2889 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2890 break;
2891 }
2892 /*
2893 * XXX: These need to use the proper media types once
2894 * they're added.
2895 */
2896 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2897 switch (adapter->link_speed) {
2898 case IXGBE_LINK_SPEED_10GB_FULL:
2899 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2900 break;
2901 case IXGBE_LINK_SPEED_2_5GB_FULL:
2902 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2903 break;
2904 case IXGBE_LINK_SPEED_1GB_FULL:
2905 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2906 break;
2907 }
2908 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2909 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2910 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2911 switch (adapter->link_speed) {
2912 case IXGBE_LINK_SPEED_10GB_FULL:
2913 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2914 break;
2915 case IXGBE_LINK_SPEED_2_5GB_FULL:
2916 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2917 break;
2918 case IXGBE_LINK_SPEED_1GB_FULL:
2919 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2920 break;
2921 }
2922
2923 /* If nothing is recognized... */
2924 #if 0
2925 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2926 ifmr->ifm_active |= IFM_UNKNOWN;
2927 #endif
2928
2929 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2930
2931 /* Display current flow control setting used on link */
2932 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2933 hw->fc.current_mode == ixgbe_fc_full)
2934 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2935 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2936 hw->fc.current_mode == ixgbe_fc_full)
2937 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2938
2939 return;
2940 } /* ixgbe_media_status */
2941
2942 /************************************************************************
2943 * ixgbe_media_change - Media Ioctl callback
2944 *
2945 * Called when the user changes speed/duplex using
2946 * media/mediopt option with ifconfig.
2947 ************************************************************************/
2948 static int
2949 ixgbe_media_change(struct ifnet *ifp)
2950 {
2951 struct adapter *adapter = ifp->if_softc;
2952 struct ifmedia *ifm = &adapter->media;
2953 struct ixgbe_hw *hw = &adapter->hw;
2954 ixgbe_link_speed speed = 0;
2955 ixgbe_link_speed link_caps = 0;
2956 bool negotiate = false;
2957 s32 err = IXGBE_NOT_IMPLEMENTED;
2958
2959 INIT_DEBUGOUT("ixgbe_media_change: begin");
2960
2961 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2962 return (EINVAL);
2963
2964 if (hw->phy.media_type == ixgbe_media_type_backplane)
2965 return (EPERM);
2966
2967 /*
2968 * We don't actually need to check against the supported
2969 * media types of the adapter; ifmedia will take care of
2970 * that for us.
2971 */
2972 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2973 case IFM_AUTO:
2974 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
2975 &negotiate);
2976 if (err != IXGBE_SUCCESS) {
2977 device_printf(adapter->dev, "Unable to determine "
2978 "supported advertise speeds\n");
2979 return (ENODEV);
2980 }
2981 speed |= link_caps;
2982 break;
2983 case IFM_10G_T:
2984 case IFM_10G_LRM:
2985 case IFM_10G_LR:
2986 case IFM_10G_TWINAX:
2987 case IFM_10G_SR:
2988 case IFM_10G_CX4:
2989 case IFM_10G_KR:
2990 case IFM_10G_KX4:
2991 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2992 break;
2993 case IFM_5000_T:
2994 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2995 break;
2996 case IFM_2500_T:
2997 case IFM_2500_KX:
2998 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2999 break;
3000 case IFM_1000_T:
3001 case IFM_1000_LX:
3002 case IFM_1000_SX:
3003 case IFM_1000_KX:
3004 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3005 break;
3006 case IFM_100_TX:
3007 speed |= IXGBE_LINK_SPEED_100_FULL;
3008 break;
3009 case IFM_10_T:
3010 speed |= IXGBE_LINK_SPEED_10_FULL;
3011 break;
3012 case IFM_NONE:
3013 break;
3014 default:
3015 goto invalid;
3016 }
3017
3018 hw->mac.autotry_restart = TRUE;
3019 hw->mac.ops.setup_link(hw, speed, TRUE);
3020 adapter->advertise = 0;
3021 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3022 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3023 adapter->advertise |= 1 << 2;
3024 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3025 adapter->advertise |= 1 << 1;
3026 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3027 adapter->advertise |= 1 << 0;
3028 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3029 adapter->advertise |= 1 << 3;
3030 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3031 adapter->advertise |= 1 << 4;
3032 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3033 adapter->advertise |= 1 << 5;
3034 }
3035
3036 return (0);
3037
3038 invalid:
3039 device_printf(adapter->dev, "Invalid media type!\n");
3040
3041 return (EINVAL);
3042 } /* ixgbe_media_change */
3043
3044 /************************************************************************
3045 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3046 ************************************************************************/
3047 static int
3048 ixgbe_msix_admin(void *arg)
3049 {
3050 struct adapter *adapter = arg;
3051 struct ixgbe_hw *hw = &adapter->hw;
3052 u32 eicr, eicr_mask;
3053 u32 task_requests = 0;
3054 s32 retval;
3055
3056 ++adapter->admin_irqev.ev_count;
3057
3058 /* Pause other interrupts */
3059 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3060
3061 /* First get the cause */
3062 /*
3063 * The specifications of 82598, 82599, X540 and X550 say EICS register
3064 * is write only. However, Linux says it is a workaround for silicon
3065 * errata to read EICS instead of EICR to get interrupt cause. It seems
3066 * there is a problem about read clear mechanism for EICR register.
3067 */
3068 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3069 /* Be sure the queue bits are not cleared */
3070 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3071 /* Clear interrupt with write */
3072 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3073
3074 if (ixgbe_is_sfp(hw)) {
3075 /* Pluggable optics-related interrupt */
3076 if (hw->mac.type >= ixgbe_mac_X540)
3077 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3078 else
3079 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3080
3081 /*
3082 * An interrupt might not arrive when a module is inserted.
3083 * When an link status change interrupt occurred and the driver
3084 * still regard SFP as unplugged, issue the module softint
3085 * and then issue LSC interrupt.
3086 */
3087 if ((eicr & eicr_mask)
3088 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3089 && (eicr & IXGBE_EICR_LSC))) {
3090 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3091 task_requests |= IXGBE_REQUEST_TASK_MOD;
3092 }
3093
3094 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3095 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3096 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3097 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3098 task_requests |= IXGBE_REQUEST_TASK_MSF;
3099 }
3100 }
3101
3102 /* Link status change */
3103 if (eicr & IXGBE_EICR_LSC) {
3104 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3105 task_requests |= IXGBE_REQUEST_TASK_LSC;
3106 }
3107
3108 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3109 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3110 (eicr & IXGBE_EICR_FLOW_DIR)) {
3111 /* This is probably overkill :) */
3112 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3113 return 1;
3114 /* Disable the interrupt */
3115 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3116 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3117 }
3118
3119 if (eicr & IXGBE_EICR_ECC) {
3120 device_printf(adapter->dev,
3121 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3122 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3123 }
3124
3125 /* Check for over temp condition */
3126 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3127 switch (adapter->hw.mac.type) {
3128 case ixgbe_mac_X550EM_a:
3129 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3130 break;
3131 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3132 IXGBE_EICR_GPI_SDP0_X550EM_a);
3133 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3134 IXGBE_EICR_GPI_SDP0_X550EM_a);
3135 retval = hw->phy.ops.check_overtemp(hw);
3136 if (retval != IXGBE_ERR_OVERTEMP)
3137 break;
3138 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3139 device_printf(adapter->dev, "System shutdown required!\n");
3140 break;
3141 default:
3142 if (!(eicr & IXGBE_EICR_TS))
3143 break;
3144 retval = hw->phy.ops.check_overtemp(hw);
3145 if (retval != IXGBE_ERR_OVERTEMP)
3146 break;
3147 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3148 device_printf(adapter->dev, "System shutdown required!\n");
3149 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3150 break;
3151 }
3152 }
3153
3154 /* Check for VF message */
3155 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3156 (eicr & IXGBE_EICR_MAILBOX)) {
3157 task_requests |= IXGBE_REQUEST_TASK_MBX;
3158 }
3159 }
3160
3161 /* Check for fan failure */
3162 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3163 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3164 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3165 }
3166
3167 /* External PHY interrupt */
3168 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3169 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3170 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3171 task_requests |= IXGBE_REQUEST_TASK_PHY;
3172 }
3173
3174 if (task_requests != 0) {
3175 /* Re-enabling other interrupts is done in the admin task */
3176 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
3177 atomic_or_32(&adapter->task_requests, task_requests);
3178 ixgbe_schedule_admin_tasklet(adapter);
3179 } else {
3180 /* Re-enable other interrupts */
3181 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3182 }
3183
3184 return 1;
3185 } /* ixgbe_msix_admin */
3186
3187 static void
3188 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3189 {
3190
3191 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3192 itr |= itr << 16;
3193 else
3194 itr |= IXGBE_EITR_CNT_WDIS;
3195
3196 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3197 }
3198
3199
3200 /************************************************************************
3201 * ixgbe_sysctl_interrupt_rate_handler
3202 ************************************************************************/
3203 static int
3204 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3205 {
3206 struct sysctlnode node = *rnode;
3207 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3208 struct adapter *adapter;
3209 uint32_t reg, usec, rate;
3210 int error;
3211
3212 if (que == NULL)
3213 return 0;
3214
3215 adapter = que->adapter;
3216 if (ixgbe_fw_recovery_mode_swflag(adapter))
3217 return (EPERM);
3218
3219 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3220 usec = ((reg & 0x0FF8) >> 3);
3221 if (usec > 0)
3222 rate = 500000 / usec;
3223 else
3224 rate = 0;
3225 node.sysctl_data = &rate;
3226 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3227 if (error || newp == NULL)
3228 return error;
3229 reg &= ~0xfff; /* default, no limitation */
3230 if (rate > 0 && rate < 500000) {
3231 if (rate < 1000)
3232 rate = 1000;
3233 reg |= ((4000000 / rate) & 0xff8);
3234 /*
3235 * When RSC is used, ITR interval must be larger than
3236 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3237 * The minimum value is always greater than 2us on 100M
3238 * (and 10M?(not documented)), but it's not on 1G and higher.
3239 */
3240 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3241 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3242 if ((adapter->num_queues > 1)
3243 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3244 return EINVAL;
3245 }
3246 ixgbe_max_interrupt_rate = rate;
3247 } else
3248 ixgbe_max_interrupt_rate = 0;
3249 ixgbe_eitr_write(adapter, que->msix, reg);
3250
3251 return (0);
3252 } /* ixgbe_sysctl_interrupt_rate_handler */
3253
3254 const struct sysctlnode *
3255 ixgbe_sysctl_instance(struct adapter *adapter)
3256 {
3257 const char *dvname;
3258 struct sysctllog **log;
3259 int rc;
3260 const struct sysctlnode *rnode;
3261
3262 if (adapter->sysctltop != NULL)
3263 return adapter->sysctltop;
3264
3265 log = &adapter->sysctllog;
3266 dvname = device_xname(adapter->dev);
3267
3268 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3269 0, CTLTYPE_NODE, dvname,
3270 SYSCTL_DESCR("ixgbe information and settings"),
3271 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3272 goto err;
3273
3274 return rnode;
3275 err:
3276 device_printf(adapter->dev,
3277 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3278 return NULL;
3279 }
3280
3281 /************************************************************************
3282 * ixgbe_add_device_sysctls
3283 ************************************************************************/
3284 static void
3285 ixgbe_add_device_sysctls(struct adapter *adapter)
3286 {
3287 device_t dev = adapter->dev;
3288 struct ixgbe_hw *hw = &adapter->hw;
3289 struct sysctllog **log;
3290 const struct sysctlnode *rnode, *cnode;
3291
3292 log = &adapter->sysctllog;
3293
3294 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3295 aprint_error_dev(dev, "could not create sysctl root\n");
3296 return;
3297 }
3298
3299 if (sysctl_createv(log, 0, &rnode, &cnode,
3300 CTLFLAG_READWRITE, CTLTYPE_INT,
3301 "debug", SYSCTL_DESCR("Debug Info"),
3302 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3303 aprint_error_dev(dev, "could not create sysctl\n");
3304
3305 if (sysctl_createv(log, 0, &rnode, &cnode,
3306 CTLFLAG_READONLY, CTLTYPE_INT,
3307 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3308 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3309 aprint_error_dev(dev, "could not create sysctl\n");
3310
3311 if (sysctl_createv(log, 0, &rnode, &cnode,
3312 CTLFLAG_READONLY, CTLTYPE_INT,
3313 "num_queues", SYSCTL_DESCR("Number of queues"),
3314 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3315 aprint_error_dev(dev, "could not create sysctl\n");
3316
3317 /* Sysctls for all devices */
3318 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3319 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3320 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3321 CTL_EOL) != 0)
3322 aprint_error_dev(dev, "could not create sysctl\n");
3323
3324 adapter->enable_aim = ixgbe_enable_aim;
3325 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3326 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3327 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3328 aprint_error_dev(dev, "could not create sysctl\n");
3329
3330 if (sysctl_createv(log, 0, &rnode, &cnode,
3331 CTLFLAG_READWRITE, CTLTYPE_INT,
3332 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3333 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3334 CTL_EOL) != 0)
3335 aprint_error_dev(dev, "could not create sysctl\n");
3336
3337 /*
3338 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3339 * it causesflip-flopping softint/workqueue mode in one deferred
3340 * processing. Therefore, preempt_disable()/preempt_enable() are
3341 * required in ixgbe_sched_handle_que() to avoid
3342 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3343 * I think changing "que->txrx_use_workqueue" in interrupt handler
3344 * is lighter than doing preempt_disable()/preempt_enable() in every
3345 * ixgbe_sched_handle_que().
3346 */
3347 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3348 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3349 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3350 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3351 aprint_error_dev(dev, "could not create sysctl\n");
3352
3353 #ifdef IXGBE_DEBUG
3354 /* testing sysctls (for all devices) */
3355 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3356 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3357 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3358 CTL_EOL) != 0)
3359 aprint_error_dev(dev, "could not create sysctl\n");
3360
3361 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3362 CTLTYPE_STRING, "print_rss_config",
3363 SYSCTL_DESCR("Prints RSS Configuration"),
3364 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3365 CTL_EOL) != 0)
3366 aprint_error_dev(dev, "could not create sysctl\n");
3367 #endif
3368 /* for X550 series devices */
3369 if (hw->mac.type >= ixgbe_mac_X550)
3370 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3371 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3372 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3373 CTL_EOL) != 0)
3374 aprint_error_dev(dev, "could not create sysctl\n");
3375
3376 /* for WoL-capable devices */
3377 if (adapter->wol_support) {
3378 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3379 CTLTYPE_BOOL, "wol_enable",
3380 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3381 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3382 CTL_EOL) != 0)
3383 aprint_error_dev(dev, "could not create sysctl\n");
3384
3385 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3386 CTLTYPE_INT, "wufc",
3387 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3388 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3389 CTL_EOL) != 0)
3390 aprint_error_dev(dev, "could not create sysctl\n");
3391 }
3392
3393 /* for X552/X557-AT devices */
3394 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3395 const struct sysctlnode *phy_node;
3396
3397 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3398 "phy", SYSCTL_DESCR("External PHY sysctls"),
3399 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3400 aprint_error_dev(dev, "could not create sysctl\n");
3401 return;
3402 }
3403
3404 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3405 CTLTYPE_INT, "temp",
3406 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3407 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3408 CTL_EOL) != 0)
3409 aprint_error_dev(dev, "could not create sysctl\n");
3410
3411 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3412 CTLTYPE_INT, "overtemp_occurred",
3413 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3414 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3415 CTL_CREATE, CTL_EOL) != 0)
3416 aprint_error_dev(dev, "could not create sysctl\n");
3417 }
3418
3419 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3420 && (hw->phy.type == ixgbe_phy_fw))
3421 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3422 CTLTYPE_BOOL, "force_10_100_autonego",
3423 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3424 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3425 CTL_CREATE, CTL_EOL) != 0)
3426 aprint_error_dev(dev, "could not create sysctl\n");
3427
3428 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3429 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3430 CTLTYPE_INT, "eee_state",
3431 SYSCTL_DESCR("EEE Power Save State"),
3432 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3433 CTL_EOL) != 0)
3434 aprint_error_dev(dev, "could not create sysctl\n");
3435 }
3436 } /* ixgbe_add_device_sysctls */
3437
3438 /************************************************************************
3439 * ixgbe_allocate_pci_resources
3440 ************************************************************************/
3441 static int
3442 ixgbe_allocate_pci_resources(struct adapter *adapter,
3443 const struct pci_attach_args *pa)
3444 {
3445 pcireg_t memtype, csr;
3446 device_t dev = adapter->dev;
3447 bus_addr_t addr;
3448 int flags;
3449
3450 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3451 switch (memtype) {
3452 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3453 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3454 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3455 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3456 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3457 goto map_err;
3458 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3459 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3460 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3461 }
3462 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3463 adapter->osdep.mem_size, flags,
3464 &adapter->osdep.mem_bus_space_handle) != 0) {
3465 map_err:
3466 adapter->osdep.mem_size = 0;
3467 aprint_error_dev(dev, "unable to map BAR0\n");
3468 return ENXIO;
3469 }
3470 /*
3471 * Enable address decoding for memory range in case BIOS or
3472 * UEFI don't set it.
3473 */
3474 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3475 PCI_COMMAND_STATUS_REG);
3476 csr |= PCI_COMMAND_MEM_ENABLE;
3477 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3478 csr);
3479 break;
3480 default:
3481 aprint_error_dev(dev, "unexpected type on BAR0\n");
3482 return ENXIO;
3483 }
3484
3485 return (0);
3486 } /* ixgbe_allocate_pci_resources */
3487
/*
 * ixgbe_free_workqueue - tear down all deferred-processing contexts:
 * per-queue softints, the TX/RX workqueues and their percpu state, and
 * the admin/timer/recovery-mode workqueues.  The recovery-mode timer
 * workqueue is drained here because ixgbe_ifstop() does not wait on it.
 */
static void
ixgbe_free_workqueue(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	/* Per-queue softints (TX softint only when not in legacy TX mode). */
	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	if (adapter->admin_wq != NULL) {
		workqueue_destroy(adapter->admin_wq);
		adapter->admin_wq = NULL;
	}
	if (adapter->timer_wq != NULL) {
		workqueue_destroy(adapter->timer_wq);
		adapter->timer_wq = NULL;
	}
	if (adapter->recovery_mode_timer_wq != NULL) {
		/*
		 * ixgbe_ifstop() doesn't call the workqueue_wait() for
		 * the recovery_mode_timer workqueue, so call it here.
		 */
		workqueue_wait(adapter->recovery_mode_timer_wq,
		    &adapter->recovery_mode_timer_wc);
		atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
		workqueue_destroy(adapter->recovery_mode_timer_wq);
		adapter->recovery_mode_timer_wq = NULL;
	}
} /* ixgbe_free_workqueue */
3530
3531 /************************************************************************
3532 * ixgbe_detach - Device removal routine
3533 *
3534 * Called when the driver is being removed.
3535 * Stops the adapter and deallocates all the resources
3536 * that were allocated for driver operation.
3537 *
3538 * return 0 on success, positive on failure
3539 ************************************************************************/
3540 static int
3541 ixgbe_detach(device_t dev, int flags)
3542 {
3543 struct adapter *adapter = device_private(dev);
3544 struct rx_ring *rxr = adapter->rx_rings;
3545 struct tx_ring *txr = adapter->tx_rings;
3546 struct ixgbe_hw *hw = &adapter->hw;
3547 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3548 u32 ctrl_ext;
3549 int i;
3550
3551 INIT_DEBUGOUT("ixgbe_detach: begin");
3552 if (adapter->osdep.attached == false)
3553 return 0;
3554
3555 if (ixgbe_pci_iov_detach(dev) != 0) {
3556 device_printf(dev, "SR-IOV in use; detach first.\n");
3557 return (EBUSY);
3558 }
3559
3560 #if NVLAN > 0
3561 /* Make sure VLANs are not using driver */
3562 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3563 ; /* nothing to do: no VLANs */
3564 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3565 vlan_ifdetach(adapter->ifp);
3566 else {
3567 aprint_error_dev(dev, "VLANs in use, detach first\n");
3568 return (EBUSY);
3569 }
3570 #endif
3571
3572 /*
3573 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3574 * so it's not required to call ixgbe_stop() directly.
3575 */
3576 IXGBE_CORE_LOCK(adapter);
3577 ixgbe_setup_low_power_mode(adapter);
3578 IXGBE_CORE_UNLOCK(adapter);
3579
3580 callout_halt(&adapter->timer, NULL);
3581 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
3582 callout_stop(&adapter->recovery_mode_timer);
3583 callout_halt(&adapter->recovery_mode_timer, NULL);
3584 }
3585
3586 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3587 atomic_store_relaxed(&adapter->admin_pending, 0);
3588 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3589 atomic_store_relaxed(&adapter->timer_pending, 0);
3590
3591 pmf_device_deregister(dev);
3592
3593 ether_ifdetach(adapter->ifp);
3594
3595 ixgbe_free_workqueue(adapter);
3596
3597 /* let hardware know driver is unloading */
3598 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3599 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3600 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3601
3602 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3603 netmap_detach(adapter->ifp);
3604
3605 ixgbe_free_pci_resources(adapter);
3606 #if 0 /* XXX the NetBSD port is probably missing something here */
3607 bus_generic_detach(dev);
3608 #endif
3609 if_detach(adapter->ifp);
3610 ifmedia_fini(&adapter->media);
3611 if_percpuq_destroy(adapter->ipq);
3612
3613 sysctl_teardown(&adapter->sysctllog);
3614 evcnt_detach(&adapter->efbig_tx_dma_setup);
3615 evcnt_detach(&adapter->mbuf_defrag_failed);
3616 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3617 evcnt_detach(&adapter->einval_tx_dma_setup);
3618 evcnt_detach(&adapter->other_tx_dma_setup);
3619 evcnt_detach(&adapter->eagain_tx_dma_setup);
3620 evcnt_detach(&adapter->enomem_tx_dma_setup);
3621 evcnt_detach(&adapter->watchdog_events);
3622 evcnt_detach(&adapter->tso_err);
3623 evcnt_detach(&adapter->admin_irqev);
3624 evcnt_detach(&adapter->link_workev);
3625 evcnt_detach(&adapter->mod_workev);
3626 evcnt_detach(&adapter->msf_workev);
3627 evcnt_detach(&adapter->phy_workev);
3628
3629 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3630 if (i < __arraycount(stats->mpc)) {
3631 evcnt_detach(&stats->mpc[i]);
3632 if (hw->mac.type == ixgbe_mac_82598EB)
3633 evcnt_detach(&stats->rnbc[i]);
3634 }
3635 if (i < __arraycount(stats->pxontxc)) {
3636 evcnt_detach(&stats->pxontxc[i]);
3637 evcnt_detach(&stats->pxonrxc[i]);
3638 evcnt_detach(&stats->pxofftxc[i]);
3639 evcnt_detach(&stats->pxoffrxc[i]);
3640 if (hw->mac.type >= ixgbe_mac_82599EB)
3641 evcnt_detach(&stats->pxon2offc[i]);
3642 }
3643 }
3644
3645 txr = adapter->tx_rings;
3646 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3647 evcnt_detach(&adapter->queues[i].irqs);
3648 evcnt_detach(&adapter->queues[i].handleq);
3649 evcnt_detach(&adapter->queues[i].req);
3650 evcnt_detach(&txr->no_desc_avail);
3651 evcnt_detach(&txr->total_packets);
3652 evcnt_detach(&txr->tso_tx);
3653 #ifndef IXGBE_LEGACY_TX
3654 evcnt_detach(&txr->pcq_drops);
3655 #endif
3656
3657 if (i < __arraycount(stats->qprc)) {
3658 evcnt_detach(&stats->qprc[i]);
3659 evcnt_detach(&stats->qptc[i]);
3660 evcnt_detach(&stats->qbrc[i]);
3661 evcnt_detach(&stats->qbtc[i]);
3662 if (hw->mac.type >= ixgbe_mac_82599EB)
3663 evcnt_detach(&stats->qprdc[i]);
3664 }
3665
3666 evcnt_detach(&rxr->rx_packets);
3667 evcnt_detach(&rxr->rx_bytes);
3668 evcnt_detach(&rxr->rx_copies);
3669 evcnt_detach(&rxr->no_jmbuf);
3670 evcnt_detach(&rxr->rx_discarded);
3671 }
3672 evcnt_detach(&stats->ipcs);
3673 evcnt_detach(&stats->l4cs);
3674 evcnt_detach(&stats->ipcs_bad);
3675 evcnt_detach(&stats->l4cs_bad);
3676 evcnt_detach(&stats->intzero);
3677 evcnt_detach(&stats->legint);
3678 evcnt_detach(&stats->crcerrs);
3679 evcnt_detach(&stats->illerrc);
3680 evcnt_detach(&stats->errbc);
3681 evcnt_detach(&stats->mspdc);
3682 if (hw->mac.type >= ixgbe_mac_X550)
3683 evcnt_detach(&stats->mbsdc);
3684 evcnt_detach(&stats->mpctotal);
3685 evcnt_detach(&stats->mlfc);
3686 evcnt_detach(&stats->mrfc);
3687 evcnt_detach(&stats->rlec);
3688 evcnt_detach(&stats->lxontxc);
3689 evcnt_detach(&stats->lxonrxc);
3690 evcnt_detach(&stats->lxofftxc);
3691 evcnt_detach(&stats->lxoffrxc);
3692
3693 /* Packet Reception Stats */
3694 evcnt_detach(&stats->tor);
3695 evcnt_detach(&stats->gorc);
3696 evcnt_detach(&stats->tpr);
3697 evcnt_detach(&stats->gprc);
3698 evcnt_detach(&stats->mprc);
3699 evcnt_detach(&stats->bprc);
3700 evcnt_detach(&stats->prc64);
3701 evcnt_detach(&stats->prc127);
3702 evcnt_detach(&stats->prc255);
3703 evcnt_detach(&stats->prc511);
3704 evcnt_detach(&stats->prc1023);
3705 evcnt_detach(&stats->prc1522);
3706 evcnt_detach(&stats->ruc);
3707 evcnt_detach(&stats->rfc);
3708 evcnt_detach(&stats->roc);
3709 evcnt_detach(&stats->rjc);
3710 evcnt_detach(&stats->mngprc);
3711 evcnt_detach(&stats->mngpdc);
3712 evcnt_detach(&stats->xec);
3713
3714 /* Packet Transmission Stats */
3715 evcnt_detach(&stats->gotc);
3716 evcnt_detach(&stats->tpt);
3717 evcnt_detach(&stats->gptc);
3718 evcnt_detach(&stats->bptc);
3719 evcnt_detach(&stats->mptc);
3720 evcnt_detach(&stats->mngptc);
3721 evcnt_detach(&stats->ptc64);
3722 evcnt_detach(&stats->ptc127);
3723 evcnt_detach(&stats->ptc255);
3724 evcnt_detach(&stats->ptc511);
3725 evcnt_detach(&stats->ptc1023);
3726 evcnt_detach(&stats->ptc1522);
3727
3728 ixgbe_free_queues(adapter);
3729 free(adapter->mta, M_DEVBUF);
3730
3731 IXGBE_CORE_LOCK_DESTROY(adapter);
3732
3733 return (0);
3734 } /* ixgbe_detach */
3735
3736 /************************************************************************
3737 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3738 *
3739 * Prepare the adapter/port for LPLU and/or WoL
3740 ************************************************************************/
3741 static int
3742 ixgbe_setup_low_power_mode(struct adapter *adapter)
3743 {
3744 struct ixgbe_hw *hw = &adapter->hw;
3745 device_t dev = adapter->dev;
3746 s32 error = 0;
3747
3748 KASSERT(mutex_owned(&adapter->core_mtx));
3749
3750 /* Limit power management flow to X550EM baseT */
3751 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3752 hw->phy.ops.enter_lplu) {
3753 /* X550EM baseT adapters need a special LPLU flow */
3754 hw->phy.reset_disable = true;
3755 ixgbe_stop(adapter);
3756 error = hw->phy.ops.enter_lplu(hw);
3757 if (error)
3758 device_printf(dev,
3759 "Error entering LPLU: %d\n", error);
3760 hw->phy.reset_disable = false;
3761 } else {
3762 /* Just stop for other adapters */
3763 ixgbe_stop(adapter);
3764 }
3765
3766 if (!hw->wol_enabled) {
3767 ixgbe_set_phy_power(hw, FALSE);
3768 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3769 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3770 } else {
3771 /* Turn off support for APM wakeup. (Using ACPI instead) */
3772 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3773 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3774
3775 /*
3776 * Clear Wake Up Status register to prevent any previous wakeup
3777 * events from waking us up immediately after we suspend.
3778 */
3779 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3780
3781 /*
3782 * Program the Wakeup Filter Control register with user filter
3783 * settings
3784 */
3785 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3786
3787 /* Enable wakeups and power management in Wakeup Control */
3788 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3789 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3790
3791 }
3792
3793 return error;
3794 } /* ixgbe_setup_low_power_mode */
3795
3796 /************************************************************************
3797 * ixgbe_shutdown - Shutdown entry point
3798 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * Put the adapter into low-power mode before system power-off.
 * Currently compiled out; ixgbe_suspend() performs the same
 * transition via the pmf(9) suspend hook.
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* The low-power transition must run under the core lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3815
3816 /************************************************************************
3817 * ixgbe_suspend
3818 *
3819 * From D0 to D3
3820 ************************************************************************/
3821 static bool
3822 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3823 {
3824 struct adapter *adapter = device_private(dev);
3825 int error = 0;
3826
3827 INIT_DEBUGOUT("ixgbe_suspend: begin");
3828
3829 IXGBE_CORE_LOCK(adapter);
3830
3831 error = ixgbe_setup_low_power_mode(adapter);
3832
3833 IXGBE_CORE_UNLOCK(adapter);
3834
3835 return (error);
3836 } /* ixgbe_suspend */
3837
3838 /************************************************************************
3839 * ixgbe_resume
3840 *
3841 * From D3 to D0
3842 ************************************************************************/
3843 static bool
3844 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3845 {
3846 struct adapter *adapter = device_private(dev);
3847 struct ifnet *ifp = adapter->ifp;
3848 struct ixgbe_hw *hw = &adapter->hw;
3849 u32 wus;
3850
3851 INIT_DEBUGOUT("ixgbe_resume: begin");
3852
3853 IXGBE_CORE_LOCK(adapter);
3854
3855 /* Read & clear WUS register */
3856 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3857 if (wus)
3858 device_printf(dev, "Woken up by (WUS): %#010x\n",
3859 IXGBE_READ_REG(hw, IXGBE_WUS));
3860 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3861 /* And clear WUFC until next low-power transition */
3862 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3863
3864 /*
3865 * Required after D3->D0 transition;
3866 * will re-advertise all previous advertised speeds
3867 */
3868 if (ifp->if_flags & IFF_UP)
3869 ixgbe_init_locked(adapter);
3870
3871 IXGBE_CORE_UNLOCK(adapter);
3872
3873 return true;
3874 } /* ixgbe_resume */
3875
3876 /*
3877 * Set the various hardware offload abilities.
3878 *
3879 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3880 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3881 * mbuf offload flags the driver will understand.
3882 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Placeholder carried over from the FreeBSD driver, where
	 * if_hwassist must be derived from if_capenable.  Nothing is
	 * implemented here for NetBSD yet -- TODO confirm whether any
	 * equivalent bookkeeping is required.
	 */
}
3888
3889 /************************************************************************
3890 * ixgbe_init_locked - Init entry point
3891 *
3892 * Used in two ways: It is used by the stack as an init
3893 * entry point in network interface structure. It is also
3894 * used by the driver as a hw/sw initialization routine to
3895 * get to a consistent state.
3896 *
 *   Returns void; on an internal setup failure it logs the error,
 *   calls ixgbe_stop() and returns early.
3898 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and stop our periodic callouts first. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_stop(&adapter->recovery_mode_timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	/*
	 * NOTE(review): the literal 1 here is presumably IXGBE_RAH_AV,
	 * matching the call above -- confirm against ixgbe_set_rar().
	 */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the RX enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic timers that were stopped above. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy (skipped for an unsupported module). */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4161
4162 /************************************************************************
4163 * ixgbe_init
4164 ************************************************************************/
4165 static int
4166 ixgbe_init(struct ifnet *ifp)
4167 {
4168 struct adapter *adapter = ifp->if_softc;
4169
4170 IXGBE_CORE_LOCK(adapter);
4171 ixgbe_init_locked(adapter);
4172 IXGBE_CORE_UNLOCK(adapter);
4173
4174 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4175 } /* ixgbe_init */
4176
4177 /************************************************************************
4178 * ixgbe_set_ivar
4179 *
4180 * Setup the correct IVAR register for a particular MSI-X interrupt
4181 * (yes this is all very magic and confusing :)
4182 * - entry is the register array entry
4183 * - vector is the MSI-X vector for this queue
4184 * - type is RX/TX/MISC
4185 ************************************************************************/
4186 static void
4187 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4188 {
4189 struct ixgbe_hw *hw = &adapter->hw;
4190 u32 ivar, index;
4191
4192 vector |= IXGBE_IVAR_ALLOC_VAL;
4193
4194 switch (hw->mac.type) {
4195 case ixgbe_mac_82598EB:
4196 if (type == -1)
4197 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4198 else
4199 entry += (type * 64);
4200 index = (entry >> 2) & 0x1F;
4201 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4202 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4203 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4204 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4205 break;
4206 case ixgbe_mac_82599EB:
4207 case ixgbe_mac_X540:
4208 case ixgbe_mac_X550:
4209 case ixgbe_mac_X550EM_x:
4210 case ixgbe_mac_X550EM_a:
4211 if (type == -1) { /* MISC IVAR */
4212 index = (entry & 1) * 8;
4213 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4214 ivar &= ~(0xffUL << index);
4215 ivar |= ((u32)vector << index);
4216 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4217 } else { /* RX/TX IVARS */
4218 index = (16 * (entry & 1)) + (8 * type);
4219 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4220 ivar &= ~(0xffUL << index);
4221 ivar |= ((u32)vector << index);
4222 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4223 }
4224 break;
4225 default:
4226 break;
4227 }
4228 } /* ixgbe_set_ivar */
4229
4230 /************************************************************************
4231 * ixgbe_configure_ivars
4232 ************************************************************************/
4233 static void
4234 ixgbe_configure_ivars(struct adapter *adapter)
4235 {
4236 struct ix_queue *que = adapter->queues;
4237 u32 newitr;
4238
4239 if (ixgbe_max_interrupt_rate > 0)
4240 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4241 else {
4242 /*
4243 * Disable DMA coalescing if interrupt moderation is
4244 * disabled.
4245 */
4246 adapter->dmac = 0;
4247 newitr = 0;
4248 }
4249
4250 for (int i = 0; i < adapter->num_queues; i++, que++) {
4251 struct rx_ring *rxr = &adapter->rx_rings[i];
4252 struct tx_ring *txr = &adapter->tx_rings[i];
4253 /* First the RX queue entry */
4254 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4255 /* ... and the TX */
4256 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4257 /* Set an Initial EITR value */
4258 ixgbe_eitr_write(adapter, que->msix, newitr);
4259 /*
4260 * To eliminate influence of the previous state.
4261 * At this point, Tx/Rx interrupt handler
4262 * (ixgbe_msix_que()) cannot be called, so both
4263 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4264 */
4265 que->eitr_setting = 0;
4266 }
4267
4268 /* For the Link interrupt */
4269 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4270 } /* ixgbe_configure_ivars */
4271
4272 /************************************************************************
4273 * ixgbe_config_gpie
4274 ************************************************************************/
4275 static void
4276 ixgbe_config_gpie(struct adapter *adapter)
4277 {
4278 struct ixgbe_hw *hw = &adapter->hw;
4279 u32 gpie;
4280
4281 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4282
4283 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4284 /* Enable Enhanced MSI-X mode */
4285 gpie |= IXGBE_GPIE_MSIX_MODE
4286 | IXGBE_GPIE_EIAME
4287 | IXGBE_GPIE_PBA_SUPPORT
4288 | IXGBE_GPIE_OCD;
4289 }
4290
4291 /* Fan Failure Interrupt */
4292 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4293 gpie |= IXGBE_SDP1_GPIEN;
4294
4295 /* Thermal Sensor Interrupt */
4296 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4297 gpie |= IXGBE_SDP0_GPIEN_X540;
4298
4299 /* Link detection */
4300 switch (hw->mac.type) {
4301 case ixgbe_mac_82599EB:
4302 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4303 break;
4304 case ixgbe_mac_X550EM_x:
4305 case ixgbe_mac_X550EM_a:
4306 gpie |= IXGBE_SDP0_GPIEN_X540;
4307 break;
4308 default:
4309 break;
4310 }
4311
4312 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4313
4314 } /* ixgbe_config_gpie */
4315
4316 /************************************************************************
4317 * ixgbe_config_delay_values
4318 *
4319 * Requires adapter->max_frame_size to be set.
4320 ************************************************************************/
4321 static void
4322 ixgbe_config_delay_values(struct adapter *adapter)
4323 {
4324 struct ixgbe_hw *hw = &adapter->hw;
4325 u32 rxpb, frame, size, tmp;
4326
4327 frame = adapter->max_frame_size;
4328
4329 /* Calculate High Water */
4330 switch (hw->mac.type) {
4331 case ixgbe_mac_X540:
4332 case ixgbe_mac_X550:
4333 case ixgbe_mac_X550EM_x:
4334 case ixgbe_mac_X550EM_a:
4335 tmp = IXGBE_DV_X540(frame, frame);
4336 break;
4337 default:
4338 tmp = IXGBE_DV(frame, frame);
4339 break;
4340 }
4341 size = IXGBE_BT2KB(tmp);
4342 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4343 hw->fc.high_water[0] = rxpb - size;
4344
4345 /* Now calculate Low Water */
4346 switch (hw->mac.type) {
4347 case ixgbe_mac_X540:
4348 case ixgbe_mac_X550:
4349 case ixgbe_mac_X550EM_x:
4350 case ixgbe_mac_X550EM_a:
4351 tmp = IXGBE_LOW_DV_X540(frame);
4352 break;
4353 default:
4354 tmp = IXGBE_LOW_DV(frame);
4355 break;
4356 }
4357 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4358
4359 hw->fc.pause_time = IXGBE_FC_PAUSE;
4360 hw->fc.send_xon = TRUE;
4361 } /* ixgbe_config_delay_values */
4362
4363 /************************************************************************
4364 * ixgbe_set_rxfilter - Multicast Update
4365 *
4366 * Called whenever multicast address list is updated.
4367 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the driver's multicast address table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI when the table is full or a
		 * range (addrlo != addrhi) is requested; a range cannot
		 * be expressed with individual filter entries.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Translate IFF_PROMISC / ALLMULTI state into FCTRL bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		ETHER_UNLOCK(ec);
		/*
		 * The mta copy built above is private to the adapter
		 * (core_mtx is held), so the shared code may iterate it
		 * via ixgbe_mc_array_itr() after ETHER_LOCK is dropped.
		 */
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4423
4424 /************************************************************************
4425 * ixgbe_mc_array_itr
4426 *
4427 * An iterator function needed by the multicast shared code.
4428 * It feeds the shared code routine the addresses in the
4429 * array of ixgbe_set_rxfilter() one by one.
4430 ************************************************************************/
4431 static u8 *
4432 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4433 {
4434 struct ixgbe_mc_addr *mta;
4435
4436 mta = (struct ixgbe_mc_addr *)*update_ptr;
4437 *vmdq = mta->vmdq;
4438
4439 *update_ptr = (u8*)(mta + 1);
4440
4441 return (mta->addr);
4442 } /* ixgbe_mc_array_itr */
4443
4444 /************************************************************************
4445 * ixgbe_local_timer - Timer routine
4446 *
4447 * Checks for link status, updates statistics,
4448 * and runs the watchdog check.
4449 ************************************************************************/
4450 static void
4451 ixgbe_local_timer(void *arg)
4452 {
4453 struct adapter *adapter = arg;
4454
4455 if (adapter->schedule_wqs_ok) {
4456 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4457 workqueue_enqueue(adapter->timer_wq,
4458 &adapter->timer_wc, NULL);
4459 }
4460 }
4461
static void
ixgbe_handle_timer(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	IXGBE_CORE_LOCK(adapter);

	/* Check for pluggable optics */
	if (ixgbe_is_sfp(hw)) {
		bool was_full = hw->phy.sfp_type != ixgbe_sfp_type_not_present;
		bool is_full = ixgbe_sfp_cage_full(hw);

		/* do probe if cage state changed */
		if (was_full ^ is_full) {
			atomic_or_32(&adapter->task_requests,
			    IXGBE_REQUEST_TASK_MOD);
			ixgbe_schedule_admin_tasklet(adapter);
		}
	}

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: fold the per-queue TX setup/error
	 * counters into the adapter-wide evcnt totals.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

	/* Allow a new enqueue, then re-arm the callout for the next tick. */
	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/*
	 * All queues are hung: log it, count the event and reinitialize.
	 * ixgbe_init_locked() re-arms the callout and clears
	 * timer_pending itself, so neither is done here.
	 */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_timer */
4577
4578 /************************************************************************
4579 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4580 ************************************************************************/
4581 static void
4582 ixgbe_recovery_mode_timer(void *arg)
4583 {
4584 struct adapter *adapter = arg;
4585
4586 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0)
4587 {
4588 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4589 &adapter->recovery_mode_timer_wc, NULL);
4590 }
4591 }
4592
4593 static void
4594 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4595 {
4596 struct adapter *adapter = context;
4597 struct ixgbe_hw *hw = &adapter->hw;
4598
4599 IXGBE_CORE_LOCK(adapter);
4600 if (ixgbe_fw_recovery_mode(hw)) {
4601 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4602 /* Firmware error detected, entering recovery mode */
4603 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4604
4605 if (hw->adapter_stopped == FALSE)
4606 ixgbe_stop(adapter);
4607 }
4608 } else
4609 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4610
4611 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4612 callout_reset(&adapter->recovery_mode_timer, hz,
4613 ixgbe_recovery_mode_timer, adapter);
4614 IXGBE_CORE_UNLOCK(adapter);
4615 } /* ixgbe_handle_recovery_mode_timer */
4616
4617 /************************************************************************
4618 * ixgbe_sfp_cage_full
4619 *
4620 * Determine if a port had optics inserted.
4621 ************************************************************************/
4622 static bool
4623 ixgbe_sfp_cage_full(struct ixgbe_hw *hw)
4624 {
4625 uint32_t mask;
4626 int rv;
4627
4628 if (hw->mac.type >= ixgbe_mac_X540)
4629 mask = IXGBE_ESDP_SDP0;
4630 else
4631 mask = IXGBE_ESDP_SDP2;
4632
4633 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask;
4634 if (hw->mac.type == ixgbe_mac_X550EM_a) {
4635 /* It seems X550EM_a's SDP0 is inverted than others... */
4636 return (rv == 0);
4637 }
4638
4639 return rv;
4640 } /* ixgbe_sfp_cage_full */
4641
4642 /************************************************************************
4643 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4644 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	/*
	 * Called from ixgbe_handle_admin() with the core lock held; note
	 * that the lock is dropped and retaken around the ifmedia calls
	 * near the end.
	 */
	++adapter->mod_workev.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk workaround: only probe the SFP when the cage
		 * actually reports a module present, otherwise bail out.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			/*
			 * XXX See ixgbe_sfp_cage_full(). It seems the bit is
			 * inverted on X550EM_a, so I think this is incorrect.
			 */
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		/* 82598EB resets the PHY; later MACs reprogram the SFP. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}

out:
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * Adjust media types shown in ifconfig.
	 * The core lock must be dropped here because the ifmedia functions
	 * take their own lock (see the comment in ixgbe_handle_admin()).
	 */
	IXGBE_CORE_UNLOCK(adapter);
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_LOCK(adapter);

	/* Request a multispeed-fiber pass; handled later in the same loop. */
	atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
	/*
	 * Don't call ixgbe_schedule_admin_tasklet() because we are on
	 * the workqueue now.
	 */
} /* ixgbe_handle_mod */
4725
4726
4727 /************************************************************************
4728 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4729 ************************************************************************/
4730 static void
4731 ixgbe_handle_msf(void *context)
4732 {
4733 struct adapter *adapter = context;
4734 struct ixgbe_hw *hw = &adapter->hw;
4735 u32 autoneg;
4736 bool negotiate;
4737
4738 ++adapter->msf_workev.ev_count;
4739
4740 autoneg = hw->phy.autoneg_advertised;
4741 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4742 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4743 if (hw->mac.ops.setup_link)
4744 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4745 } /* ixgbe_handle_msf */
4746
4747 /************************************************************************
4748 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4749 ************************************************************************/
4750 static void
4751 ixgbe_handle_phy(void *context)
4752 {
4753 struct adapter *adapter = context;
4754 struct ixgbe_hw *hw = &adapter->hw;
4755 int error;
4756
4757 ++adapter->phy_workev.ev_count;
4758 error = hw->phy.ops.handle_lasi(hw);
4759 if (error == IXGBE_ERR_OVERTEMP)
4760 device_printf(adapter->dev,
4761 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4762 " PHY will downshift to lower power state!\n");
4763 else if (error)
4764 device_printf(adapter->dev,
4765 "Error handling LASI interrupt: %d\n", error);
4766 } /* ixgbe_handle_phy */
4767
static void
ixgbe_handle_admin(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 req;

	/*
	 * Hold the IFNET_LOCK across this entire call. This will
	 * prevent additional changes to adapter->phy_layer
	 * and serialize calls to this tasklet. We cannot hold the
	 * CORE_LOCK while calling into the ifmedia functions as
	 * they call ifmedia_lock() and the lock is CORE_LOCK.
	 */
	IFNET_LOCK(ifp);
	IXGBE_CORE_LOCK(adapter);
	/*
	 * Re-read task_requests each pass: a handler (or an interrupt)
	 * may set new request bits while earlier ones are serviced.
	 * Each bit is cleared only after its handler has run.
	 */
	while ((req = adapter->task_requests) != 0) {
		if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
			ixgbe_handle_link(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_LSC);
		}
		if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
			ixgbe_handle_mod(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_MOD);
		}
		if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
			ixgbe_handle_msf(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_MSF);
		}
		if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
			ixgbe_handle_phy(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_PHY);
		}
		if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
			ixgbe_reinit_fdir(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_FDIR);
		}
#if 0 /* notyet */
		if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
			ixgbe_handle_mbx(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_MBX);
		}
#endif
	}
	/* Allow a new admin work item to be scheduled. */
	atomic_store_relaxed(&adapter->admin_pending, 0);
	/*
	 * The interrupt handler defers re-enabling interrupts to us by
	 * setting NEED_ACKINTR; ack it here now that the work is done.
	 */
	if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
		atomic_and_32(&adapter->task_requests,
		    ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
		if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
			/* Re-enable other interrupts */
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
		} else
			ixgbe_enable_intr(adapter);
	}

	IXGBE_CORE_UNLOCK(adapter);
	IFNET_UNLOCK(ifp);
} /* ixgbe_handle_admin */
4833
/*
 * if_stop callback: stop the hardware and drain the timer workqueue.
 * The "disable" argument is unused.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/*
	 * Wait for any in-flight timer work to finish (outside the core
	 * lock), then clear the pending flag so the next start begins
	 * from a clean state.
	 */
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);
}
4846
4847 /************************************************************************
4848 * ixgbe_stop - Stop the hardware
4849 *
4850 * Disables all traffic on the adapter by issuing a
4851 * global reset on the MAC and deallocates TX/RX buffers.
4852 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling ixgbe_stop_adapter() --
	 * NOTE(review): presumably that routine short-circuits when the
	 * flag is already set, so this forces the full stop sequence;
	 * confirm in the shared ixgbe code.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4891
4892 /************************************************************************
4893 * ixgbe_update_link_status - Update OS on link state
4894 *
4895 * Note: Only updates the OS on the cached link state.
4896 * The real check of the hardware only happens with
4897 * a link interrupt.
4898 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a transition to LINK_STATE_UP. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4985
4986 /************************************************************************
4987 * ixgbe_config_dmac - Configure DMA Coalescing
4988 ************************************************************************/
4989 static void
4990 ixgbe_config_dmac(struct adapter *adapter)
4991 {
4992 struct ixgbe_hw *hw = &adapter->hw;
4993 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4994
4995 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4996 return;
4997
4998 if (dcfg->watchdog_timer ^ adapter->dmac ||
4999 dcfg->link_speed ^ adapter->link_speed) {
5000 dcfg->watchdog_timer = adapter->dmac;
5001 dcfg->fcoe_en = false;
5002 dcfg->link_speed = adapter->link_speed;
5003 dcfg->num_tcs = 1;
5004
5005 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5006 dcfg->watchdog_timer, dcfg->link_speed);
5007
5008 hw->mac.ops.dmac_config(hw);
5009 }
5010 } /* ixgbe_config_dmac */
5011
5012 /************************************************************************
5013 * ixgbe_enable_intr
5014 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything except the per-queue RX/TX causes. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific miscellaneous interrupt causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5095
5096 /************************************************************************
5097 * ixgbe_disable_intr_internal
5098 ************************************************************************/
/*
 * Mask all interrupt sources.  "nestok" is forwarded to
 * ixgbe_disable_queue_internal(); NOTE(review): presumably it controls
 * whether nested disables are tolerated -- confirm at its definition.
 */
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Clear the MSI-X auto-clear mask as well. */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
5116
5117 /************************************************************************
5118 * ixgbe_disable_intr
5119 ************************************************************************/
/* Disable all interrupts, allowing nested queue disables. */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5126
5127 /************************************************************************
5128 * ixgbe_ensure_disabled_intr
5129 ************************************************************************/
/* Disable all interrupts without permitting nested queue disables. */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5136
5137 /************************************************************************
5138 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5139 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	bool reenable_intr = true;
	u32 eicr, eicr_mask;
	u32 task_requests = 0;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* A zero cause register means the interrupt was not ours. */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		task_requests |= IXGBE_REQUEST_TASK_LSC;

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack the cause, defer to MOD task */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599 multispeed fiber: ack and defer to the MSF task */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		task_requests |= IXGBE_REQUEST_TASK_PHY;

	/*
	 * If deferred work was scheduled, interrupts stay masked and are
	 * re-enabled by the deferred handlers when they finish.
	 */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
		reenable_intr = false;
	}
	if (task_requests != 0) {
		/* Re-enabling other interrupts is done in the admin task */
		task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
		atomic_or_32(&adapter->task_requests, task_requests);
		ixgbe_schedule_admin_tasklet(adapter);
		reenable_intr = false;
	}

	if (reenable_intr == true)
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
5242
5243 /************************************************************************
5244 * ixgbe_free_pciintr_resources
5245 ************************************************************************/
5246 static void
5247 ixgbe_free_pciintr_resources(struct adapter *adapter)
5248 {
5249 struct ix_queue *que = adapter->queues;
5250 int rid;
5251
5252 /*
5253 * Release all msix queue resources:
5254 */
5255 for (int i = 0; i < adapter->num_queues; i++, que++) {
5256 if (que->res != NULL) {
5257 pci_intr_disestablish(adapter->osdep.pc,
5258 adapter->osdep.ihs[i]);
5259 adapter->osdep.ihs[i] = NULL;
5260 }
5261 }
5262
5263 /* Clean the Legacy or Link interrupt last */
5264 if (adapter->vector) /* we are doing MSIX */
5265 rid = adapter->vector;
5266 else
5267 rid = 0;
5268
5269 if (adapter->osdep.ihs[rid] != NULL) {
5270 pci_intr_disestablish(adapter->osdep.pc,
5271 adapter->osdep.ihs[rid]);
5272 adapter->osdep.ihs[rid] = NULL;
5273 }
5274
5275 if (adapter->osdep.intrs != NULL) {
5276 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5277 adapter->osdep.nintrs);
5278 adapter->osdep.intrs = NULL;
5279 }
5280 } /* ixgbe_free_pciintr_resources */
5281
5282 /************************************************************************
5283 * ixgbe_free_pci_resources
5284 ************************************************************************/
5285 static void
5286 ixgbe_free_pci_resources(struct adapter *adapter)
5287 {
5288
5289 ixgbe_free_pciintr_resources(adapter);
5290
5291 if (adapter->osdep.mem_size != 0) {
5292 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5293 adapter->osdep.mem_bus_space_handle,
5294 adapter->osdep.mem_size);
5295 }
5296
5297 } /* ixgbe_free_pci_resources */
5298
5299 /************************************************************************
5300 * ixgbe_set_sysctl_value
5301 ************************************************************************/
5302 static void
5303 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5304 const char *description, int *limit, int value)
5305 {
5306 device_t dev = adapter->dev;
5307 struct sysctllog **log;
5308 const struct sysctlnode *rnode, *cnode;
5309
5310 /*
5311 * It's not required to check recovery mode because this function never
5312 * touches hardware.
5313 */
5314
5315 log = &adapter->sysctllog;
5316 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5317 aprint_error_dev(dev, "could not create sysctl root\n");
5318 return;
5319 }
5320 if (sysctl_createv(log, 0, &rnode, &cnode,
5321 CTLFLAG_READWRITE, CTLTYPE_INT,
5322 name, SYSCTL_DESCR(description),
5323 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5324 aprint_error_dev(dev, "could not create sysctl\n");
5325 *limit = value;
5326 } /* ixgbe_set_sysctl_value */
5327
5328 /************************************************************************
5329 * ixgbe_sysctl_flowcntl
5330 *
5331 * SYSCTL wrapper around setting Flow Control
5332 ************************************************************************/
5333 static int
5334 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5335 {
5336 struct sysctlnode node = *rnode;
5337 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5338 int error, fc;
5339
5340 if (ixgbe_fw_recovery_mode_swflag(adapter))
5341 return (EPERM);
5342
5343 fc = adapter->hw.fc.current_mode;
5344 node.sysctl_data = &fc;
5345 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5346 if (error != 0 || newp == NULL)
5347 return error;
5348
5349 /* Don't bother if it's not changed */
5350 if (fc == adapter->hw.fc.current_mode)
5351 return (0);
5352
5353 return ixgbe_set_flowcntl(adapter, fc);
5354 } /* ixgbe_sysctl_flowcntl */
5355
5356 /************************************************************************
5357 * ixgbe_set_flowcntl - Set flow control
5358 *
5359 * Flow control values:
5360 * 0 - off
5361 * 1 - rx pause
5362 * 2 - tx pause
5363 * 3 - full
5364 ************************************************************************/
5365 static int
5366 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5367 {
5368 switch (fc) {
5369 case ixgbe_fc_rx_pause:
5370 case ixgbe_fc_tx_pause:
5371 case ixgbe_fc_full:
5372 adapter->hw.fc.requested_mode = fc;
5373 if (adapter->num_queues > 1)
5374 ixgbe_disable_rx_drop(adapter);
5375 break;
5376 case ixgbe_fc_none:
5377 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5378 if (adapter->num_queues > 1)
5379 ixgbe_enable_rx_drop(adapter);
5380 break;
5381 default:
5382 return (EINVAL);
5383 }
5384
5385 #if 0 /* XXX NetBSD */
5386 /* Don't autoneg if forcing a value */
5387 adapter->hw.fc.disable_fc_autoneg = TRUE;
5388 #endif
5389 ixgbe_fc_enable(&adapter->hw);
5390
5391 return (0);
5392 } /* ixgbe_set_flowcntl */
5393
5394 /************************************************************************
5395 * ixgbe_enable_rx_drop
5396 *
5397 * Enable the hardware to drop packets when the buffer is
5398 * full. This is useful with multiqueue, so that no single
5399 * queue being full stalls the entire RX engine. We only
5400 * enable this when Multiqueue is enabled AND Flow Control
5401 * is disabled.
5402 ************************************************************************/
5403 static void
5404 ixgbe_enable_rx_drop(struct adapter *adapter)
5405 {
5406 struct ixgbe_hw *hw = &adapter->hw;
5407 struct rx_ring *rxr;
5408 u32 srrctl;
5409
5410 for (int i = 0; i < adapter->num_queues; i++) {
5411 rxr = &adapter->rx_rings[i];
5412 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5413 srrctl |= IXGBE_SRRCTL_DROP_EN;
5414 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5415 }
5416
5417 /* enable drop for each vf */
5418 for (int i = 0; i < adapter->num_vfs; i++) {
5419 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5420 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5421 IXGBE_QDE_ENABLE));
5422 }
5423 } /* ixgbe_enable_rx_drop */
5424
5425 /************************************************************************
5426 * ixgbe_disable_rx_drop
5427 ************************************************************************/
5428 static void
5429 ixgbe_disable_rx_drop(struct adapter *adapter)
5430 {
5431 struct ixgbe_hw *hw = &adapter->hw;
5432 struct rx_ring *rxr;
5433 u32 srrctl;
5434
5435 for (int i = 0; i < adapter->num_queues; i++) {
5436 rxr = &adapter->rx_rings[i];
5437 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5438 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5439 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5440 }
5441
5442 /* disable drop for each vf */
5443 for (int i = 0; i < adapter->num_vfs; i++) {
5444 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5445 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5446 }
5447 } /* ixgbe_disable_rx_drop */
5448
5449 /************************************************************************
5450 * ixgbe_sysctl_advertise
5451 *
5452 * SYSCTL wrapper around setting advertised speed
5453 ************************************************************************/
5454 static int
5455 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5456 {
5457 struct sysctlnode node = *rnode;
5458 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5459 int error = 0, advertise;
5460
5461 if (ixgbe_fw_recovery_mode_swflag(adapter))
5462 return (EPERM);
5463
5464 advertise = adapter->advertise;
5465 node.sysctl_data = &advertise;
5466 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5467 if (error != 0 || newp == NULL)
5468 return error;
5469
5470 return ixgbe_set_advertise(adapter, advertise);
5471 } /* ixgbe_sysctl_advertise */
5472
5473 /************************************************************************
5474 * ixgbe_set_advertise - Control advertised link speed
5475 *
5476 * Flags:
5477 * 0x00 - Default (all capable link speed)
5478 * 0x01 - advertise 100 Mb
5479 * 0x02 - advertise 1G
5480 * 0x04 - advertise 10G
5481 * 0x08 - advertise 10 Mb
5482 * 0x10 - advertise 2.5G
5483 * 0x20 - advertise 5G
5484 ************************************************************************/
5485 static int
5486 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5487 {
5488 device_t dev;
5489 struct ixgbe_hw *hw;
5490 ixgbe_link_speed speed = 0;
5491 ixgbe_link_speed link_caps = 0;
5492 s32 err = IXGBE_NOT_IMPLEMENTED;
5493 bool negotiate = FALSE;
5494
5495 /* Checks to validate new value */
5496 if (adapter->advertise == advertise) /* no change */
5497 return (0);
5498
5499 dev = adapter->dev;
5500 hw = &adapter->hw;
5501
5502 /* No speed changes for backplane media */
5503 if (hw->phy.media_type == ixgbe_media_type_backplane)
5504 return (ENODEV);
5505
5506 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5507 (hw->phy.multispeed_fiber))) {
5508 device_printf(dev,
5509 "Advertised speed can only be set on copper or "
5510 "multispeed fiber media types.\n");
5511 return (EINVAL);
5512 }
5513
5514 if (advertise < 0x0 || advertise > 0x2f) {
5515 device_printf(dev,
5516 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5517 return (EINVAL);
5518 }
5519
5520 if (hw->mac.ops.get_link_capabilities) {
5521 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5522 &negotiate);
5523 if (err != IXGBE_SUCCESS) {
5524 device_printf(dev, "Unable to determine supported advertise speeds\n");
5525 return (ENODEV);
5526 }
5527 }
5528
5529 /* Set new value and report new advertised mode */
5530 if (advertise & 0x1) {
5531 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5532 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5533 return (EINVAL);
5534 }
5535 speed |= IXGBE_LINK_SPEED_100_FULL;
5536 }
5537 if (advertise & 0x2) {
5538 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5539 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5540 return (EINVAL);
5541 }
5542 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5543 }
5544 if (advertise & 0x4) {
5545 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5546 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5547 return (EINVAL);
5548 }
5549 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5550 }
5551 if (advertise & 0x8) {
5552 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5553 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5554 return (EINVAL);
5555 }
5556 speed |= IXGBE_LINK_SPEED_10_FULL;
5557 }
5558 if (advertise & 0x10) {
5559 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5560 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5561 return (EINVAL);
5562 }
5563 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5564 }
5565 if (advertise & 0x20) {
5566 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5567 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5568 return (EINVAL);
5569 }
5570 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5571 }
5572 if (advertise == 0)
5573 speed = link_caps; /* All capable link speed */
5574
5575 hw->mac.autotry_restart = TRUE;
5576 hw->mac.ops.setup_link(hw, speed, TRUE);
5577 adapter->advertise = advertise;
5578
5579 return (0);
5580 } /* ixgbe_set_advertise */
5581
5582 /************************************************************************
5583 * ixgbe_get_advertise - Get current advertised speed settings
5584 *
5585 * Formatted for sysctl usage.
5586 * Flags:
5587 * 0x01 - advertise 100 Mb
5588 * 0x02 - advertise 1G
5589 * 0x04 - advertise 10G
5590 * 0x08 - advertise 10 Mb (yes, Mb)
5591 * 0x10 - advertise 2.5G
5592 * 0x20 - advertise 5G
5593 ************************************************************************/
5594 static int
5595 ixgbe_get_advertise(struct adapter *adapter)
5596 {
5597 struct ixgbe_hw *hw = &adapter->hw;
5598 int speed;
5599 ixgbe_link_speed link_caps = 0;
5600 s32 err;
5601 bool negotiate = FALSE;
5602
5603 /*
5604 * Advertised speed means nothing unless it's copper or
5605 * multi-speed fiber
5606 */
5607 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5608 !(hw->phy.multispeed_fiber))
5609 return (0);
5610
5611 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5612 if (err != IXGBE_SUCCESS)
5613 return (0);
5614
5615 speed =
5616 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5617 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5618 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5619 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5620 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5621 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5622
5623 return speed;
5624 } /* ixgbe_get_advertise */
5625
5626 /************************************************************************
5627 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5628 *
5629 * Control values:
5630 * 0/1 - off / on (use default value of 1000)
5631 *
5632 * Legal timer values are:
5633 * 50,100,250,500,1000,2000,5000,10000
5634 *
5635 * Turning off interrupt moderation will also turn this off.
5636 ************************************************************************/
5637 static int
5638 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5639 {
5640 struct sysctlnode node = *rnode;
5641 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5642 struct ifnet *ifp = adapter->ifp;
5643 int error;
5644 int newval;
5645
5646 if (ixgbe_fw_recovery_mode_swflag(adapter))
5647 return (EPERM);
5648
5649 newval = adapter->dmac;
5650 node.sysctl_data = &newval;
5651 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5652 if ((error) || (newp == NULL))
5653 return (error);
5654
5655 switch (newval) {
5656 case 0:
5657 /* Disabled */
5658 adapter->dmac = 0;
5659 break;
5660 case 1:
5661 /* Enable and use default */
5662 adapter->dmac = 1000;
5663 break;
5664 case 50:
5665 case 100:
5666 case 250:
5667 case 500:
5668 case 1000:
5669 case 2000:
5670 case 5000:
5671 case 10000:
5672 /* Legal values - allow */
5673 adapter->dmac = newval;
5674 break;
5675 default:
5676 /* Do nothing, illegal value */
5677 return (EINVAL);
5678 }
5679
5680 /* Re-initialize hardware if it's already running */
5681 if (ifp->if_flags & IFF_RUNNING)
5682 ifp->if_init(ifp);
5683
5684 return (0);
5685 }
5686
5687 #ifdef IXGBE_DEBUG
5688 /************************************************************************
5689 * ixgbe_sysctl_power_state
5690 *
5691 * Sysctl to test power states
5692 * Values:
5693 * 0 - set device to D0
5694 * 3 - set device to D3
5695 * (none) - get current device power state
5696 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
/*
 * The whole body is compiled out: it uses FreeBSD power-management
 * interfaces (pci_get_powerstate, DEVICE_SUSPEND/RESUME, req->newp)
 * that have no direct NetBSD equivalents yet.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	/* Refuse while firmware is in recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/*
	 * NOTE(review): "req->newp" is FreeBSD sysctl style; NetBSD
	 * would use plain "newp".  Harmless while under "notyet".
	 */
	if ((error) || (req->newp == NULL))
		return (error);

	/* Read-only access, or no change requested */
	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5732 #endif
5733
5734 /************************************************************************
5735 * ixgbe_sysctl_wol_enable
5736 *
5737 * Sysctl to enable/disable the WoL capability,
5738 * if supported by the adapter.
5739 *
5740 * Values:
5741 * 0 - disabled
5742 * 1 - enabled
5743 ************************************************************************/
5744 static int
5745 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5746 {
5747 struct sysctlnode node = *rnode;
5748 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5749 struct ixgbe_hw *hw = &adapter->hw;
5750 bool new_wol_enabled;
5751 int error = 0;
5752
5753 /*
5754 * It's not required to check recovery mode because this function never
5755 * touches hardware.
5756 */
5757 new_wol_enabled = hw->wol_enabled;
5758 node.sysctl_data = &new_wol_enabled;
5759 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5760 if ((error) || (newp == NULL))
5761 return (error);
5762 if (new_wol_enabled == hw->wol_enabled)
5763 return (0);
5764
5765 if (new_wol_enabled && !adapter->wol_support)
5766 return (ENODEV);
5767 else
5768 hw->wol_enabled = new_wol_enabled;
5769
5770 return (0);
5771 } /* ixgbe_sysctl_wol_enable */
5772
5773 /************************************************************************
5774 * ixgbe_sysctl_wufc - Wake Up Filter Control
5775 *
5776 * Sysctl to enable/disable the types of packets that the
5777 * adapter will wake up on upon receipt.
5778 * Flags:
5779 * 0x1 - Link Status Change
5780 * 0x2 - Magic Packet
5781 * 0x4 - Direct Exact
5782 * 0x8 - Directed Multicast
5783 * 0x10 - Broadcast
5784 * 0x20 - ARP/IPv4 Request Packet
5785 * 0x40 - Direct IPv4 Packet
5786 * 0x80 - Direct IPv6 Packet
5787 *
5788 * Settings not listed above will cause the sysctl to return an error.
5789 ************************************************************************/
5790 static int
5791 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5792 {
5793 struct sysctlnode node = *rnode;
5794 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5795 int error = 0;
5796 u32 new_wufc;
5797
5798 /*
5799 * It's not required to check recovery mode because this function never
5800 * touches hardware.
5801 */
5802 new_wufc = adapter->wufc;
5803 node.sysctl_data = &new_wufc;
5804 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5805 if ((error) || (newp == NULL))
5806 return (error);
5807 if (new_wufc == adapter->wufc)
5808 return (0);
5809
5810 if (new_wufc & 0xffffff00)
5811 return (EINVAL);
5812
5813 new_wufc &= 0xff;
5814 new_wufc |= (0xffffff & adapter->wufc);
5815 adapter->wufc = new_wufc;
5816
5817 return (0);
5818 } /* ixgbe_sysctl_wufc */
5819
5820 #ifdef IXGBE_DEBUG
5821 /************************************************************************
5822 * ixgbe_sysctl_print_rss_config
5823 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
/*
 * Compiled out: the body uses FreeBSD's sbuf facility
 * (sbuf_new_for_sysctl et al.), which is not available here.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Refuse while firmware is in recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has the extended (128-entry) table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			/* First 32 entries live in the RETA registers */
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Remaining entries are in the extended ERETA set */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5880 #endif /* IXGBE_DEBUG */
5881
5882 /************************************************************************
5883 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5884 *
5885 * For X552/X557-AT devices using an external PHY
5886 ************************************************************************/
5887 static int
5888 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5889 {
5890 struct sysctlnode node = *rnode;
5891 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5892 struct ixgbe_hw *hw = &adapter->hw;
5893 int val;
5894 u16 reg;
5895 int error;
5896
5897 if (ixgbe_fw_recovery_mode_swflag(adapter))
5898 return (EPERM);
5899
5900 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5901 device_printf(adapter->dev,
5902 "Device has no supported external thermal sensor.\n");
5903 return (ENODEV);
5904 }
5905
5906 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5907 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5908 device_printf(adapter->dev,
5909 "Error reading from PHY's current temperature register\n");
5910 return (EAGAIN);
5911 }
5912
5913 node.sysctl_data = &val;
5914
5915 /* Shift temp for output */
5916 val = reg >> 8;
5917
5918 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5919 if ((error) || (newp == NULL))
5920 return (error);
5921
5922 return (0);
5923 } /* ixgbe_sysctl_phy_temp */
5924
5925 /************************************************************************
5926 * ixgbe_sysctl_phy_overtemp_occurred
5927 *
5928 * Reports (directly from the PHY) whether the current PHY
5929 * temperature is over the overtemp threshold.
5930 ************************************************************************/
5931 static int
5932 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5933 {
5934 struct sysctlnode node = *rnode;
5935 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5936 struct ixgbe_hw *hw = &adapter->hw;
5937 int val, error;
5938 u16 reg;
5939
5940 if (ixgbe_fw_recovery_mode_swflag(adapter))
5941 return (EPERM);
5942
5943 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5944 device_printf(adapter->dev,
5945 "Device has no supported external thermal sensor.\n");
5946 return (ENODEV);
5947 }
5948
5949 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5950 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5951 device_printf(adapter->dev,
5952 "Error reading from PHY's temperature status register\n");
5953 return (EAGAIN);
5954 }
5955
5956 node.sysctl_data = &val;
5957
5958 /* Get occurrence bit */
5959 val = !!(reg & 0x4000);
5960
5961 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5962 if ((error) || (newp == NULL))
5963 return (error);
5964
5965 return (0);
5966 } /* ixgbe_sysctl_phy_overtemp_occurred */
5967
5968 /************************************************************************
5969 * ixgbe_sysctl_eee_state
5970 *
5971 * Sysctl to set EEE power saving feature
5972 * Values:
5973 * 0 - disable EEE
5974 * 1 - enable EEE
5975 * (none) - get current device EEE state
5976 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	int curr_eee, new_eee, error = 0;
	s32 retval;

	/* Refuse while firmware is in recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Export the current software EEE state; pick up any write */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	/* Program the hardware before updating any software state */
	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
6027
/*
 * PRINTQS(adapter, regname) - dump the per-queue register "regname"
 * (e.g. RDH, RDT) for every configured queue on one console line,
 * in queue order.  Used by ixgbe_print_debug_info().
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
6041
6042 /************************************************************************
6043 * ixgbe_print_debug_info
6044 *
6045 * Called only when em_display_debug_stats is enabled.
6046 * Provides a way to take a look at important statistics
6047 * maintained by the driver and hardware.
6048 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* X550 family has the extended (128-entry) RSS redirection table */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the RSS redirection table (RETA, then extended ERETA) */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Header row of queue indices for the PRINTQS dumps below */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	/* Per-queue RX ring registers, one row per register */
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* RQSMR covers four queues per register */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter of how often each queue was disabled */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask registers; EIMS_EX only exists on 82599+ */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
} /* ixgbe_print_debug_info */
6114
6115 /************************************************************************
6116 * ixgbe_sysctl_debug
6117 ************************************************************************/
6118 static int
6119 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6120 {
6121 struct sysctlnode node = *rnode;
6122 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6123 int error, result = 0;
6124
6125 if (ixgbe_fw_recovery_mode_swflag(adapter))
6126 return (EPERM);
6127
6128 node.sysctl_data = &result;
6129 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6130
6131 if (error || newp == NULL)
6132 return error;
6133
6134 if (result == 1)
6135 ixgbe_print_debug_info(adapter);
6136
6137 return 0;
6138 } /* ixgbe_sysctl_debug */
6139
6140 /************************************************************************
6141 * ixgbe_init_device_features
6142 ************************************************************************/
6143 static void
6144 ixgbe_init_device_features(struct adapter *adapter)
6145 {
6146 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6147 | IXGBE_FEATURE_RSS
6148 | IXGBE_FEATURE_MSI
6149 | IXGBE_FEATURE_MSIX
6150 | IXGBE_FEATURE_LEGACY_IRQ
6151 | IXGBE_FEATURE_LEGACY_TX;
6152
6153 /* Set capabilities first... */
6154 switch (adapter->hw.mac.type) {
6155 case ixgbe_mac_82598EB:
6156 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6157 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6158 break;
6159 case ixgbe_mac_X540:
6160 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6161 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6162 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6163 (adapter->hw.bus.func == 0))
6164 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6165 break;
6166 case ixgbe_mac_X550:
6167 /*
6168 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6169 * NVM Image version.
6170 */
6171 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6172 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6173 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6174 break;
6175 case ixgbe_mac_X550EM_x:
6176 /*
6177 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6178 * NVM Image version.
6179 */
6180 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6181 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6182 break;
6183 case ixgbe_mac_X550EM_a:
6184 /*
6185 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6186 * NVM Image version.
6187 */
6188 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6189 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6190 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6191 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6192 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6193 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6194 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6195 }
6196 break;
6197 case ixgbe_mac_82599EB:
6198 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6199 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6200 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6201 (adapter->hw.bus.func == 0))
6202 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6203 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6204 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6205 break;
6206 default:
6207 break;
6208 }
6209
6210 /* Enabled by default... */
6211 /* Fan failure detection */
6212 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6213 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6214 /* Netmap */
6215 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6216 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6217 /* EEE */
6218 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6219 adapter->feat_en |= IXGBE_FEATURE_EEE;
6220 /* Thermal Sensor */
6221 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6222 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6223 /*
6224 * Recovery mode:
6225 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6226 * NVM Image version.
6227 */
6228
6229 /* Enabled via global sysctl... */
6230 /* Flow Director */
6231 if (ixgbe_enable_fdir) {
6232 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6233 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6234 else
6235 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6236 }
6237 /* Legacy (single queue) transmit */
6238 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6239 ixgbe_enable_legacy_tx)
6240 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6241 /*
6242 * Message Signal Interrupts - Extended (MSI-X)
6243 * Normal MSI is only enabled if MSI-X calls fail.
6244 */
6245 if (!ixgbe_enable_msix)
6246 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6247 /* Receive-Side Scaling (RSS) */
6248 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6249 adapter->feat_en |= IXGBE_FEATURE_RSS;
6250
6251 /* Disable features with unmet dependencies... */
6252 /* No MSI-X */
6253 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6254 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6255 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6256 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6257 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6258 }
6259 } /* ixgbe_init_device_features */
6260
6261 /************************************************************************
6262 * ixgbe_probe - Device identification routine
6263 *
6264 * Determines if the driver should be loaded on
6265 * adapter based on its PCI vendor/device ID.
6266 *
6267 * return BUS_PROBE_DEFAULT on success, positive on failure
6268 ************************************************************************/
6269 static int
6270 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6271 {
6272 const struct pci_attach_args *pa = aux;
6273
6274 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6275 }
6276
6277 static const ixgbe_vendor_info_t *
6278 ixgbe_lookup(const struct pci_attach_args *pa)
6279 {
6280 const ixgbe_vendor_info_t *ent;
6281 pcireg_t subid;
6282
6283 INIT_DEBUGOUT("ixgbe_lookup: begin");
6284
6285 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6286 return NULL;
6287
6288 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6289
6290 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6291 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6292 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6293 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6294 (ent->subvendor_id == 0)) &&
6295 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6296 (ent->subdevice_id == 0))) {
6297 return ent;
6298 }
6299 }
6300 return NULL;
6301 }
6302
/*
 * ixgbe_ifflags_cb - ethercom callback run when interface flags or
 * ethernet capabilities change.  Handles the changes it can in place
 * and returns ENETRESET when a full interface re-init is required.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Record which if_flags bits flipped since the last call */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		/* Anything beyond PROMISC/DEBUG needs a full reset */
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0)
		ixgbe_set_rxfilter(adapter);

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		ixgbe_setup_vlan_hw_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
6346
6347 /************************************************************************
6348 * ixgbe_ioctl - Ioctl entry point
6349 *
6350 * Called when the user wants to configure the interface.
6351 *
6352 * return 0 on success, positive on failure
6353 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int		error = 0;
	int l4csum_en;
	/* L4 Rx checksum capabilities that must be toggled together */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug logging only (except SIOCZIFDATA, which
	 * also clears the hardware and software counters).  The actual
	 * command dispatch happens in the second switch below.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both hardware and software statistics */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2)
		 * device addresses are valid */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the generic ethernet layer handle it first */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes need a full re-init */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6488
6489 /************************************************************************
6490 * ixgbe_check_fan_failure
6491 ************************************************************************/
6492 static void
6493 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6494 {
6495 u32 mask;
6496
6497 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6498 IXGBE_ESDP_SDP1;
6499
6500 if (reg & mask)
6501 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6502 } /* ixgbe_check_fan_failure */
6503
6504 /************************************************************************
6505 * ixgbe_handle_que
6506 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;

	/* Count every invocation of this deferred handler */
	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Clean RX first, then TX; "more" means work remains */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule instead of re-enabling the
		 * interrupt, so we don't livelock under load */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* Legacy/MSI: re-enable all interrupts */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6544
6545 /************************************************************************
6546 * ixgbe_handle_que_work
6547 ************************************************************************/
6548 static void
6549 ixgbe_handle_que_work(struct work *wk, void *context)
6550 {
6551 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6552
6553 /*
6554 * "enqueued flag" is not required here.
6555 * See ixgbe_msix_que().
6556 */
6557 ixgbe_handle_que(que);
6558 }
6559
6560 /************************************************************************
6561 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6562 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char		intrbuf[PCI_INTRSTR_LEN];
	char		wqname[MAXCOMLEN];
	const char	*intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/*
		 * Establish failed.  If we were trying MSI, release it
		 * and retry the whole allocation with INTx; if INTx
		 * also fails we fall through to the error report below.
		 */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record the interrupt type that actually stuck */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		/* Softint and workqueue for deferred multiqueue TX start */
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	/* Softint and workqueue for the RX/TX queue handler */
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6668
6669 /************************************************************************
6670 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6671 ************************************************************************/
6672 static int
6673 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6674 {
6675 device_t dev = adapter->dev;
6676 struct ix_queue *que = adapter->queues;
6677 struct tx_ring *txr = adapter->tx_rings;
6678 pci_chipset_tag_t pc;
6679 char intrbuf[PCI_INTRSTR_LEN];
6680 char intr_xname[32];
6681 char wqname[MAXCOMLEN];
6682 const char *intrstr = NULL;
6683 int error, vector = 0;
6684 int cpu_id = 0;
6685 kcpuset_t *affinity;
6686 #ifdef RSS
6687 unsigned int rss_buckets = 0;
6688 kcpuset_t cpu_mask;
6689 #endif
6690
6691 pc = adapter->osdep.pc;
6692 #ifdef RSS
6693 /*
6694 * If we're doing RSS, the number of queues needs to
6695 * match the number of RSS buckets that are configured.
6696 *
6697 * + If there's more queues than RSS buckets, we'll end
6698 * up with queues that get no traffic.
6699 *
6700 * + If there's more RSS buckets than queues, we'll end
6701 * up having multiple RSS buckets map to the same queue,
6702 * so there'll be some contention.
6703 */
6704 rss_buckets = rss_getnumbuckets();
6705 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6706 (adapter->num_queues != rss_buckets)) {
6707 device_printf(dev,
6708 "%s: number of queues (%d) != number of RSS buckets (%d)"
6709 "; performance will be impacted.\n",
6710 __func__, adapter->num_queues, rss_buckets);
6711 }
6712 #endif
6713
6714 adapter->osdep.nintrs = adapter->num_queues + 1;
6715 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6716 adapter->osdep.nintrs) != 0) {
6717 aprint_error_dev(dev,
6718 "failed to allocate MSI-X interrupt\n");
6719 return (ENXIO);
6720 }
6721
6722 kcpuset_create(&affinity, false);
6723 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6724 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6725 device_xname(dev), i);
6726 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6727 sizeof(intrbuf));
6728 #ifdef IXGBE_MPSAFE
6729 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6730 true);
6731 #endif
6732 /* Set the handler function */
6733 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6734 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6735 intr_xname);
6736 if (que->res == NULL) {
6737 aprint_error_dev(dev,
6738 "Failed to register QUE handler\n");
6739 error = ENXIO;
6740 goto err_out;
6741 }
6742 que->msix = vector;
6743 adapter->active_queues |= 1ULL << que->msix;
6744
6745 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6746 #ifdef RSS
6747 /*
6748 * The queue ID is used as the RSS layer bucket ID.
6749 * We look up the queue ID -> RSS CPU ID and select
6750 * that.
6751 */
6752 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6753 CPU_SETOF(cpu_id, &cpu_mask);
6754 #endif
6755 } else {
6756 /*
6757 * Bind the MSI-X vector, and thus the
6758 * rings to the corresponding CPU.
6759 *
6760 * This just happens to match the default RSS
6761 * round-robin bucket -> queue -> CPU allocation.
6762 */
6763 if (adapter->num_queues > 1)
6764 cpu_id = i;
6765 }
6766 /* Round-robin affinity */
6767 kcpuset_zero(affinity);
6768 kcpuset_set(affinity, cpu_id % ncpu);
6769 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6770 NULL);
6771 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6772 intrstr);
6773 if (error == 0) {
6774 #if 1 /* def IXGBE_DEBUG */
6775 #ifdef RSS
6776 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6777 cpu_id % ncpu);
6778 #else
6779 aprint_normal(", bound queue %d to cpu %d", i,
6780 cpu_id % ncpu);
6781 #endif
6782 #endif /* IXGBE_DEBUG */
6783 }
6784 aprint_normal("\n");
6785
6786 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6787 txr->txr_si = softint_establish(
6788 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6789 ixgbe_deferred_mq_start, txr);
6790 if (txr->txr_si == NULL) {
6791 aprint_error_dev(dev,
6792 "couldn't establish software interrupt\n");
6793 error = ENXIO;
6794 goto err_out;
6795 }
6796 }
6797 que->que_si
6798 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6799 ixgbe_handle_que, que);
6800 if (que->que_si == NULL) {
6801 aprint_error_dev(dev,
6802 "couldn't establish software interrupt\n");
6803 error = ENXIO;
6804 goto err_out;
6805 }
6806 }
6807 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6808 error = workqueue_create(&adapter->txr_wq, wqname,
6809 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6810 IXGBE_WORKQUEUE_FLAGS);
6811 if (error) {
6812 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6813 goto err_out;
6814 }
6815 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6816
6817 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6818 error = workqueue_create(&adapter->que_wq, wqname,
6819 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6820 IXGBE_WORKQUEUE_FLAGS);
6821 if (error) {
6822 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6823 goto err_out;
6824 }
6825
6826 /* and Link */
6827 cpu_id++;
6828 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6829 adapter->vector = vector;
6830 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6831 sizeof(intrbuf));
6832 #ifdef IXGBE_MPSAFE
6833 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6834 true);
6835 #endif
6836 /* Set the link handler function */
6837 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6838 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6839 intr_xname);
6840 if (adapter->osdep.ihs[vector] == NULL) {
6841 aprint_error_dev(dev, "Failed to register LINK handler\n");
6842 error = ENXIO;
6843 goto err_out;
6844 }
6845 /* Round-robin affinity */
6846 kcpuset_zero(affinity);
6847 kcpuset_set(affinity, cpu_id % ncpu);
6848 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6849 NULL);
6850
6851 aprint_normal_dev(dev,
6852 "for link, interrupting at %s", intrstr);
6853 if (error == 0)
6854 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6855 else
6856 aprint_normal("\n");
6857
6858 kcpuset_destroy(affinity);
6859 aprint_normal_dev(dev,
6860 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6861
6862 return (0);
6863
6864 err_out:
6865 kcpuset_destroy(affinity);
6866 ixgbe_free_workqueue(adapter);
6867 ixgbe_free_pciintr_resources(adapter);
6868 return (error);
6869 } /* ixgbe_allocate_msix */
6870
6871 /************************************************************************
6872 * ixgbe_configure_interrupts
6873 *
6874 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6875 * This will also depend on user settings.
6876 ************************************************************************/
6877 static int
6878 ixgbe_configure_interrupts(struct adapter *adapter)
6879 {
6880 device_t dev = adapter->dev;
6881 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6882 int want, queues, msgs;
6883
6884 /* Default to 1 queue if MSI-X setup fails */
6885 adapter->num_queues = 1;
6886
6887 /* Override by tuneable */
6888 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6889 goto msi;
6890
6891 /*
6892 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6893 * interrupt slot.
6894 */
6895 if (ncpu == 1)
6896 goto msi;
6897
6898 /* First try MSI-X */
6899 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6900 msgs = MIN(msgs, IXG_MAX_NINTR);
6901 if (msgs < 2)
6902 goto msi;
6903
6904 adapter->msix_mem = (void *)1; /* XXX */
6905
6906 /* Figure out a reasonable auto config value */
6907 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6908
6909 #ifdef RSS
6910 /* If we're doing RSS, clamp at the number of RSS buckets */
6911 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6912 queues = uimin(queues, rss_getnumbuckets());
6913 #endif
6914 if (ixgbe_num_queues > queues) {
6915 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6916 ixgbe_num_queues = queues;
6917 }
6918
6919 if (ixgbe_num_queues != 0)
6920 queues = ixgbe_num_queues;
6921 else
6922 queues = uimin(queues,
6923 uimin(mac->max_tx_queues, mac->max_rx_queues));
6924
6925 /* reflect correct sysctl value */
6926 ixgbe_num_queues = queues;
6927
6928 /*
6929 * Want one vector (RX/TX pair) per queue
6930 * plus an additional for Link.
6931 */
6932 want = queues + 1;
6933 if (msgs >= want)
6934 msgs = want;
6935 else {
6936 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6937 "%d vectors but %d queues wanted!\n",
6938 msgs, want);
6939 goto msi;
6940 }
6941 adapter->num_queues = queues;
6942 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6943 return (0);
6944
6945 /*
6946 * MSI-X allocation failed or provided us with
6947 * less vectors than needed. Free MSI-X resources
6948 * and we'll try enabling MSI.
6949 */
6950 msi:
6951 /* Without MSI-X, some features are no longer supported */
6952 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6953 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6954 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6955 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6956
6957 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6958 adapter->msix_mem = NULL; /* XXX */
6959 if (msgs > 1)
6960 msgs = 1;
6961 if (msgs != 0) {
6962 msgs = 1;
6963 adapter->feat_en |= IXGBE_FEATURE_MSI;
6964 return (0);
6965 }
6966
6967 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6968 aprint_error_dev(dev,
6969 "Device does not support legacy interrupts.\n");
6970 return 1;
6971 }
6972
6973 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6974
6975 return (0);
6976 } /* ixgbe_configure_interrupts */
6977
6978
6979 /************************************************************************
6980 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6981 *
6982 * Done outside of interrupt context since the driver might sleep
6983 ************************************************************************/
6984 static void
6985 ixgbe_handle_link(void *context)
6986 {
6987 struct adapter *adapter = context;
6988 struct ixgbe_hw *hw = &adapter->hw;
6989
6990 ++adapter->link_workev.ev_count;
6991 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6992 ixgbe_update_link_status(adapter);
6993
6994 /* Re-enable link interrupts */
6995 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6996 } /* ixgbe_handle_link */
6997
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 *   Write the given queue bitmap into the EICS register(s) to trigger
 *   a (software-initiated) interrupt for those queues.
 *
 *   NOTE(review): compiled out with #if 0 -- currently unused; kept
 *   for reference.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: single 32-bit EICS register, queue bits masked. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Newer MACs: 64 queue bits split across two EICS_EX regs. */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		/* Unknown MAC type: nothing to rearm. */
		break;
	}
} /* ixgbe_rearm_queues */
#endif
7027