ixgbe.c revision 1.242 1 /* $NetBSD: ixgbe.c,v 1.242 2020/08/24 18:31:14 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
/* Version string printed (with the branding string) in ixgbe_attach(). */
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	/* X550EM_x */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	/* X550EM_a */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
150
151 /************************************************************************
152 * Table of branding strings
153 ************************************************************************/
/*
 * Indexed by the string-index field (last member) of the entries in
 * ixgbe_vendor_info_array; every entry above uses index 0.
 */
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
157
158 /************************************************************************
159 * Function prototypes
160 ************************************************************************/
/* Autoconfiguration (driver attach/detach) entry points */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
/* pmf(9) power-management hooks */
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
/* ifnet / ethercom interface callbacks */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
/* PCI resource and interrupt setup/teardown */
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_workqueue(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
/* Periodic timers (callout + deferred workqueue halves) */
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_schedule_admin_tasklet(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

/* TX/RX hardware ring initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt, statistics and link management helpers */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN hardware tagging/filtering */
static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

/* sysctl / evcnt registration and tunables */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_cage_full(struct ixgbe_hw *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

/* PCI ID table lookup used by probe/attach */
static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
281
282 /************************************************************************
283 * NetBSD Device Interface Entry Points
284 ************************************************************************/
/*
 * autoconf(9) glue: registers the "ixg" attachment with per-device
 * softc of sizeof(struct adapter).  DVF_DETACH_SHUTDOWN allows the
 * device to be detached at system shutdown.
 */
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/* FreeBSD module plumbing, kept for reference but compiled out on NetBSD. */
#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
299
300 /*
301 * TUNEABLE PARAMETERS:
302 */
303
304 /*
305 * AIM: Adaptive Interrupt Moderation
306 * which means that the interrupt rate
307 * is varied over time based on the
308 * traffic for that interrupt vector
309 */
310 static bool ixgbe_enable_aim = true;
311 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
312 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
313 "Enable adaptive interrupt moderation");
314
315 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
316 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
317 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
318
319 /* How many packets rxeof tries to clean at a time */
320 static int ixgbe_rx_process_limit = 256;
321 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
322 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
323
324 /* How many packets txeof tries to clean at a time */
325 static int ixgbe_tx_process_limit = 256;
326 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
327 &ixgbe_tx_process_limit, 0,
328 "Maximum number of sent packets to process at a time, -1 means unlimited");
329
330 /* Flow control setting, default to full */
331 static int ixgbe_flow_control = ixgbe_fc_full;
332 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
333 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
334
335 /* Which packet processing uses workqueue or softint */
336 static bool ixgbe_txrx_workqueue = false;
337
338 /*
339 * Smart speed setting, default to on
340 * this only works as a compile option
341 * right now as its during attach, set
342 * this to 'ixgbe_smart_speed_off' to
343 * disable.
344 */
345 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
346
347 /*
348 * MSI-X should be the default for best performance,
349 * but this allows it to be forced off for testing.
350 */
351 static int ixgbe_enable_msix = 1;
352 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
353 "Enable MSI-X interrupts");
354
355 /*
356 * Number of Queues, can be set to 0,
357 * it then autoconfigures based on the
358 * number of cpus with a max of 8. This
359 * can be overridden manually here.
360 */
361 static int ixgbe_num_queues = 0;
362 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
363 "Number of queues to configure, 0 indicates autoconfigure");
364
365 /*
366 * Number of TX descriptors per ring,
367 * setting higher than RX as this seems
368 * the better performing choice.
369 */
370 static int ixgbe_txd = PERFORM_TXD;
371 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
372 "Number of transmit descriptors per queue");
373
374 /* Number of RX descriptors per ring */
375 static int ixgbe_rxd = PERFORM_RXD;
376 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
377 "Number of receive descriptors per queue");
378
379 /*
380 * Defining this on will allow the use
381 * of unsupported SFP+ modules, note that
382 * doing so you are on your own :)
383 */
384 static int allow_unsupported_sfp = false;
385 #define TUNABLE_INT(__x, __y)
386 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
387
388 /*
389 * Not sure if Flow Director is fully baked,
390 * so we'll default to turning it off.
391 */
392 static int ixgbe_enable_fdir = 0;
393 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
394 "Enable Flow Director");
395
396 /* Legacy Transmit (single queue) */
397 static int ixgbe_enable_legacy_tx = 0;
398 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
399 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
400
401 /* Receive-Side Scaling */
402 static int ixgbe_enable_rss = 1;
403 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
404 "Enable Receive-Side Scaling (RSS)");
405
406 #if 0
407 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
408 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
409 #endif
410
411 #ifdef NET_MPSAFE
412 #define IXGBE_MPSAFE 1
413 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
414 #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
415 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
416 #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
417 #else
418 #define IXGBE_CALLOUT_FLAGS 0
419 #define IXGBE_SOFTINT_FLAGS 0
420 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
421 #define IXGBE_TASKLET_WQ_FLAGS 0
422 #endif
423 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
424
425 /************************************************************************
426 * ixgbe_initialize_rss_mapping
427 ************************************************************************/
/*
 * Program the RSS redirection table (RETA/ERETA), the RSS key registers
 * (RSSRK) and the hash-field selection (MRQC) for this adapter.
 * Register writes happen in a fixed order; do not reorder.
 */
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always uses the stack-wide RSS key. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 0x11: 82598 encodes queue indices differently in the
		 * redirection table — TODO confirm against the datasheet. */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry table (128 in RETA + 384
		 * in the extended ERETA registers). */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the configured queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Four 8-bit entries are accumulated in 'reta' and the
		 * 32-bit register is flushed every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 live in RETA; 128+ in ERETA. */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit key registers) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the enabled hash types into MRQC field-select bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in the multiple-receive-queue mode for the SR-IOV config */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
541
542 /************************************************************************
543 * ixgbe_initialize_receive_units - Setup receive registers and features.
544 ************************************************************************/
/* Round-up mask so rx_mbuf_sz maps to whole SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

/*
 * Program all receive-side registers: frame acceptance (FCTRL/HLREG0),
 * per-queue descriptor rings (RDBA/RDLEN/SRRCTL/RDH/RDT), statistics
 * mapping (RQSMR), RSS, and checksum offload (RXCSUM).  The register
 * access sequence is order-sensitive; do not reorder.
 */
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Header types the hardware should recognize (non-82598) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
671
672 /************************************************************************
673 * ixgbe_initialize_transmit_units - Enable transmit units.
674 ************************************************************************/
675 static void
676 ixgbe_initialize_transmit_units(struct adapter *adapter)
677 {
678 struct tx_ring *txr = adapter->tx_rings;
679 struct ixgbe_hw *hw = &adapter->hw;
680 int i;
681
682 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
683
684 /* Setup the Base and Length of the Tx Descriptor Ring */
685 for (i = 0; i < adapter->num_queues; i++, txr++) {
686 u64 tdba = txr->txdma.dma_paddr;
687 u32 txctrl = 0;
688 u32 tqsmreg, reg;
689 int regnum = i / 4; /* 1 register per 4 queues */
690 int regshift = i % 4; /* 4 bits per 1 queue */
691 int j = txr->me;
692
693 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
694 (tdba & 0x00000000ffffffffULL));
695 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
696 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
697 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
698
699 /*
700 * Set TQSMR (Transmit Queue Statistic Mapping) register.
701 * Register location is different between 82598 and others.
702 */
703 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
704 tqsmreg = IXGBE_TQSMR(regnum);
705 else
706 tqsmreg = IXGBE_TQSM(regnum);
707 reg = IXGBE_READ_REG(hw, tqsmreg);
708 reg &= ~(0x000000ffUL << (regshift * 8));
709 reg |= i << (regshift * 8);
710 IXGBE_WRITE_REG(hw, tqsmreg, reg);
711
712 /* Setup the HW Tx Head and Tail descriptor pointers */
713 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
714 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
715
716 /* Cache the tail address */
717 txr->tail = IXGBE_TDT(j);
718
719 txr->txr_no_space = false;
720
721 /* Disable Head Writeback */
722 /*
723 * Note: for X550 series devices, these registers are actually
724 * prefixed with TPH_ isntead of DCA_, but the addresses and
725 * fields remain the same.
726 */
727 switch (hw->mac.type) {
728 case ixgbe_mac_82598EB:
729 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
730 break;
731 default:
732 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
733 break;
734 }
735 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
736 switch (hw->mac.type) {
737 case ixgbe_mac_82598EB:
738 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
739 break;
740 default:
741 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
742 break;
743 }
744
745 }
746
747 if (hw->mac.type != ixgbe_mac_82598EB) {
748 u32 dmatxctl, rttdcs;
749
750 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
751 dmatxctl |= IXGBE_DMATXCTL_TE;
752 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
753 /* Disable arbiter to set MTQC */
754 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
755 rttdcs |= IXGBE_RTTDCS_ARBDIS;
756 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
757 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
758 ixgbe_get_mtqc(adapter->iov_mode));
759 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
760 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
761 }
762
763 return;
764 } /* ixgbe_initialize_transmit_units */
765
/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 * Called via autoconf when the driver is being loaded.
 * Identifies the type of hardware, maps BAR0, allocates all
 * resources and initializes the hardware.
 *
 * Autoconf attach routines return void: on failure this function
 * reports the problem with aprint_error_dev() and unwinds the
 * partially-initialized state at the err_late/err_out labels below
 * instead of returning an error code.
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = -1;
	u32 ctrl_ext;
	u16 high, low, nvmreg;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool unsupported_sfp = false;
	const char *str;
	char wqname[MAXCOMLEN];
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixgbe_lookup(pa);

	/* The autoconf match function already accepted this device. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		/*
		 * NOTE(review): jumping to err_out here reaches
		 * IXGBE_READ_REG()/IXGBE_WRITE_REG() on CTRL_EXT before
		 * BAR0 has been mapped (that happens below in
		 * ixgbe_allocate_pci_resources()) -- confirm the error
		 * path tolerates this earliest failure.
		 */
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Map the MAC type (set by the shared code) to a printable name. */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	/* Decide MSI-X/MSI/INTx and the queue count. */
	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: the ring byte size must be
	 * a multiple of DBA_ALIGN and the count within [MIN,MAX]; otherwise
	 * fall back to the compiled-in default.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	 * Reset the hardware.  Allow an over-temperature condition to
	 * reset the PHY during this initial reset only.  A missing SFP
	 * module is not fatal at attach time.
	 */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version (layout differs per MAC generation) */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)	/* 0xffff == unprogrammed word */
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		/* X540 prints the minor in plain hex, X550EM_a zero-padded */
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	/*
	 * Establish interrupts.  If MSI-X setup fails, fall back to a
	 * single legacy/MSI vector; the queues must be reallocated for
	 * the reduced queue count.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/*
	 * Recovery mode: NVM images >= 2.00 on X550 family support
	 * firmware recovery mode detection.
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	/* Pre-production EEPROM versions only warn; attach continues. */
	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0) {
		/*
		 * NOTE(review): err_late is reached here without setting
		 * "error"; it still holds the ixgbe_start_hw() result --
		 * confirm this is intentional.
		 */
		goto err_late;
	}

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the capability/enabled feature bitmaps. */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	/* Queues were allocated; release them before the common teardown. */
	ixgbe_free_queues(adapter);
err_out:
	/* Tell firmware the driver is no longer loaded. */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_workqueue(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
1260
1261 /************************************************************************
1262 * ixgbe_check_wol_support
1263 *
1264 * Checks whether the adapter's ports are capable of
1265 * Wake On LAN by reading the adapter's NVM.
1266 *
1267 * Sets each port's hw->wol_enabled value depending
1268 * on the value read here.
1269 ************************************************************************/
1270 static void
1271 ixgbe_check_wol_support(struct adapter *adapter)
1272 {
1273 struct ixgbe_hw *hw = &adapter->hw;
1274 u16 dev_caps = 0;
1275
1276 /* Find out WoL support for port */
1277 adapter->wol_support = hw->wol_enabled = 0;
1278 ixgbe_get_device_caps(hw, &dev_caps);
1279 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1280 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1281 hw->bus.func == 0))
1282 adapter->wol_support = hw->wol_enabled = 1;
1283
1284 /* Save initial wake up filter configuration */
1285 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1286
1287 return;
1288 } /* ixgbe_check_wol_support */
1289
1290 /************************************************************************
1291 * ixgbe_setup_interface
1292 *
1293 * Setup networking device structure and register an interface.
1294 ************************************************************************/
/*
 * Set up the ifnet/ethercom structures, register callbacks and
 * capabilities, initialize ifmedia, and register the interface.
 * Returns 0 on success or the if_initialize() error code.
 */
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit only when legacy TX is not forced. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* if_initialize() must precede ether_ifattach()/if_register(). */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Checksum/TSO offloads are off until explicitly enabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1400
1401 /************************************************************************
1402 * ixgbe_add_media_types
1403 ************************************************************************/
1404 static void
1405 ixgbe_add_media_types(struct adapter *adapter)
1406 {
1407 struct ixgbe_hw *hw = &adapter->hw;
1408 u64 layer;
1409
1410 layer = adapter->phy_layer;
1411
1412 #define ADD(mm, dd) \
1413 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1414
1415 ADD(IFM_NONE, 0);
1416
1417 /* Media types with matching NetBSD media defines */
1418 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1419 ADD(IFM_10G_T | IFM_FDX, 0);
1420 }
1421 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1422 ADD(IFM_1000_T | IFM_FDX, 0);
1423 }
1424 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1425 ADD(IFM_100_TX | IFM_FDX, 0);
1426 }
1427 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1428 ADD(IFM_10_T | IFM_FDX, 0);
1429 }
1430
1431 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1432 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1433 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1434 }
1435
1436 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1437 ADD(IFM_10G_LR | IFM_FDX, 0);
1438 if (hw->phy.multispeed_fiber) {
1439 ADD(IFM_1000_LX | IFM_FDX, 0);
1440 }
1441 }
1442 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1443 ADD(IFM_10G_SR | IFM_FDX, 0);
1444 if (hw->phy.multispeed_fiber) {
1445 ADD(IFM_1000_SX | IFM_FDX, 0);
1446 }
1447 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1448 ADD(IFM_1000_SX | IFM_FDX, 0);
1449 }
1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1451 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1452 }
1453
1454 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1455 ADD(IFM_10G_KR | IFM_FDX, 0);
1456 }
1457 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1458 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1459 }
1460 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1461 ADD(IFM_1000_KX | IFM_FDX, 0);
1462 }
1463 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1464 ADD(IFM_2500_KX | IFM_FDX, 0);
1465 }
1466 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1467 ADD(IFM_2500_T | IFM_FDX, 0);
1468 }
1469 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1470 ADD(IFM_5000_T | IFM_FDX, 0);
1471 }
1472 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1473 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1474 /* XXX no ifmedia_set? */
1475
1476 ADD(IFM_AUTO, 0);
1477
1478 #undef ADD
1479 } /* ixgbe_add_media_types */
1480
1481 /************************************************************************
1482 * ixgbe_is_sfp
1483 ************************************************************************/
1484 static inline bool
1485 ixgbe_is_sfp(struct ixgbe_hw *hw)
1486 {
1487 switch (hw->mac.type) {
1488 case ixgbe_mac_82598EB:
1489 if (hw->phy.type == ixgbe_phy_nl)
1490 return (TRUE);
1491 return (FALSE);
1492 case ixgbe_mac_82599EB:
1493 case ixgbe_mac_X550EM_x:
1494 case ixgbe_mac_X550EM_a:
1495 switch (hw->mac.ops.get_media_type(hw)) {
1496 case ixgbe_media_type_fiber:
1497 case ixgbe_media_type_fiber_qsfp:
1498 return (TRUE);
1499 default:
1500 return (FALSE);
1501 }
1502 default:
1503 return (FALSE);
1504 }
1505 } /* ixgbe_is_sfp */
1506
1507 static void
1508 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1509 {
1510 if (adapter->schedule_wqs_ok) {
1511 if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
1512 workqueue_enqueue(adapter->admin_wq,
1513 &adapter->admin_wc, NULL);
1514 }
1515 }
1516
1517 /************************************************************************
1518 * ixgbe_config_link
1519 ************************************************************************/
/*
 * Configure link: on SFP ports defer module/multispeed handling to the
 * admin tasklet; on other ports check link and program the MAC's link
 * setup directly.
 */
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	u32 task_requests = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP ports: request module (and, for multispeed optics,
		 * MSF) processing from the admin workqueue rather than
		 * configuring the link inline.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
		task_requests |= IXGBE_REQUEST_TASK_MOD;
		atomic_or_32(&adapter->task_requests, task_requests);
		ixgbe_schedule_admin_tasklet(adapter);
	} else {
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1564
1565 /************************************************************************
1566 * ixgbe_update_stats_counters - Update board statistics counters.
1567 ************************************************************************/
/*
 * Harvest the hardware statistics registers into the per-driver
 * event counters and feed the RX error totals to the ifnet stats.
 * Most registers are clear-on-read, so each read is accumulated.
 */
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	unsigned int queue_counters;
	int i;

	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		/* QPRDC (per-queue RX drops) only exists on 82599 and later */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist (one per traffic class) */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* 82599+ use the *CNT variants of the PFC RX counters */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx is initialized to 0 and never updated in
	 * this function, so this subtraction is currently a no-op -- confirm
	 * whether MPC was meant to be accumulated into it.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit octet counters are split low/high on 82599+ */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters exclude the flow-control frames counted in "total" */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/*
	 * Fill out the OS statistics structure. Only RX errors are required
	 * here because all TX counters are incremented in the TX path and
	 * normal RX counters are prepared in ether_input().
	 */
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
	if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
	IF_STAT_PUTREF(ifp);
} /* ixgbe_update_stats_counters */
1727
1728 /************************************************************************
1729 * ixgbe_add_hw_stats
1730 *
1731 * Add sysctl variables, one per statistic, to the system.
1732 ************************************************************************/
1733 static void
1734 ixgbe_add_hw_stats(struct adapter *adapter)
1735 {
1736 device_t dev = adapter->dev;
1737 const struct sysctlnode *rnode, *cnode;
1738 struct sysctllog **log = &adapter->sysctllog;
1739 struct tx_ring *txr = adapter->tx_rings;
1740 struct rx_ring *rxr = adapter->rx_rings;
1741 struct ixgbe_hw *hw = &adapter->hw;
1742 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1743 const char *xname = device_xname(dev);
1744 int i;
1745
1746 /* Driver Statistics */
1747 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1748 NULL, xname, "Driver tx dma soft fail EFBIG");
1749 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1750 NULL, xname, "m_defrag() failed");
1751 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1752 NULL, xname, "Driver tx dma hard fail EFBIG");
1753 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1754 NULL, xname, "Driver tx dma hard fail EINVAL");
1755 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1756 NULL, xname, "Driver tx dma hard fail other");
1757 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1758 NULL, xname, "Driver tx dma soft fail EAGAIN");
1759 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1760 NULL, xname, "Driver tx dma soft fail ENOMEM");
1761 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1762 NULL, xname, "Watchdog timeouts");
1763 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1764 NULL, xname, "TSO errors");
1765 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1766 NULL, xname, "Admin MSI-X IRQ Handled");
1767 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1768 NULL, xname, "Link event");
1769 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1770 NULL, xname, "SFP+ module event");
1771 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1772 NULL, xname, "Multispeed event");
1773 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1774 NULL, xname, "External PHY event");
1775
1776 /* Max number of traffic class is 8 */
1777 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1778 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1779 snprintf(adapter->tcs[i].evnamebuf,
1780 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1781 xname, i);
1782 if (i < __arraycount(stats->mpc)) {
1783 evcnt_attach_dynamic(&stats->mpc[i],
1784 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1785 "RX Missed Packet Count");
1786 if (hw->mac.type == ixgbe_mac_82598EB)
1787 evcnt_attach_dynamic(&stats->rnbc[i],
1788 EVCNT_TYPE_MISC, NULL,
1789 adapter->tcs[i].evnamebuf,
1790 "Receive No Buffers");
1791 }
1792 if (i < __arraycount(stats->pxontxc)) {
1793 evcnt_attach_dynamic(&stats->pxontxc[i],
1794 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1795 "pxontxc");
1796 evcnt_attach_dynamic(&stats->pxonrxc[i],
1797 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1798 "pxonrxc");
1799 evcnt_attach_dynamic(&stats->pxofftxc[i],
1800 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1801 "pxofftxc");
1802 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1803 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1804 "pxoffrxc");
1805 if (hw->mac.type >= ixgbe_mac_82599EB)
1806 evcnt_attach_dynamic(&stats->pxon2offc[i],
1807 EVCNT_TYPE_MISC, NULL,
1808 adapter->tcs[i].evnamebuf,
1809 "pxon2offc");
1810 }
1811 }
1812
1813 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1814 #ifdef LRO
1815 struct lro_ctrl *lro = &rxr->lro;
1816 #endif /* LRO */
1817
1818 snprintf(adapter->queues[i].evnamebuf,
1819 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1820 xname, i);
1821 snprintf(adapter->queues[i].namebuf,
1822 sizeof(adapter->queues[i].namebuf), "q%d", i);
1823
1824 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1825 aprint_error_dev(dev, "could not create sysctl root\n");
1826 break;
1827 }
1828
1829 if (sysctl_createv(log, 0, &rnode, &rnode,
1830 0, CTLTYPE_NODE,
1831 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1832 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1833 break;
1834
1835 if (sysctl_createv(log, 0, &rnode, &cnode,
1836 CTLFLAG_READWRITE, CTLTYPE_INT,
1837 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1838 ixgbe_sysctl_interrupt_rate_handler, 0,
1839 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1840 break;
1841
1842 if (sysctl_createv(log, 0, &rnode, &cnode,
1843 CTLFLAG_READONLY, CTLTYPE_INT,
1844 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1845 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1846 0, CTL_CREATE, CTL_EOL) != 0)
1847 break;
1848
1849 if (sysctl_createv(log, 0, &rnode, &cnode,
1850 CTLFLAG_READONLY, CTLTYPE_INT,
1851 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1852 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1853 0, CTL_CREATE, CTL_EOL) != 0)
1854 break;
1855
1856 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1857 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1858 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1859 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1860 "Handled queue in softint");
1861 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1862 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1863 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1864 NULL, adapter->queues[i].evnamebuf, "TSO");
1865 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1866 NULL, adapter->queues[i].evnamebuf,
1867 "Queue No Descriptor Available");
1868 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1869 NULL, adapter->queues[i].evnamebuf,
1870 "Queue Packets Transmitted");
1871 #ifndef IXGBE_LEGACY_TX
1872 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1873 NULL, adapter->queues[i].evnamebuf,
1874 "Packets dropped in pcq");
1875 #endif
1876
1877 if (sysctl_createv(log, 0, &rnode, &cnode,
1878 CTLFLAG_READONLY,
1879 CTLTYPE_INT,
1880 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1881 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1882 CTL_CREATE, CTL_EOL) != 0)
1883 break;
1884
1885 if (sysctl_createv(log, 0, &rnode, &cnode,
1886 CTLFLAG_READONLY,
1887 CTLTYPE_INT,
1888 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1889 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1890 CTL_CREATE, CTL_EOL) != 0)
1891 break;
1892
1893 if (sysctl_createv(log, 0, &rnode, &cnode,
1894 CTLFLAG_READONLY,
1895 CTLTYPE_INT,
1896 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1897 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1898 CTL_CREATE, CTL_EOL) != 0)
1899 break;
1900
1901 if (i < __arraycount(stats->qprc)) {
1902 evcnt_attach_dynamic(&stats->qprc[i],
1903 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1904 "qprc");
1905 evcnt_attach_dynamic(&stats->qptc[i],
1906 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1907 "qptc");
1908 evcnt_attach_dynamic(&stats->qbrc[i],
1909 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1910 "qbrc");
1911 evcnt_attach_dynamic(&stats->qbtc[i],
1912 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1913 "qbtc");
1914 if (hw->mac.type >= ixgbe_mac_82599EB)
1915 evcnt_attach_dynamic(&stats->qprdc[i],
1916 EVCNT_TYPE_MISC, NULL,
1917 adapter->queues[i].evnamebuf, "qprdc");
1918 }
1919
1920 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1921 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1922 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1923 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1924 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1925 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1926 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1927 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1928 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1929 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1930 #ifdef LRO
1931 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1932 CTLFLAG_RD, &lro->lro_queued, 0,
1933 "LRO Queued");
1934 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1935 CTLFLAG_RD, &lro->lro_flushed, 0,
1936 "LRO Flushed");
1937 #endif /* LRO */
1938 }
1939
1940 /* MAC stats get their own sub node */
1941
1942 snprintf(stats->namebuf,
1943 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1944
1945 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "rx csum offload - IP");
1947 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "rx csum offload - L4");
1949 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "rx csum offload - IP bad");
1951 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "rx csum offload - L4 bad");
1953 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Interrupt conditions zero");
1955 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "Legacy interrupts");
1957
1958 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "CRC Errors");
1960 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Illegal Byte Errors");
1962 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "Byte Errors");
1964 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "MAC Short Packets Discarded");
1966 if (hw->mac.type >= ixgbe_mac_X550)
1967 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Bad SFD");
1969 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Total Packets Missed");
1971 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "MAC Local Faults");
1973 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "MAC Remote Faults");
1975 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Receive Length Errors");
1977 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "Link XON Transmitted");
1979 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "Link XON Received");
1981 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Link XOFF Transmitted");
1983 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Link XOFF Received");
1985
1986 /* Packet Reception Stats */
1987 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Total Octets Received");
1989 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Good Octets Received");
1991 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Total Packets Received");
1993 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Good Packets Received");
1995 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Multicast Packets Received");
1997 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "Broadcast Packets Received");
1999 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "64 byte frames received ");
2001 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "65-127 byte frames received");
2003 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "128-255 byte frames received");
2005 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "256-511 byte frames received");
2007 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "512-1023 byte frames received");
2009 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "1023-1522 byte frames received");
2011 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Receive Undersized");
2013 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Fragmented Packets Received ");
2015 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Oversized Packets Received");
2017 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Received Jabber");
2019 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Management Packets Received");
2021 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Management Packets Dropped");
2023 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2024 stats->namebuf, "Checksum Errors");
2025
2026 /* Packet Transmission Stats */
2027 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Good Octets Transmitted");
2029 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Total Packets Transmitted");
2031 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Good Packets Transmitted");
2033 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "Broadcast Packets Transmitted");
2035 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "Multicast Packets Transmitted");
2037 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "Management Packets Transmitted");
2039 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "64 byte frames transmitted ");
2041 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "65-127 byte frames transmitted");
2043 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "128-255 byte frames transmitted");
2045 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "256-511 byte frames transmitted");
2047 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2048 stats->namebuf, "512-1023 byte frames transmitted");
2049 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2050 stats->namebuf, "1024-1522 byte frames transmitted");
2051 } /* ixgbe_add_hw_stats */
2052
/************************************************************************
 * ixgbe_clear_evcnt
 *
 * Reset every software event counter attached by ixgbe_add_hw_stats()
 * back to zero: driver-level counters, per-traffic-class counters,
 * per-queue TX/RX counters, and the MAC statistics block. The structure
 * and conditionals here deliberately mirror ixgbe_add_hw_stats(), so
 * only counters that were actually attached for this MAC type are
 * touched.
 ************************************************************************/
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver-level counters */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->admin_irqev.ev_count = 0;
	adapter->link_workev.ev_count = 0;
	adapter->mod_workev.ev_count = 0;
	adapter->msf_workev.ev_count = 0;
	adapter->phy_workev.ev_count = 0;

	/* Per-traffic-class counters (bounded by each array's size) */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			stats->mpc[i].ev_count = 0;
			/* rnbc[] is attached only on 82598 */
			if (hw->mac.type == ixgbe_mac_82598EB)
				stats->rnbc[i].ev_count = 0;
		}
		if (i < __arraycount(stats->pxontxc)) {
			stats->pxontxc[i].ev_count = 0;
			stats->pxonrxc[i].ev_count = 0;
			stats->pxofftxc[i].ev_count = 0;
			stats->pxoffrxc[i].ev_count = 0;
			/* pxon2offc[] is attached only on 82599 and later */
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->pxon2offc[i].ev_count = 0;
		}
	}

	/* Per-queue counters (txr re-set here; redundant but harmless) */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
		txr->tso_tx.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		/* Plain per-ring tallies (not evcnts) */
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	/* MAC statistics block */
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	/* mbsdc is attached only on X550 and later */
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}
2184
2185 /************************************************************************
2186 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2187 *
2188 * Retrieves the TDH value from the hardware
2189 ************************************************************************/
2190 static int
2191 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2192 {
2193 struct sysctlnode node = *rnode;
2194 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2195 struct adapter *adapter;
2196 uint32_t val;
2197
2198 if (!txr)
2199 return (0);
2200
2201 adapter = txr->adapter;
2202 if (ixgbe_fw_recovery_mode_swflag(adapter))
2203 return (EPERM);
2204
2205 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2206 node.sysctl_data = &val;
2207 return sysctl_lookup(SYSCTLFN_CALL(&node));
2208 } /* ixgbe_sysctl_tdh_handler */
2209
2210 /************************************************************************
2211 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2212 *
2213 * Retrieves the TDT value from the hardware
2214 ************************************************************************/
2215 static int
2216 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2217 {
2218 struct sysctlnode node = *rnode;
2219 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2220 struct adapter *adapter;
2221 uint32_t val;
2222
2223 if (!txr)
2224 return (0);
2225
2226 adapter = txr->adapter;
2227 if (ixgbe_fw_recovery_mode_swflag(adapter))
2228 return (EPERM);
2229
2230 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2231 node.sysctl_data = &val;
2232 return sysctl_lookup(SYSCTLFN_CALL(&node));
2233 } /* ixgbe_sysctl_tdt_handler */
2234
2235 /************************************************************************
2236 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2237 * handler function
2238 *
2239 * Retrieves the next_to_check value
2240 ************************************************************************/
2241 static int
2242 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2243 {
2244 struct sysctlnode node = *rnode;
2245 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2246 struct adapter *adapter;
2247 uint32_t val;
2248
2249 if (!rxr)
2250 return (0);
2251
2252 adapter = rxr->adapter;
2253 if (ixgbe_fw_recovery_mode_swflag(adapter))
2254 return (EPERM);
2255
2256 val = rxr->next_to_check;
2257 node.sysctl_data = &val;
2258 return sysctl_lookup(SYSCTLFN_CALL(&node));
2259 } /* ixgbe_sysctl_next_to_check_handler */
2260
2261 /************************************************************************
2262 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2263 *
2264 * Retrieves the RDH value from the hardware
2265 ************************************************************************/
2266 static int
2267 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2268 {
2269 struct sysctlnode node = *rnode;
2270 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2271 struct adapter *adapter;
2272 uint32_t val;
2273
2274 if (!rxr)
2275 return (0);
2276
2277 adapter = rxr->adapter;
2278 if (ixgbe_fw_recovery_mode_swflag(adapter))
2279 return (EPERM);
2280
2281 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2282 node.sysctl_data = &val;
2283 return sysctl_lookup(SYSCTLFN_CALL(&node));
2284 } /* ixgbe_sysctl_rdh_handler */
2285
2286 /************************************************************************
2287 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2288 *
2289 * Retrieves the RDT value from the hardware
2290 ************************************************************************/
2291 static int
2292 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2293 {
2294 struct sysctlnode node = *rnode;
2295 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2296 struct adapter *adapter;
2297 uint32_t val;
2298
2299 if (!rxr)
2300 return (0);
2301
2302 adapter = rxr->adapter;
2303 if (ixgbe_fw_recovery_mode_swflag(adapter))
2304 return (EPERM);
2305
2306 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2307 node.sysctl_data = &val;
2308 return sysctl_lookup(SYSCTLFN_CALL(&node));
2309 } /* ixgbe_sysctl_rdt_handler */
2310
2311 static int
2312 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2313 {
2314 struct ifnet *ifp = &ec->ec_if;
2315 struct adapter *adapter = ifp->if_softc;
2316 int rv;
2317
2318 if (set)
2319 rv = ixgbe_register_vlan(adapter, vid);
2320 else
2321 rv = ixgbe_unregister_vlan(adapter, vid);
2322
2323 if (rv != 0)
2324 return rv;
2325
2326 /*
2327 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2328 * or 0 to 1.
2329 */
2330 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2331 ixgbe_setup_vlan_hw_tagging(adapter);
2332
2333 return rv;
2334 }
2335
2336 /************************************************************************
2337 * ixgbe_register_vlan
2338 *
2339 * Run via vlan config EVENT, it enables us to use the
2340 * HW Filter table since we can get the vlan id. This
2341 * just creates the entry in the soft version of the
2342 * VFTA, init will repopulate the real table.
2343 ************************************************************************/
2344 static int
2345 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2346 {
2347 u16 index, bit;
2348 int error;
2349
2350 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2351 return EINVAL;
2352
2353 IXGBE_CORE_LOCK(adapter);
2354 index = (vtag >> 5) & 0x7F;
2355 bit = vtag & 0x1F;
2356 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2357 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2358 true);
2359 IXGBE_CORE_UNLOCK(adapter);
2360 if (error != 0)
2361 error = EACCES;
2362
2363 return error;
2364 } /* ixgbe_register_vlan */
2365
2366 /************************************************************************
2367 * ixgbe_unregister_vlan
2368 *
2369 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2370 ************************************************************************/
2371 static int
2372 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2373 {
2374 u16 index, bit;
2375 int error;
2376
2377 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2378 return EINVAL;
2379
2380 IXGBE_CORE_LOCK(adapter);
2381 index = (vtag >> 5) & 0x7F;
2382 bit = vtag & 0x1F;
2383 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2384 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2385 true);
2386 IXGBE_CORE_UNLOCK(adapter);
2387 if (error != 0)
2388 error = EACCES;
2389
2390 return error;
2391 } /* ixgbe_unregister_vlan */
2392
/*
 * Program hardware VLAN tag stripping according to the current
 * ETHERCAP_VLAN_HWTAGGING capability and whether any VLAN is attached.
 * On 82599 and later the enable bit lives per-queue in RXDCTL; on
 * 82598 it is a single global bit in VLNCTRL.
 */
static void
ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl;
	int i;
	bool hwtagging;

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(ec);

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/*
		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
		 */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			if (hwtagging)
				ctrl |= IXGBE_RXDCTL_VME;
			else
				ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		/* Remember the decision per ring for the RX path. */
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/* VLAN hw tagging for 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		if (hwtagging)
			ctrl |= IXGBE_VLNCTRL_VME;
		else
			ctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	}
} /* ixgbe_setup_vlan_hw_tagging */
2434
/*
 * Program full VLAN hardware support: tag stripping (part 1) and the
 * VLAN filter table plus its enable bit (part 2). The shadow VFTA is
 * rebuilt from the ethercom's current VLAN id list and then written
 * to the hardware VFTA registers.
 */
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 ctrl;
	struct vlanid_list *vlanidp;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixgbe_setup_vlan_hw_tagging(adapter);

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids */
	/* ETHER_LOCK protects the ec_vids list during traversal. */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		/* Each 32-bit VFTA word covers 32 consecutive VLAN ids. */
		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);
	/* Push the rebuilt shadow table to the hardware. */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
2483
/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 *
 * For quad-port parts that sit behind an internal PCIe bridge, the
 * link information is read from a parent device in the PCI tree
 * instead of the adapter itself; when that fails, we fall back to
 * the generic query and warn that the reported numbers are those of
 * the internal switch. Prints warnings when the slot bandwidth is
 * below what the part needs for full performance.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Non-QP parts want at least x8, or x4 Gen2. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		     (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		/* The QP part wants a x8 Gen3 slot. */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		     (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2591
/************************************************************************
 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
 *
 * Re-enable the interrupt for one queue vector. Enables nest with
 * ixgbe_disable_queue(): the hardware mask is only cleared when the
 * per-queue disabled_count drops back to zero.
 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = 1ULL << vector;
	u32 mask;

	mutex_enter(&que->dc_mtx);
	/* Still nested-disabled after this decrement? Leave HW masked. */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single EIMS register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* 82599+: 64-bit queue mask split across EIMS_EX(0)/(1) */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2621
/************************************************************************
 * ixgbe_disable_queue_internal
 *
 * Mask the interrupt for one queue vector. Maintains a nesting count
 * under dc_mtx; the hardware mask is written only on the first
 * disable. With nestok false, an already-disabled queue is left as-is
 * without bumping the count.
 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = 1ULL << vector;
	u32 mask;

	mutex_enter(&que->dc_mtx);

	if (que->disabled_count > 0) {
		/* Already masked; only track the extra level if allowed. */
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single EIMC register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* 82599+: 64-bit queue mask split across EIMC_EX(0)/(1) */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2656
2657 /************************************************************************
2658 * ixgbe_disable_queue
2659 ************************************************************************/
2660 static inline void
2661 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2662 {
2663
2664 ixgbe_disable_queue_internal(adapter, vector, true);
2665 } /* ixgbe_disable_queue */
2666
2667 /************************************************************************
2668 * ixgbe_sched_handle_que - schedule deferred packet processing
2669 ************************************************************************/
2670 static inline void
2671 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2672 {
2673
2674 if (que->txrx_use_workqueue) {
2675 /*
2676 * adapter->que_wq is bound to each CPU instead of
2677 * each NIC queue to reduce workqueue kthread. As we
2678 * should consider about interrupt affinity in this
2679 * function, the workqueue kthread must be WQ_PERCPU.
2680 * If create WQ_PERCPU workqueue kthread for each NIC
2681 * queue, that number of created workqueue kthread is
2682 * (number of used NIC queue) * (number of CPUs) =
2683 * (number of CPUs) ^ 2 most often.
2684 *
2685 * The same NIC queue's interrupts are avoided by
2686 * masking the queue's interrupt. And different
2687 * NIC queue's interrupts use different struct work
2688 * (que->wq_cookie). So, "enqueued flag" to avoid
2689 * twice workqueue_enqueue() is not required .
2690 */
2691 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2692 } else {
2693 softint_schedule(que->que_si);
2694 }
2695 }
2696
2697 /************************************************************************
2698 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2699 ************************************************************************/
/*
 * Interrupt handler for one TX/RX queue-pair MSI-X vector.  Masks the
 * vector, reaps completed TX descriptors, optionally recomputes the
 * adaptive interrupt moderation (AIM) interval, and defers RX work to
 * softint/workqueue context.  Returns nonzero iff the interrupt was
 * handled.
 */
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector; re-enabled here or by the deferred handler. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 *   the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/*
	 * Average bytes per packet over the last interval, taking the
	 * larger of the TX and RX sides.
	 */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* RX work pending: defer and leave the vector masked until done. */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2798
2799 /************************************************************************
2800 * ixgbe_media_status - Media Ioctl callback
2801 *
2802 * Called whenever the user queries the status of
2803 * the interface using ifconfig.
2804 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and leave IFM_ACTIVE clear. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Translate (physical layer, negotiated link speed) into an
	 * ifmedia subtype.  phy_layer is a bitmask, so more than one of
	 * the blocks below may match; each only ORs in bits for its own
	 * media family.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	return;
} /* ixgbe_media_status */
2942
2943 /************************************************************************
2944 * ixgbe_media_change - Media Ioctl callback
2945 *
2946 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
2948 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; refuse manual changes. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Map the requested ifmedia subtype onto a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the MAC reports. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the selection in adapter->advertise as a bitmask:
	 * 0x01 = 100M, 0x02 = 1G, 0x04 = 10G, 0x08 = 10M, 0x10 = 2.5G,
	 * 0x20 = 5G.  0 means autonegotiate (IFM_AUTO).
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
3044
3045 /************************************************************************
3046 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3047 ************************************************************************/
3048 static int
3049 ixgbe_msix_admin(void *arg)
3050 {
3051 struct adapter *adapter = arg;
3052 struct ixgbe_hw *hw = &adapter->hw;
3053 u32 eicr, eicr_mask;
3054 u32 task_requests = 0;
3055 s32 retval;
3056
3057 ++adapter->admin_irqev.ev_count;
3058
3059 /* Pause other interrupts */
3060 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3061
3062 /* First get the cause */
3063 /*
3064 * The specifications of 82598, 82599, X540 and X550 say EICS register
3065 * is write only. However, Linux says it is a workaround for silicon
3066 * errata to read EICS instead of EICR to get interrupt cause. It seems
3067 * there is a problem about read clear mechanism for EICR register.
3068 */
3069 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3070 /* Be sure the queue bits are not cleared */
3071 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3072 /* Clear interrupt with write */
3073 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3074
3075 if (ixgbe_is_sfp(hw)) {
3076 /* Pluggable optics-related interrupt */
3077 if (hw->mac.type >= ixgbe_mac_X540)
3078 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3079 else
3080 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3081
3082 /*
3083 * An interrupt might not arrive when a module is inserted.
3084 * When an link status change interrupt occurred and the driver
3085 * still regard SFP as unplugged, issue the module softint
3086 * and then issue LSC interrupt.
3087 */
3088 if ((eicr & eicr_mask)
3089 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3090 && (eicr & IXGBE_EICR_LSC))) {
3091 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3092 task_requests |= IXGBE_REQUEST_TASK_MOD;
3093 }
3094
3095 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3096 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3097 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3098 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3099 task_requests |= IXGBE_REQUEST_TASK_MSF;
3100 }
3101 }
3102
3103 /* Link status change */
3104 if (eicr & IXGBE_EICR_LSC) {
3105 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3106 task_requests |= IXGBE_REQUEST_TASK_LSC;
3107 }
3108
3109 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3110 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3111 (eicr & IXGBE_EICR_FLOW_DIR)) {
3112 /* This is probably overkill :) */
3113 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3114 return 1;
3115 /* Disable the interrupt */
3116 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3117 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3118 }
3119
3120 if (eicr & IXGBE_EICR_ECC) {
3121 device_printf(adapter->dev,
3122 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3123 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3124 }
3125
3126 /* Check for over temp condition */
3127 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3128 switch (adapter->hw.mac.type) {
3129 case ixgbe_mac_X550EM_a:
3130 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3131 break;
3132 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3133 IXGBE_EICR_GPI_SDP0_X550EM_a);
3134 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3135 IXGBE_EICR_GPI_SDP0_X550EM_a);
3136 retval = hw->phy.ops.check_overtemp(hw);
3137 if (retval != IXGBE_ERR_OVERTEMP)
3138 break;
3139 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3140 device_printf(adapter->dev, "System shutdown required!\n");
3141 break;
3142 default:
3143 if (!(eicr & IXGBE_EICR_TS))
3144 break;
3145 retval = hw->phy.ops.check_overtemp(hw);
3146 if (retval != IXGBE_ERR_OVERTEMP)
3147 break;
3148 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3149 device_printf(adapter->dev, "System shutdown required!\n");
3150 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3151 break;
3152 }
3153 }
3154
3155 /* Check for VF message */
3156 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3157 (eicr & IXGBE_EICR_MAILBOX)) {
3158 task_requests |= IXGBE_REQUEST_TASK_MBX;
3159 }
3160 }
3161
3162 /* Check for fan failure */
3163 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3164 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3165 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3166 }
3167
3168 /* External PHY interrupt */
3169 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3170 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3171 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3172 task_requests |= IXGBE_REQUEST_TASK_PHY;
3173 }
3174
3175 if (task_requests != 0) {
3176 /* Re-enabling other interrupts is done in the admin task */
3177 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
3178 atomic_or_32(&adapter->task_requests, task_requests);
3179 ixgbe_schedule_admin_tasklet(adapter);
3180 } else {
3181 /* Re-enable other interrupts */
3182 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3183 }
3184
3185 return 1;
3186 } /* ixgbe_msix_admin */
3187
3188 static void
3189 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3190 {
3191
3192 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3193 itr |= itr << 16;
3194 else
3195 itr |= IXGBE_EITR_CNT_WDIS;
3196
3197 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3198 }
3199
3200
3201 /************************************************************************
3202 * ixgbe_sysctl_interrupt_rate_handler
3203 ************************************************************************/
/*
 * Sysctl handler that reports and sets the per-queue interrupt rate
 * (interrupts per second), backed by the hardware EITR register of the
 * queue's MSI-X vector.  A rate of 0 disables throttling.
 */
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No register access while the firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * The ITR interval field lives in bits 3..11 of EITR.  Assuming
	 * the 2us-per-unit granularity of these MACs, the current rate is
	 * 1000000 / (2 * usec) = 500000 / usec interrupts per second.
	 */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0; /* interval of 0 == unthrottled */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000; /* keep 4000000/rate within the field */
		/* 4000000/rate is the interval pre-shifted into bits 3..11. */
		reg |= ((4000000 / rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3254
3255 const struct sysctlnode *
3256 ixgbe_sysctl_instance(struct adapter *adapter)
3257 {
3258 const char *dvname;
3259 struct sysctllog **log;
3260 int rc;
3261 const struct sysctlnode *rnode;
3262
3263 if (adapter->sysctltop != NULL)
3264 return adapter->sysctltop;
3265
3266 log = &adapter->sysctllog;
3267 dvname = device_xname(adapter->dev);
3268
3269 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3270 0, CTLTYPE_NODE, dvname,
3271 SYSCTL_DESCR("ixgbe information and settings"),
3272 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3273 goto err;
3274
3275 return rnode;
3276 err:
3277 device_printf(adapter->dev,
3278 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3279 return NULL;
3280 }
3281
3282 /************************************************************************
3283 * ixgbe_add_device_sysctls
3284 ************************************************************************/
/*
 * Create the per-device sysctl tree under hw.<devname>: generic debug
 * and tuning knobs plus feature-conditional nodes (DMA coalescing,
 * WoL, external PHY, EEE, ...).  Creation failures are logged but not
 * fatal.
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3438
3439 /************************************************************************
3440 * ixgbe_allocate_pci_resources
3441 ************************************************************************/
/*
 * Map BAR0 (the device register window) into kernel virtual address
 * space and make sure PCI memory-space decoding is enabled.  Returns 0
 * on success, ENXIO if the BAR has an unexpected type or cannot be
 * mapped.
 */
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Register access must not be prefetched; force it off. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "no mapping" for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3488
3489 static void
3490 ixgbe_free_workqueue(struct adapter *adapter)
3491 {
3492 struct ix_queue *que = adapter->queues;
3493 struct tx_ring *txr = adapter->tx_rings;
3494 int i;
3495
3496 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3497 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3498 if (txr->txr_si != NULL)
3499 softint_disestablish(txr->txr_si);
3500 }
3501 if (que->que_si != NULL)
3502 softint_disestablish(que->que_si);
3503 }
3504 if (adapter->txr_wq != NULL)
3505 workqueue_destroy(adapter->txr_wq);
3506 if (adapter->txr_wq_enqueued != NULL)
3507 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3508 if (adapter->que_wq != NULL)
3509 workqueue_destroy(adapter->que_wq);
3510
3511 if (adapter->admin_wq != NULL) {
3512 workqueue_destroy(adapter->admin_wq);
3513 adapter->admin_wq = NULL;
3514 }
3515 if (adapter->timer_wq != NULL) {
3516 workqueue_destroy(adapter->timer_wq);
3517 adapter->timer_wq = NULL;
3518 }
3519 if (adapter->recovery_mode_timer_wq != NULL) {
3520 /*
3521 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3522 * the recovery_mode_timer workqueue, so call it here.
3523 */
3524 workqueue_wait(adapter->recovery_mode_timer_wq,
3525 &adapter->recovery_mode_timer_wc);
3526 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3527 workqueue_destroy(adapter->recovery_mode_timer_wq);
3528 adapter->recovery_mode_timer_wq = NULL;
3529 }
3530 } /* ixgbe_free_workqueue */
3531
3532 /************************************************************************
3533 * ixgbe_detach - Device removal routine
3534 *
3535 * Called when the driver is being removed.
3536 * Stops the adapter and deallocates all the resources
3537 * that were allocated for driver operation.
3538 *
3539 * return 0 on success, positive on failure
3540 ************************************************************************/
3541 static int
3542 ixgbe_detach(device_t dev, int flags)
3543 {
3544 struct adapter *adapter = device_private(dev);
3545 struct rx_ring *rxr = adapter->rx_rings;
3546 struct tx_ring *txr = adapter->tx_rings;
3547 struct ixgbe_hw *hw = &adapter->hw;
3548 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3549 u32 ctrl_ext;
3550 int i;
3551
3552 INIT_DEBUGOUT("ixgbe_detach: begin");
3553 if (adapter->osdep.attached == false)
3554 return 0;
3555
3556 if (ixgbe_pci_iov_detach(dev) != 0) {
3557 device_printf(dev, "SR-IOV in use; detach first.\n");
3558 return (EBUSY);
3559 }
3560
3561 #if NVLAN > 0
3562 /* Make sure VLANs are not using driver */
3563 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3564 ; /* nothing to do: no VLANs */
3565 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3566 vlan_ifdetach(adapter->ifp);
3567 else {
3568 aprint_error_dev(dev, "VLANs in use, detach first\n");
3569 return (EBUSY);
3570 }
3571 #endif
3572
3573 /*
3574 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3575 * so it's not required to call ixgbe_stop() directly.
3576 */
3577 IXGBE_CORE_LOCK(adapter);
3578 ixgbe_setup_low_power_mode(adapter);
3579 IXGBE_CORE_UNLOCK(adapter);
3580
3581 callout_halt(&adapter->timer, NULL);
3582 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
3583 callout_stop(&adapter->recovery_mode_timer);
3584 callout_halt(&adapter->recovery_mode_timer, NULL);
3585 }
3586
3587 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3588 atomic_store_relaxed(&adapter->admin_pending, 0);
3589 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3590 atomic_store_relaxed(&adapter->timer_pending, 0);
3591
3592 pmf_device_deregister(dev);
3593
3594 ether_ifdetach(adapter->ifp);
3595
3596 ixgbe_free_workqueue(adapter);
3597
3598 /* let hardware know driver is unloading */
3599 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3600 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3602
3603 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3604 netmap_detach(adapter->ifp);
3605
3606 ixgbe_free_pci_resources(adapter);
3607 #if 0 /* XXX the NetBSD port is probably missing something here */
3608 bus_generic_detach(dev);
3609 #endif
3610 if_detach(adapter->ifp);
3611 ifmedia_fini(&adapter->media);
3612 if_percpuq_destroy(adapter->ipq);
3613
3614 sysctl_teardown(&adapter->sysctllog);
3615 evcnt_detach(&adapter->efbig_tx_dma_setup);
3616 evcnt_detach(&adapter->mbuf_defrag_failed);
3617 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3618 evcnt_detach(&adapter->einval_tx_dma_setup);
3619 evcnt_detach(&adapter->other_tx_dma_setup);
3620 evcnt_detach(&adapter->eagain_tx_dma_setup);
3621 evcnt_detach(&adapter->enomem_tx_dma_setup);
3622 evcnt_detach(&adapter->watchdog_events);
3623 evcnt_detach(&adapter->tso_err);
3624 evcnt_detach(&adapter->admin_irqev);
3625 evcnt_detach(&adapter->link_workev);
3626 evcnt_detach(&adapter->mod_workev);
3627 evcnt_detach(&adapter->msf_workev);
3628 evcnt_detach(&adapter->phy_workev);
3629
3630 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3631 if (i < __arraycount(stats->mpc)) {
3632 evcnt_detach(&stats->mpc[i]);
3633 if (hw->mac.type == ixgbe_mac_82598EB)
3634 evcnt_detach(&stats->rnbc[i]);
3635 }
3636 if (i < __arraycount(stats->pxontxc)) {
3637 evcnt_detach(&stats->pxontxc[i]);
3638 evcnt_detach(&stats->pxonrxc[i]);
3639 evcnt_detach(&stats->pxofftxc[i]);
3640 evcnt_detach(&stats->pxoffrxc[i]);
3641 if (hw->mac.type >= ixgbe_mac_82599EB)
3642 evcnt_detach(&stats->pxon2offc[i]);
3643 }
3644 }
3645
3646 txr = adapter->tx_rings;
3647 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3648 evcnt_detach(&adapter->queues[i].irqs);
3649 evcnt_detach(&adapter->queues[i].handleq);
3650 evcnt_detach(&adapter->queues[i].req);
3651 evcnt_detach(&txr->no_desc_avail);
3652 evcnt_detach(&txr->total_packets);
3653 evcnt_detach(&txr->tso_tx);
3654 #ifndef IXGBE_LEGACY_TX
3655 evcnt_detach(&txr->pcq_drops);
3656 #endif
3657
3658 if (i < __arraycount(stats->qprc)) {
3659 evcnt_detach(&stats->qprc[i]);
3660 evcnt_detach(&stats->qptc[i]);
3661 evcnt_detach(&stats->qbrc[i]);
3662 evcnt_detach(&stats->qbtc[i]);
3663 if (hw->mac.type >= ixgbe_mac_82599EB)
3664 evcnt_detach(&stats->qprdc[i]);
3665 }
3666
3667 evcnt_detach(&rxr->rx_packets);
3668 evcnt_detach(&rxr->rx_bytes);
3669 evcnt_detach(&rxr->rx_copies);
3670 evcnt_detach(&rxr->no_jmbuf);
3671 evcnt_detach(&rxr->rx_discarded);
3672 }
3673 evcnt_detach(&stats->ipcs);
3674 evcnt_detach(&stats->l4cs);
3675 evcnt_detach(&stats->ipcs_bad);
3676 evcnt_detach(&stats->l4cs_bad);
3677 evcnt_detach(&stats->intzero);
3678 evcnt_detach(&stats->legint);
3679 evcnt_detach(&stats->crcerrs);
3680 evcnt_detach(&stats->illerrc);
3681 evcnt_detach(&stats->errbc);
3682 evcnt_detach(&stats->mspdc);
3683 if (hw->mac.type >= ixgbe_mac_X550)
3684 evcnt_detach(&stats->mbsdc);
3685 evcnt_detach(&stats->mpctotal);
3686 evcnt_detach(&stats->mlfc);
3687 evcnt_detach(&stats->mrfc);
3688 evcnt_detach(&stats->rlec);
3689 evcnt_detach(&stats->lxontxc);
3690 evcnt_detach(&stats->lxonrxc);
3691 evcnt_detach(&stats->lxofftxc);
3692 evcnt_detach(&stats->lxoffrxc);
3693
3694 /* Packet Reception Stats */
3695 evcnt_detach(&stats->tor);
3696 evcnt_detach(&stats->gorc);
3697 evcnt_detach(&stats->tpr);
3698 evcnt_detach(&stats->gprc);
3699 evcnt_detach(&stats->mprc);
3700 evcnt_detach(&stats->bprc);
3701 evcnt_detach(&stats->prc64);
3702 evcnt_detach(&stats->prc127);
3703 evcnt_detach(&stats->prc255);
3704 evcnt_detach(&stats->prc511);
3705 evcnt_detach(&stats->prc1023);
3706 evcnt_detach(&stats->prc1522);
3707 evcnt_detach(&stats->ruc);
3708 evcnt_detach(&stats->rfc);
3709 evcnt_detach(&stats->roc);
3710 evcnt_detach(&stats->rjc);
3711 evcnt_detach(&stats->mngprc);
3712 evcnt_detach(&stats->mngpdc);
3713 evcnt_detach(&stats->xec);
3714
3715 /* Packet Transmission Stats */
3716 evcnt_detach(&stats->gotc);
3717 evcnt_detach(&stats->tpt);
3718 evcnt_detach(&stats->gptc);
3719 evcnt_detach(&stats->bptc);
3720 evcnt_detach(&stats->mptc);
3721 evcnt_detach(&stats->mngptc);
3722 evcnt_detach(&stats->ptc64);
3723 evcnt_detach(&stats->ptc127);
3724 evcnt_detach(&stats->ptc255);
3725 evcnt_detach(&stats->ptc511);
3726 evcnt_detach(&stats->ptc1023);
3727 evcnt_detach(&stats->ptc1522);
3728
3729 ixgbe_free_queues(adapter);
3730 free(adapter->mta, M_DEVBUF);
3731
3732 IXGBE_CORE_LOCK_DESTROY(adapter);
3733
3734 return (0);
3735 } /* ixgbe_detach */
3736
3737 /************************************************************************
3738 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3739 *
3740 * Prepare the adapter/port for LPLU and/or WoL
3741 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Caller must hold the core lock; ixgbe_stop() requires it. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/*
		 * X550EM baseT adapters need a special LPLU flow: the PHY
		 * must not be reset by ixgbe_stop() while we hand it over
		 * to Low Power Link Up, so disable resets around the call.
		 */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No wake-on-LAN: power the PHY down and clear wake state. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success, or the LPLU entry error for X550EM baseT. */
	return error;
} /* ixgbe_setup_low_power_mode */
3796
3797 /************************************************************************
3798 * ixgbe_shutdown - Shutdown entry point
3799 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * FreeBSD-style shutdown entry point: put the port into its low power
 * state on system shutdown.  Compiled out on NetBSD, where the same
 * work is done by the pmf(9) suspend hook (ixgbe_suspend).
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3816
3817 /************************************************************************
3818 * ixgbe_suspend
3819 *
3820 * From D0 to D3
3821 ************************************************************************/
3822 static bool
3823 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3824 {
3825 struct adapter *adapter = device_private(dev);
3826 int error = 0;
3827
3828 INIT_DEBUGOUT("ixgbe_suspend: begin");
3829
3830 IXGBE_CORE_LOCK(adapter);
3831
3832 error = ixgbe_setup_low_power_mode(adapter);
3833
3834 IXGBE_CORE_UNLOCK(adapter);
3835
3836 return (error);
3837 } /* ixgbe_suspend */
3838
3839 /************************************************************************
3840 * ixgbe_resume
3841 *
3842 * From D3 to D0
3843 ************************************************************************/
3844 static bool
3845 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3846 {
3847 struct adapter *adapter = device_private(dev);
3848 struct ifnet *ifp = adapter->ifp;
3849 struct ixgbe_hw *hw = &adapter->hw;
3850 u32 wus;
3851
3852 INIT_DEBUGOUT("ixgbe_resume: begin");
3853
3854 IXGBE_CORE_LOCK(adapter);
3855
3856 /* Read & clear WUS register */
3857 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3858 if (wus)
3859 device_printf(dev, "Woken up by (WUS): %#010x\n",
3860 IXGBE_READ_REG(hw, IXGBE_WUS));
3861 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3862 /* And clear WUFC until next low-power transition */
3863 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3864
3865 /*
3866 * Required after D3->D0 transition;
3867 * will re-advertise all previous advertised speeds
3868 */
3869 if (ifp->if_flags & IFF_UP)
3870 ixgbe_init_locked(adapter);
3871
3872 IXGBE_CORE_UNLOCK(adapter);
3873
3874 return true;
3875 } /* ixgbe_resume */
3876
3877 /*
3878 * Set the various hardware offload abilities.
3879 *
3880 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3881 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3882 * mbuf offload flags the driver will understand.
3883 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX intentionally empty.  On FreeBSD this translates
	 * if_capenable into if_hwassist mbuf offload flags; NetBSD has
	 * no if_hwassist equivalent (offloads are driven by capability
	 * flags and per-packet csum flags), so there is presumably
	 * nothing to program here — TODO confirm against the stack.
	 */
}
3889
3890 /************************************************************************
3891 * ixgbe_init_locked - Init entry point
3892 *
3893 * Used in two ways: It is used by the stack as an init
3894 * entry point in network interface structure. It is also
3895 * used by the driver as a hw/sw initialization routine to
3896 * get to a consistent state.
3897 *
3898 * return 0 on success, positive on failure
3899 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce: stop the MAC and all periodic callouts before reprogramming. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_stop(&adapter->recovery_mode_timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the RX queue-enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic timers stopped above. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4162
4163 /************************************************************************
4164 * ixgbe_init
4165 ************************************************************************/
4166 static int
4167 ixgbe_init(struct ifnet *ifp)
4168 {
4169 struct adapter *adapter = ifp->if_softc;
4170
4171 IXGBE_CORE_LOCK(adapter);
4172 ixgbe_init_locked(adapter);
4173 IXGBE_CORE_UNLOCK(adapter);
4174
4175 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4176 } /* ixgbe_init */
4177
4178 /************************************************************************
4179 * ixgbe_set_ivar
4180 *
4181 * Setup the correct IVAR register for a particular MSI-X interrupt
4182 * (yes this is all very magic and confusing :)
4183 * - entry is the register array entry
4184 * - vector is the MSI-X vector for this queue
4185 * - type is RX/TX/MISC
4186 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Hardware requires the "allocation valid" bit set in every IVAR entry. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: one flat table of byte-wide entries, four per
		 * 32-bit IVAR register.  RX entries occupy [0,64), TX
		 * entries [64,128); "other causes" has a fixed slot.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* Read-modify-write just the byte lane for this entry. */
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* Two byte-wide slots in IVAR_MISC, selected by entry bit 0. */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			/*
			 * Each IVAR register holds two queues; RX uses the
			 * low byte of each 16-bit half, TX the high byte.
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		/* Unknown MAC type: nothing to program. */
		break;
	}
} /* ixgbe_set_ivar */
4230
4231 /************************************************************************
4232 * ixgbe_configure_ivars
4233 ************************************************************************/
4234 static void
4235 ixgbe_configure_ivars(struct adapter *adapter)
4236 {
4237 struct ix_queue *que = adapter->queues;
4238 u32 newitr;
4239
4240 if (ixgbe_max_interrupt_rate > 0)
4241 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4242 else {
4243 /*
4244 * Disable DMA coalescing if interrupt moderation is
4245 * disabled.
4246 */
4247 adapter->dmac = 0;
4248 newitr = 0;
4249 }
4250
4251 for (int i = 0; i < adapter->num_queues; i++, que++) {
4252 struct rx_ring *rxr = &adapter->rx_rings[i];
4253 struct tx_ring *txr = &adapter->tx_rings[i];
4254 /* First the RX queue entry */
4255 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4256 /* ... and the TX */
4257 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4258 /* Set an Initial EITR value */
4259 ixgbe_eitr_write(adapter, que->msix, newitr);
4260 /*
4261 * To eliminate influence of the previous state.
4262 * At this point, Tx/Rx interrupt handler
4263 * (ixgbe_msix_que()) cannot be called, so both
4264 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4265 */
4266 que->eitr_setting = 0;
4267 }
4268
4269 /* For the Link interrupt */
4270 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4271 } /* ixgbe_configure_ivars */
4272
4273 /************************************************************************
4274 * ixgbe_config_gpie
4275 ************************************************************************/
4276 static void
4277 ixgbe_config_gpie(struct adapter *adapter)
4278 {
4279 struct ixgbe_hw *hw = &adapter->hw;
4280 u32 gpie;
4281
4282 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4283
4284 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4285 /* Enable Enhanced MSI-X mode */
4286 gpie |= IXGBE_GPIE_MSIX_MODE
4287 | IXGBE_GPIE_EIAME
4288 | IXGBE_GPIE_PBA_SUPPORT
4289 | IXGBE_GPIE_OCD;
4290 }
4291
4292 /* Fan Failure Interrupt */
4293 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4294 gpie |= IXGBE_SDP1_GPIEN;
4295
4296 /* Thermal Sensor Interrupt */
4297 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4298 gpie |= IXGBE_SDP0_GPIEN_X540;
4299
4300 /* Link detection */
4301 switch (hw->mac.type) {
4302 case ixgbe_mac_82599EB:
4303 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4304 break;
4305 case ixgbe_mac_X550EM_x:
4306 case ixgbe_mac_X550EM_a:
4307 gpie |= IXGBE_SDP0_GPIEN_X540;
4308 break;
4309 default:
4310 break;
4311 }
4312
4313 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4314
4315 } /* ixgbe_config_gpie */
4316
4317 /************************************************************************
4318 * ixgbe_config_delay_values
4319 *
4320 * Requires adapter->max_frame_size to be set.
4321 ************************************************************************/
4322 static void
4323 ixgbe_config_delay_values(struct adapter *adapter)
4324 {
4325 struct ixgbe_hw *hw = &adapter->hw;
4326 u32 rxpb, frame, size, tmp;
4327
4328 frame = adapter->max_frame_size;
4329
4330 /* Calculate High Water */
4331 switch (hw->mac.type) {
4332 case ixgbe_mac_X540:
4333 case ixgbe_mac_X550:
4334 case ixgbe_mac_X550EM_x:
4335 case ixgbe_mac_X550EM_a:
4336 tmp = IXGBE_DV_X540(frame, frame);
4337 break;
4338 default:
4339 tmp = IXGBE_DV(frame, frame);
4340 break;
4341 }
4342 size = IXGBE_BT2KB(tmp);
4343 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4344 hw->fc.high_water[0] = rxpb - size;
4345
4346 /* Now calculate Low Water */
4347 switch (hw->mac.type) {
4348 case ixgbe_mac_X540:
4349 case ixgbe_mac_X550:
4350 case ixgbe_mac_X550EM_x:
4351 case ixgbe_mac_X550EM_a:
4352 tmp = IXGBE_LOW_DV_X540(frame);
4353 break;
4354 default:
4355 tmp = IXGBE_LOW_DV(frame);
4356 break;
4357 }
4358 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4359
4360 hw->fc.pause_time = IXGBE_FC_PAUSE;
4361 hw->fc.send_xon = TRUE;
4362 } /* ixgbe_config_delay_values */
4363
4364 /************************************************************************
4365 * ixgbe_set_rxfilter - Multicast Update
4366 *
4367 * Called whenever multicast address list is updated.
4368 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the scratch multicast table from scratch each time. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/* ETHER_LOCK protects the multicast list and ec_flags. */
	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI if the table would overflow or if
		 * a range (addrlo != addrhi) is requested, which the
		 * exact-match filter cannot represent.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Translate promisc/allmulti state into FCTRL bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/* Drop ETHER_LOCK before the (potentially slow) HW update. */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4424
4425 /************************************************************************
4426 * ixgbe_mc_array_itr
4427 *
4428 * An iterator function needed by the multicast shared code.
4429 * It feeds the shared code routine the addresses in the
4430 * array of ixgbe_set_rxfilter() one by one.
4431 ************************************************************************/
4432 static u8 *
4433 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4434 {
4435 struct ixgbe_mc_addr *mta;
4436
4437 mta = (struct ixgbe_mc_addr *)*update_ptr;
4438 *vmdq = mta->vmdq;
4439
4440 *update_ptr = (u8*)(mta + 1);
4441
4442 return (mta->addr);
4443 } /* ixgbe_mc_array_itr */
4444
4445 /************************************************************************
4446 * ixgbe_local_timer - Timer routine
4447 *
4448 * Checks for link status, updates statistics,
4449 * and runs the watchdog check.
4450 ************************************************************************/
4451 static void
4452 ixgbe_local_timer(void *arg)
4453 {
4454 struct adapter *adapter = arg;
4455
4456 if (adapter->schedule_wqs_ok) {
4457 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4458 workqueue_enqueue(adapter->timer_wq,
4459 &adapter->timer_wc, NULL);
4460 }
4461 }
4462
static void
ixgbe_handle_timer(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	IXGBE_CORE_LOCK(adapter);

	/* Check for pluggable optics */
	if (ixgbe_is_sfp(hw)) {
		bool was_full = hw->phy.sfp_type != ixgbe_sfp_type_not_present;
		bool is_full = ixgbe_sfp_cage_full(hw);

		/* do probe if cage state changed */
		if (was_full ^ is_full) {
			atomic_or_32(&adapter->task_requests,
			    IXGBE_REQUEST_TASK_MOD);
			ixgbe_schedule_admin_tasklet(adapter);
		}
	}

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: sum the per-queue TX DMA setup
	 * error counts into the adapter-wide evcnts.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

	/* Re-arm the callout for the next tick. */
	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface via a full re-init. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_timer */
4578
4579 /************************************************************************
4580 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4581 ************************************************************************/
4582 static void
4583 ixgbe_recovery_mode_timer(void *arg)
4584 {
4585 struct adapter *adapter = arg;
4586
4587 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0)
4588 {
4589 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4590 &adapter->recovery_mode_timer_wc, NULL);
4591 }
4592 }
4593
4594 static void
4595 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4596 {
4597 struct adapter *adapter = context;
4598 struct ixgbe_hw *hw = &adapter->hw;
4599
4600 IXGBE_CORE_LOCK(adapter);
4601 if (ixgbe_fw_recovery_mode(hw)) {
4602 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4603 /* Firmware error detected, entering recovery mode */
4604 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4605
4606 if (hw->adapter_stopped == FALSE)
4607 ixgbe_stop(adapter);
4608 }
4609 } else
4610 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4611
4612 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4613 callout_reset(&adapter->recovery_mode_timer, hz,
4614 ixgbe_recovery_mode_timer, adapter);
4615 IXGBE_CORE_UNLOCK(adapter);
4616 } /* ixgbe_handle_recovery_mode_timer */
4617
4618 /************************************************************************
4619 * ixgbe_sfp_cage_full
4620 *
4621 * Determine if a port had optics inserted.
4622 ************************************************************************/
4623 static bool
4624 ixgbe_sfp_cage_full(struct ixgbe_hw *hw)
4625 {
4626 uint32_t mask;
4627 int rv;
4628
4629 if (hw->mac.type >= ixgbe_mac_X540)
4630 mask = IXGBE_ESDP_SDP0;
4631 else
4632 mask = IXGBE_ESDP_SDP2;
4633
4634 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask;
4635 if (hw->mac.type == ixgbe_mac_X550EM_a) {
4636 /* It seems X550EM_a's SDP0 is inverted than others... */
4637 return (rv == 0);
4638 }
4639
4640 return rv;
4641 } /* ixgbe_sfp_cage_full */
4642
4643 /************************************************************************
4644 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4645 ************************************************************************/
4646 static void
4647 ixgbe_handle_mod(void *context)
4648 {
4649 struct adapter *adapter = context;
4650 struct ixgbe_hw *hw = &adapter->hw;
4651 device_t dev = adapter->dev;
4652 u32 err, cage_full = 0;
4653
4654 ++adapter->mod_workev.ev_count;
4655 if (adapter->hw.need_crosstalk_fix) {
4656 switch (hw->mac.type) {
4657 case ixgbe_mac_82599EB:
4658 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4659 IXGBE_ESDP_SDP2;
4660 break;
4661 case ixgbe_mac_X550EM_x:
4662 case ixgbe_mac_X550EM_a:
4663 /*
4664 * XXX See ixgbe_sfp_cage_full(). It seems the bit is
4665 * inverted on X550EM_a, so I think this is incorrect.
4666 */
4667 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4668 IXGBE_ESDP_SDP0;
4669 break;
4670 default:
4671 break;
4672 }
4673
4674 if (!cage_full)
4675 goto out;
4676 }
4677
4678 err = hw->phy.ops.identify_sfp(hw);
4679 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4680 device_printf(dev,
4681 "Unsupported SFP+ module type was detected.\n");
4682 goto out;
4683 }
4684
4685 if (hw->need_unsupported_sfp_recovery) {
4686 device_printf(dev, "Recovering from unsupported SFP\n");
4687 /*
4688 * We could recover the status by calling setup_sfp(),
4689 * setup_link() and some others. It's complex and might not
4690 * work correctly on some unknown cases. To avoid such type of
4691 * problem, call ixgbe_init_locked(). It's simple and safe
4692 * approach.
4693 */
4694 ixgbe_init_locked(adapter);
4695 } else {
4696 if (hw->mac.type == ixgbe_mac_82598EB)
4697 err = hw->phy.ops.reset(hw);
4698 else {
4699 err = hw->mac.ops.setup_sfp(hw);
4700 hw->phy.sfp_setup_needed = FALSE;
4701 }
4702 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4703 device_printf(dev,
4704 "Setup failure - unsupported SFP+ module type.\n");
4705 goto out;
4706 }
4707 }
4708
4709 out:
4710 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4711 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4712
4713 /* Adjust media types shown in ifconfig */
4714 IXGBE_CORE_UNLOCK(adapter);
4715 ifmedia_removeall(&adapter->media);
4716 ixgbe_add_media_types(adapter);
4717 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4718 IXGBE_CORE_LOCK(adapter);
4719
4720 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
4721 /*
4722 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4723 * the workqueue now.
4724 */
4725 } /* ixgbe_handle_mod */
4726
4727
4728 /************************************************************************
4729 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4730 ************************************************************************/
4731 static void
4732 ixgbe_handle_msf(void *context)
4733 {
4734 struct adapter *adapter = context;
4735 struct ixgbe_hw *hw = &adapter->hw;
4736 u32 autoneg;
4737 bool negotiate;
4738
4739 ++adapter->msf_workev.ev_count;
4740
4741 autoneg = hw->phy.autoneg_advertised;
4742 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4743 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4744 if (hw->mac.ops.setup_link)
4745 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4746 } /* ixgbe_handle_msf */
4747
4748 /************************************************************************
4749 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4750 ************************************************************************/
4751 static void
4752 ixgbe_handle_phy(void *context)
4753 {
4754 struct adapter *adapter = context;
4755 struct ixgbe_hw *hw = &adapter->hw;
4756 int error;
4757
4758 ++adapter->phy_workev.ev_count;
4759 error = hw->phy.ops.handle_lasi(hw);
4760 if (error == IXGBE_ERR_OVERTEMP)
4761 device_printf(adapter->dev,
4762 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4763 " PHY will downshift to lower power state!\n");
4764 else if (error)
4765 device_printf(adapter->dev,
4766 "Error handling LASI interrupt: %d\n", error);
4767 } /* ixgbe_handle_phy */
4768
/************************************************************************
 * ixgbe_handle_admin - Workqueue handler for deferred admin tasks
 *
 *   Processes the task bits accumulated in adapter->task_requests by
 *   the interrupt handlers (link, SFP module, multispeed fiber,
 *   external PHY and flow director), then re-enables the interrupts
 *   that were left masked while those tasks were pending.
 ************************************************************************/
static void
ixgbe_handle_admin(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 req;

	/*
	 * Hold the IFNET_LOCK across this entire call. This will
	 * prevent additional changes to adapter->phy_layer
	 * and serialize calls to this tasklet. We cannot hold the
	 * CORE_LOCK while calling into the ifmedia functions as
	 * they call ifmedia_lock() and the lock is CORE_LOCK.
	 */
	IFNET_LOCK(ifp);
	IXGBE_CORE_LOCK(adapter);
	/* Loop until every requested task, including new arrivals, is done. */
	while ((req = adapter->task_requests) != 0) {
		if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
			ixgbe_handle_link(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_LSC);
		}
		if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
			ixgbe_handle_mod(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_MOD);
		}
		if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
			ixgbe_handle_msf(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_MSF);
		}
		if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
			ixgbe_handle_phy(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_PHY);
		}
		if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
			ixgbe_reinit_fdir(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_FDIR);
		}
#if 0 /* notyet */
		if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
			ixgbe_handle_mbx(adapter);
			atomic_and_32(&adapter->task_requests,
			    ~IXGBE_REQUEST_TASK_MBX);
		}
#endif
	}
	/* Allow the interrupt side to schedule this work again. */
	atomic_store_relaxed(&adapter->admin_pending, 0);
	if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
		atomic_and_32(&adapter->task_requests,
		    ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
		if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
			/* Re-enable other interrupts */
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
		} else
			ixgbe_enable_intr(adapter);
	}

	IXGBE_CORE_UNLOCK(adapter);
	IFNET_UNLOCK(ifp);
} /* ixgbe_handle_admin */
4834
/************************************************************************
 * ixgbe_ifstop - if_stop handler
 *
 *   Stops the hardware, then waits for the admin and timer workqueues
 *   to drain so no handler is left running after the interface is
 *   marked down, clearing the pending flags for the next start.
 ************************************************************************/
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain in-flight work and reset the pending markers. */
	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
	atomic_store_relaxed(&adapter->admin_pending, 0);
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);
}
4849
4850 /************************************************************************
4851 * ixgbe_stop - Stop the hardware
4852 *
4853 * Disables all traffic on the adapter by issuing a
4854 * global reset on the MAC and deallocates TX/RX buffers.
4855 ************************************************************************/
4856 static void
4857 ixgbe_stop(void *arg)
4858 {
4859 struct ifnet *ifp;
4860 struct adapter *adapter = arg;
4861 struct ixgbe_hw *hw = &adapter->hw;
4862
4863 ifp = adapter->ifp;
4864
4865 KASSERT(mutex_owned(&adapter->core_mtx));
4866
4867 INIT_DEBUGOUT("ixgbe_stop: begin\n");
4868 ixgbe_disable_intr(adapter);
4869 callout_stop(&adapter->timer);
4870
4871 /* Don't schedule workqueues. */
4872 adapter->schedule_wqs_ok = false;
4873
4874 /* Let the stack know...*/
4875 ifp->if_flags &= ~IFF_RUNNING;
4876
4877 ixgbe_reset_hw(hw);
4878 hw->adapter_stopped = FALSE;
4879 ixgbe_stop_adapter(hw);
4880 if (hw->mac.type == ixgbe_mac_82599EB)
4881 ixgbe_stop_mac_link_on_d3_82599(hw);
4882 /* Turn off the laser - noop with no optics */
4883 ixgbe_disable_tx_laser(hw);
4884
4885 /* Update the stack */
4886 adapter->link_up = FALSE;
4887 ixgbe_update_link_status(adapter);
4888
4889 /* reprogram the RAR[0] in case user changed it. */
4890 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4891
4892 return;
4893 } /* ixgbe_stop */
4894
4895 /************************************************************************
4896 * ixgbe_update_link_status - Update OS on link state
4897 *
4898 * Note: Only updates the OS on the cached link state.
4899 * The real check of the hardware only happens with
4900 * a link interrupt.
4901 ************************************************************************/
4902 static void
4903 ixgbe_update_link_status(struct adapter *adapter)
4904 {
4905 struct ifnet *ifp = adapter->ifp;
4906 device_t dev = adapter->dev;
4907 struct ixgbe_hw *hw = &adapter->hw;
4908
4909 KASSERT(mutex_owned(&adapter->core_mtx));
4910
4911 if (adapter->link_up) {
4912 if (adapter->link_active != LINK_STATE_UP) {
4913 /*
4914 * To eliminate influence of the previous state
4915 * in the same way as ixgbe_init_locked().
4916 */
4917 struct ix_queue *que = adapter->queues;
4918 for (int i = 0; i < adapter->num_queues; i++, que++)
4919 que->eitr_setting = 0;
4920
4921 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4922 /*
4923 * Discard count for both MAC Local Fault and
4924 * Remote Fault because those registers are
4925 * valid only when the link speed is up and
4926 * 10Gbps.
4927 */
4928 IXGBE_READ_REG(hw, IXGBE_MLFC);
4929 IXGBE_READ_REG(hw, IXGBE_MRFC);
4930 }
4931
4932 if (bootverbose) {
4933 const char *bpsmsg;
4934
4935 switch (adapter->link_speed) {
4936 case IXGBE_LINK_SPEED_10GB_FULL:
4937 bpsmsg = "10 Gbps";
4938 break;
4939 case IXGBE_LINK_SPEED_5GB_FULL:
4940 bpsmsg = "5 Gbps";
4941 break;
4942 case IXGBE_LINK_SPEED_2_5GB_FULL:
4943 bpsmsg = "2.5 Gbps";
4944 break;
4945 case IXGBE_LINK_SPEED_1GB_FULL:
4946 bpsmsg = "1 Gbps";
4947 break;
4948 case IXGBE_LINK_SPEED_100_FULL:
4949 bpsmsg = "100 Mbps";
4950 break;
4951 case IXGBE_LINK_SPEED_10_FULL:
4952 bpsmsg = "10 Mbps";
4953 break;
4954 default:
4955 bpsmsg = "unknown speed";
4956 break;
4957 }
4958 device_printf(dev, "Link is up %s %s \n",
4959 bpsmsg, "Full Duplex");
4960 }
4961 adapter->link_active = LINK_STATE_UP;
4962 /* Update any Flow Control changes */
4963 ixgbe_fc_enable(&adapter->hw);
4964 /* Update DMA coalescing config */
4965 ixgbe_config_dmac(adapter);
4966 if_link_state_change(ifp, LINK_STATE_UP);
4967
4968 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4969 ixgbe_ping_all_vfs(adapter);
4970 }
4971 } else {
4972 /*
4973 * Do it when link active changes to DOWN. i.e.
4974 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
4975 * b) LINK_STATE_UP -> LINK_STATE_DOWN
4976 */
4977 if (adapter->link_active != LINK_STATE_DOWN) {
4978 if (bootverbose)
4979 device_printf(dev, "Link is Down\n");
4980 if_link_state_change(ifp, LINK_STATE_DOWN);
4981 adapter->link_active = LINK_STATE_DOWN;
4982 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4983 ixgbe_ping_all_vfs(adapter);
4984 ixgbe_drain_all(adapter);
4985 }
4986 }
4987 } /* ixgbe_update_link_status */
4988
4989 /************************************************************************
4990 * ixgbe_config_dmac - Configure DMA Coalescing
4991 ************************************************************************/
4992 static void
4993 ixgbe_config_dmac(struct adapter *adapter)
4994 {
4995 struct ixgbe_hw *hw = &adapter->hw;
4996 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4997
4998 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4999 return;
5000
5001 if (dcfg->watchdog_timer ^ adapter->dmac ||
5002 dcfg->link_speed ^ adapter->link_speed) {
5003 dcfg->watchdog_timer = adapter->dmac;
5004 dcfg->fcoe_en = false;
5005 dcfg->link_speed = adapter->link_speed;
5006 dcfg->num_tcs = 1;
5007
5008 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5009 dcfg->watchdog_timer, dcfg->link_speed);
5010
5011 hw->mac.ops.dmac_config(hw);
5012 }
5013 } /* ixgbe_config_dmac */
5014
5015 /************************************************************************
5016 * ixgbe_enable_intr
5017 ************************************************************************/
5018 static void
5019 ixgbe_enable_intr(struct adapter *adapter)
5020 {
5021 struct ixgbe_hw *hw = &adapter->hw;
5022 struct ix_queue *que = adapter->queues;
5023 u32 mask, fwsm;
5024
5025 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5026
5027 switch (adapter->hw.mac.type) {
5028 case ixgbe_mac_82599EB:
5029 mask |= IXGBE_EIMS_ECC;
5030 /* Temperature sensor on some adapters */
5031 mask |= IXGBE_EIMS_GPI_SDP0;
5032 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5033 mask |= IXGBE_EIMS_GPI_SDP1;
5034 mask |= IXGBE_EIMS_GPI_SDP2;
5035 break;
5036 case ixgbe_mac_X540:
5037 /* Detect if Thermal Sensor is enabled */
5038 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5039 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5040 mask |= IXGBE_EIMS_TS;
5041 mask |= IXGBE_EIMS_ECC;
5042 break;
5043 case ixgbe_mac_X550:
5044 /* MAC thermal sensor is automatically enabled */
5045 mask |= IXGBE_EIMS_TS;
5046 mask |= IXGBE_EIMS_ECC;
5047 break;
5048 case ixgbe_mac_X550EM_x:
5049 case ixgbe_mac_X550EM_a:
5050 /* Some devices use SDP0 for important information */
5051 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5052 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5053 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5054 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5055 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5056 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5057 mask |= IXGBE_EICR_GPI_SDP0_X540;
5058 mask |= IXGBE_EIMS_ECC;
5059 break;
5060 default:
5061 break;
5062 }
5063
5064 /* Enable Fan Failure detection */
5065 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5066 mask |= IXGBE_EIMS_GPI_SDP1;
5067 /* Enable SR-IOV */
5068 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5069 mask |= IXGBE_EIMS_MAILBOX;
5070 /* Enable Flow Director */
5071 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5072 mask |= IXGBE_EIMS_FLOW_DIR;
5073
5074 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5075
5076 /* With MSI-X we use auto clear */
5077 if (adapter->msix_mem) {
5078 mask = IXGBE_EIMS_ENABLE_MASK;
5079 /* Don't autoclear Link */
5080 mask &= ~IXGBE_EIMS_OTHER;
5081 mask &= ~IXGBE_EIMS_LSC;
5082 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5083 mask &= ~IXGBE_EIMS_MAILBOX;
5084 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5085 }
5086
5087 /*
5088 * Now enable all queues, this is done separately to
5089 * allow for handling the extended (beyond 32) MSI-X
5090 * vectors that can be used by 82599
5091 */
5092 for (int i = 0; i < adapter->num_queues; i++, que++)
5093 ixgbe_enable_queue(adapter, que->msix);
5094
5095 IXGBE_WRITE_FLUSH(hw);
5096
5097 } /* ixgbe_enable_intr */
5098
5099 /************************************************************************
5100 * ixgbe_disable_intr_internal
5101 ************************************************************************/
5102 static void
5103 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5104 {
5105 struct ix_queue *que = adapter->queues;
5106
5107 /* disable interrupts other than queues */
5108 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5109
5110 if (adapter->msix_mem)
5111 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5112
5113 for (int i = 0; i < adapter->num_queues; i++, que++)
5114 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5115
5116 IXGBE_WRITE_FLUSH(&adapter->hw);
5117
5118 } /* ixgbe_do_disable_intr_internal */
5119
5120 /************************************************************************
5121 * ixgbe_disable_intr
5122 ************************************************************************/
5123 static void
5124 ixgbe_disable_intr(struct adapter *adapter)
5125 {
5126
5127 ixgbe_disable_intr_internal(adapter, true);
5128 } /* ixgbe_disable_intr */
5129
5130 /************************************************************************
5131 * ixgbe_ensure_disabled_intr
5132 ************************************************************************/
5133 void
5134 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5135 {
5136
5137 ixgbe_disable_intr_internal(adapter, false);
5138 } /* ixgbe_ensure_disabled_intr */
5139
5140 /************************************************************************
5141 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5142 ************************************************************************/
5143 static int
5144 ixgbe_legacy_irq(void *arg)
5145 {
5146 struct ix_queue *que = arg;
5147 struct adapter *adapter = que->adapter;
5148 struct ixgbe_hw *hw = &adapter->hw;
5149 struct ifnet *ifp = adapter->ifp;
5150 struct tx_ring *txr = adapter->tx_rings;
5151 bool more = false;
5152 bool reenable_intr = true;
5153 u32 eicr, eicr_mask;
5154 u32 task_requests = 0;
5155
5156 /* Silicon errata #26 on 82598 */
5157 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5158
5159 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5160
5161 adapter->stats.pf.legint.ev_count++;
5162 ++que->irqs.ev_count;
5163 if (eicr == 0) {
5164 adapter->stats.pf.intzero.ev_count++;
5165 if ((ifp->if_flags & IFF_UP) != 0)
5166 ixgbe_enable_intr(adapter);
5167 return 0;
5168 }
5169
5170 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5171 /*
5172 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
5173 */
5174 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5175
5176 #ifdef __NetBSD__
5177 /* Don't run ixgbe_rxeof in interrupt context */
5178 more = true;
5179 #else
5180 more = ixgbe_rxeof(que);
5181 #endif
5182
5183 IXGBE_TX_LOCK(txr);
5184 ixgbe_txeof(txr);
5185 #ifdef notyet
5186 if (!ixgbe_ring_empty(ifp, txr->br))
5187 ixgbe_start_locked(ifp, txr);
5188 #endif
5189 IXGBE_TX_UNLOCK(txr);
5190 }
5191
5192 /* Check for fan failure */
5193 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5194 ixgbe_check_fan_failure(adapter, eicr, true);
5195 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5196 }
5197
5198 /* Link status change */
5199 if (eicr & IXGBE_EICR_LSC)
5200 task_requests |= IXGBE_REQUEST_TASK_LSC;
5201
5202 if (ixgbe_is_sfp(hw)) {
5203 /* Pluggable optics-related interrupt */
5204 if (hw->mac.type >= ixgbe_mac_X540)
5205 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5206 else
5207 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5208
5209 if (eicr & eicr_mask) {
5210 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5211 task_requests |= IXGBE_REQUEST_TASK_MOD;
5212 }
5213
5214 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5215 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5216 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5217 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5218 task_requests |= IXGBE_REQUEST_TASK_MSF;
5219 }
5220 }
5221
5222 /* External PHY interrupt */
5223 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5224 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5225 task_requests |= IXGBE_REQUEST_TASK_PHY;
5226
5227 if (more) {
5228 que->req.ev_count++;
5229 ixgbe_sched_handle_que(adapter, que);
5230 reenable_intr = false;
5231 }
5232 if (task_requests != 0) {
5233 /* Re-enabling other interrupts is done in the admin task */
5234 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
5235 atomic_or_32(&adapter->task_requests, task_requests);
5236 ixgbe_schedule_admin_tasklet(adapter);
5237 reenable_intr = false;
5238 }
5239
5240 if (reenable_intr == true)
5241 ixgbe_enable_intr(adapter);
5242
5243 return 1;
5244 } /* ixgbe_legacy_irq */
5245
5246 /************************************************************************
5247 * ixgbe_free_pciintr_resources
5248 ************************************************************************/
5249 static void
5250 ixgbe_free_pciintr_resources(struct adapter *adapter)
5251 {
5252 struct ix_queue *que = adapter->queues;
5253 int rid;
5254
5255 /*
5256 * Release all msix queue resources:
5257 */
5258 for (int i = 0; i < adapter->num_queues; i++, que++) {
5259 if (que->res != NULL) {
5260 pci_intr_disestablish(adapter->osdep.pc,
5261 adapter->osdep.ihs[i]);
5262 adapter->osdep.ihs[i] = NULL;
5263 }
5264 }
5265
5266 /* Clean the Legacy or Link interrupt last */
5267 if (adapter->vector) /* we are doing MSIX */
5268 rid = adapter->vector;
5269 else
5270 rid = 0;
5271
5272 if (adapter->osdep.ihs[rid] != NULL) {
5273 pci_intr_disestablish(adapter->osdep.pc,
5274 adapter->osdep.ihs[rid]);
5275 adapter->osdep.ihs[rid] = NULL;
5276 }
5277
5278 if (adapter->osdep.intrs != NULL) {
5279 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5280 adapter->osdep.nintrs);
5281 adapter->osdep.intrs = NULL;
5282 }
5283 } /* ixgbe_free_pciintr_resources */
5284
5285 /************************************************************************
5286 * ixgbe_free_pci_resources
5287 ************************************************************************/
5288 static void
5289 ixgbe_free_pci_resources(struct adapter *adapter)
5290 {
5291
5292 ixgbe_free_pciintr_resources(adapter);
5293
5294 if (adapter->osdep.mem_size != 0) {
5295 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5296 adapter->osdep.mem_bus_space_handle,
5297 adapter->osdep.mem_size);
5298 }
5299
5300 } /* ixgbe_free_pci_resources */
5301
5302 /************************************************************************
5303 * ixgbe_set_sysctl_value
5304 ************************************************************************/
5305 static void
5306 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5307 const char *description, int *limit, int value)
5308 {
5309 device_t dev = adapter->dev;
5310 struct sysctllog **log;
5311 const struct sysctlnode *rnode, *cnode;
5312
5313 /*
5314 * It's not required to check recovery mode because this function never
5315 * touches hardware.
5316 */
5317
5318 log = &adapter->sysctllog;
5319 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5320 aprint_error_dev(dev, "could not create sysctl root\n");
5321 return;
5322 }
5323 if (sysctl_createv(log, 0, &rnode, &cnode,
5324 CTLFLAG_READWRITE, CTLTYPE_INT,
5325 name, SYSCTL_DESCR(description),
5326 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5327 aprint_error_dev(dev, "could not create sysctl\n");
5328 *limit = value;
5329 } /* ixgbe_set_sysctl_value */
5330
5331 /************************************************************************
5332 * ixgbe_sysctl_flowcntl
5333 *
5334 * SYSCTL wrapper around setting Flow Control
5335 ************************************************************************/
5336 static int
5337 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5338 {
5339 struct sysctlnode node = *rnode;
5340 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5341 int error, fc;
5342
5343 if (ixgbe_fw_recovery_mode_swflag(adapter))
5344 return (EPERM);
5345
5346 fc = adapter->hw.fc.current_mode;
5347 node.sysctl_data = &fc;
5348 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5349 if (error != 0 || newp == NULL)
5350 return error;
5351
5352 /* Don't bother if it's not changed */
5353 if (fc == adapter->hw.fc.current_mode)
5354 return (0);
5355
5356 return ixgbe_set_flowcntl(adapter, fc);
5357 } /* ixgbe_sysctl_flowcntl */
5358
5359 /************************************************************************
5360 * ixgbe_set_flowcntl - Set flow control
5361 *
5362 * Flow control values:
5363 * 0 - off
5364 * 1 - rx pause
5365 * 2 - tx pause
5366 * 3 - full
5367 ************************************************************************/
5368 static int
5369 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5370 {
5371 switch (fc) {
5372 case ixgbe_fc_rx_pause:
5373 case ixgbe_fc_tx_pause:
5374 case ixgbe_fc_full:
5375 adapter->hw.fc.requested_mode = fc;
5376 if (adapter->num_queues > 1)
5377 ixgbe_disable_rx_drop(adapter);
5378 break;
5379 case ixgbe_fc_none:
5380 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5381 if (adapter->num_queues > 1)
5382 ixgbe_enable_rx_drop(adapter);
5383 break;
5384 default:
5385 return (EINVAL);
5386 }
5387
5388 #if 0 /* XXX NetBSD */
5389 /* Don't autoneg if forcing a value */
5390 adapter->hw.fc.disable_fc_autoneg = TRUE;
5391 #endif
5392 ixgbe_fc_enable(&adapter->hw);
5393
5394 return (0);
5395 } /* ixgbe_set_flowcntl */
5396
5397 /************************************************************************
5398 * ixgbe_enable_rx_drop
5399 *
5400 * Enable the hardware to drop packets when the buffer is
5401 * full. This is useful with multiqueue, so that no single
5402 * queue being full stalls the entire RX engine. We only
5403 * enable this when Multiqueue is enabled AND Flow Control
5404 * is disabled.
5405 ************************************************************************/
5406 static void
5407 ixgbe_enable_rx_drop(struct adapter *adapter)
5408 {
5409 struct ixgbe_hw *hw = &adapter->hw;
5410 struct rx_ring *rxr;
5411 u32 srrctl;
5412
5413 for (int i = 0; i < adapter->num_queues; i++) {
5414 rxr = &adapter->rx_rings[i];
5415 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5416 srrctl |= IXGBE_SRRCTL_DROP_EN;
5417 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5418 }
5419
5420 /* enable drop for each vf */
5421 for (int i = 0; i < adapter->num_vfs; i++) {
5422 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5423 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5424 IXGBE_QDE_ENABLE));
5425 }
5426 } /* ixgbe_enable_rx_drop */
5427
5428 /************************************************************************
5429 * ixgbe_disable_rx_drop
5430 ************************************************************************/
5431 static void
5432 ixgbe_disable_rx_drop(struct adapter *adapter)
5433 {
5434 struct ixgbe_hw *hw = &adapter->hw;
5435 struct rx_ring *rxr;
5436 u32 srrctl;
5437
5438 for (int i = 0; i < adapter->num_queues; i++) {
5439 rxr = &adapter->rx_rings[i];
5440 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5441 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5442 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5443 }
5444
5445 /* disable drop for each vf */
5446 for (int i = 0; i < adapter->num_vfs; i++) {
5447 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5448 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5449 }
5450 } /* ixgbe_disable_rx_drop */
5451
5452 /************************************************************************
5453 * ixgbe_sysctl_advertise
5454 *
5455 * SYSCTL wrapper around setting advertised speed
5456 ************************************************************************/
5457 static int
5458 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5459 {
5460 struct sysctlnode node = *rnode;
5461 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5462 int error = 0, advertise;
5463
5464 if (ixgbe_fw_recovery_mode_swflag(adapter))
5465 return (EPERM);
5466
5467 advertise = adapter->advertise;
5468 node.sysctl_data = &advertise;
5469 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5470 if (error != 0 || newp == NULL)
5471 return error;
5472
5473 return ixgbe_set_advertise(adapter, advertise);
5474 } /* ixgbe_sysctl_advertise */
5475
5476 /************************************************************************
5477 * ixgbe_set_advertise - Control advertised link speed
5478 *
5479 * Flags:
5480 * 0x00 - Default (all capable link speed)
5481 * 0x01 - advertise 100 Mb
5482 * 0x02 - advertise 1G
5483 * 0x04 - advertise 10G
5484 * 0x08 - advertise 10 Mb
5485 * 0x10 - advertise 2.5G
5486 * 0x20 - advertise 5G
5487 ************************************************************************/
5488 static int
5489 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5490 {
5491 device_t dev;
5492 struct ixgbe_hw *hw;
5493 ixgbe_link_speed speed = 0;
5494 ixgbe_link_speed link_caps = 0;
5495 s32 err = IXGBE_NOT_IMPLEMENTED;
5496 bool negotiate = FALSE;
5497
5498 /* Checks to validate new value */
5499 if (adapter->advertise == advertise) /* no change */
5500 return (0);
5501
5502 dev = adapter->dev;
5503 hw = &adapter->hw;
5504
5505 /* No speed changes for backplane media */
5506 if (hw->phy.media_type == ixgbe_media_type_backplane)
5507 return (ENODEV);
5508
5509 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5510 (hw->phy.multispeed_fiber))) {
5511 device_printf(dev,
5512 "Advertised speed can only be set on copper or "
5513 "multispeed fiber media types.\n");
5514 return (EINVAL);
5515 }
5516
5517 if (advertise < 0x0 || advertise > 0x2f) {
5518 device_printf(dev,
5519 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5520 return (EINVAL);
5521 }
5522
5523 if (hw->mac.ops.get_link_capabilities) {
5524 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5525 &negotiate);
5526 if (err != IXGBE_SUCCESS) {
5527 device_printf(dev, "Unable to determine supported advertise speeds\n");
5528 return (ENODEV);
5529 }
5530 }
5531
5532 /* Set new value and report new advertised mode */
5533 if (advertise & 0x1) {
5534 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5535 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5536 return (EINVAL);
5537 }
5538 speed |= IXGBE_LINK_SPEED_100_FULL;
5539 }
5540 if (advertise & 0x2) {
5541 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5542 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5543 return (EINVAL);
5544 }
5545 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5546 }
5547 if (advertise & 0x4) {
5548 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5549 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5550 return (EINVAL);
5551 }
5552 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5553 }
5554 if (advertise & 0x8) {
5555 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5556 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5557 return (EINVAL);
5558 }
5559 speed |= IXGBE_LINK_SPEED_10_FULL;
5560 }
5561 if (advertise & 0x10) {
5562 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5563 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5564 return (EINVAL);
5565 }
5566 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5567 }
5568 if (advertise & 0x20) {
5569 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5570 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5571 return (EINVAL);
5572 }
5573 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5574 }
5575 if (advertise == 0)
5576 speed = link_caps; /* All capable link speed */
5577
5578 hw->mac.autotry_restart = TRUE;
5579 hw->mac.ops.setup_link(hw, speed, TRUE);
5580 adapter->advertise = advertise;
5581
5582 return (0);
5583 } /* ixgbe_set_advertise */
5584
5585 /************************************************************************
5586 * ixgbe_get_advertise - Get current advertised speed settings
5587 *
5588 * Formatted for sysctl usage.
5589 * Flags:
5590 * 0x01 - advertise 100 Mb
5591 * 0x02 - advertise 1G
5592 * 0x04 - advertise 10G
5593 * 0x08 - advertise 10 Mb (yes, Mb)
5594 * 0x10 - advertise 2.5G
5595 * 0x20 - advertise 5G
5596 ************************************************************************/
5597 static int
5598 ixgbe_get_advertise(struct adapter *adapter)
5599 {
5600 struct ixgbe_hw *hw = &adapter->hw;
5601 int speed;
5602 ixgbe_link_speed link_caps = 0;
5603 s32 err;
5604 bool negotiate = FALSE;
5605
5606 /*
5607 * Advertised speed means nothing unless it's copper or
5608 * multi-speed fiber
5609 */
5610 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5611 !(hw->phy.multispeed_fiber))
5612 return (0);
5613
5614 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5615 if (err != IXGBE_SUCCESS)
5616 return (0);
5617
5618 speed =
5619 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5620 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5621 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5622 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5623 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5624 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5625
5626 return speed;
5627 } /* ixgbe_get_advertise */
5628
5629 /************************************************************************
5630 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5631 *
5632 * Control values:
5633 * 0/1 - off / on (use default value of 1000)
5634 *
5635 * Legal timer values are:
5636 * 50,100,250,500,1000,2000,5000,10000
5637 *
5638 * Turning off interrupt moderation will also turn this off.
5639 ************************************************************************/
5640 static int
5641 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5642 {
5643 struct sysctlnode node = *rnode;
5644 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5645 struct ifnet *ifp = adapter->ifp;
5646 int error;
5647 int newval;
5648
5649 if (ixgbe_fw_recovery_mode_swflag(adapter))
5650 return (EPERM);
5651
5652 newval = adapter->dmac;
5653 node.sysctl_data = &newval;
5654 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5655 if ((error) || (newp == NULL))
5656 return (error);
5657
5658 switch (newval) {
5659 case 0:
5660 /* Disabled */
5661 adapter->dmac = 0;
5662 break;
5663 case 1:
5664 /* Enable and use default */
5665 adapter->dmac = 1000;
5666 break;
5667 case 50:
5668 case 100:
5669 case 250:
5670 case 500:
5671 case 1000:
5672 case 2000:
5673 case 5000:
5674 case 10000:
5675 /* Legal values - allow */
5676 adapter->dmac = newval;
5677 break;
5678 default:
5679 /* Do nothing, illegal value */
5680 return (EINVAL);
5681 }
5682
5683 /* Re-initialize hardware if it's already running */
5684 if (ifp->if_flags & IFF_RUNNING)
5685 ifp->if_init(ifp);
5686
5687 return (0);
5688 }
5689
5690 #ifdef IXGBE_DEBUG
5691 /************************************************************************
5692 * ixgbe_sysctl_power_state
5693 *
5694 * Sysctl to test power states
5695 * Values:
5696 * 0 - set device to D0
5697 * 3 - set device to D3
5698 * (none) - get current device power state
5699 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this body is FreeBSD-derived and excluded from the
	 * NetBSD build ("notyet").  It still references the FreeBSD sysctl
	 * handle (req->newp) and newbus DEVICE_SUSPEND/DEVICE_RESUME; those
	 * would need NetBSD equivalents before enabling this code.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	/* Refuse while firmware recovery mode is active. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	/* Nothing to do if the requested state equals the current one. */
	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state control is not implemented on NetBSD yet. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5735 #endif
5736
5737 /************************************************************************
5738 * ixgbe_sysctl_wol_enable
5739 *
5740 * Sysctl to enable/disable the WoL capability,
5741 * if supported by the adapter.
5742 *
5743 * Values:
5744 * 0 - disabled
5745 * 1 - enabled
5746 ************************************************************************/
5747 static int
5748 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5749 {
5750 struct sysctlnode node = *rnode;
5751 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5752 struct ixgbe_hw *hw = &adapter->hw;
5753 bool new_wol_enabled;
5754 int error = 0;
5755
5756 /*
5757 * It's not required to check recovery mode because this function never
5758 * touches hardware.
5759 */
5760 new_wol_enabled = hw->wol_enabled;
5761 node.sysctl_data = &new_wol_enabled;
5762 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5763 if ((error) || (newp == NULL))
5764 return (error);
5765 if (new_wol_enabled == hw->wol_enabled)
5766 return (0);
5767
5768 if (new_wol_enabled && !adapter->wol_support)
5769 return (ENODEV);
5770 else
5771 hw->wol_enabled = new_wol_enabled;
5772
5773 return (0);
5774 } /* ixgbe_sysctl_wol_enable */
5775
5776 /************************************************************************
5777 * ixgbe_sysctl_wufc - Wake Up Filter Control
5778 *
5779 * Sysctl to enable/disable the types of packets that the
5780 * adapter will wake up on upon receipt.
5781 * Flags:
5782 * 0x1 - Link Status Change
5783 * 0x2 - Magic Packet
5784 * 0x4 - Direct Exact
5785 * 0x8 - Directed Multicast
5786 * 0x10 - Broadcast
5787 * 0x20 - ARP/IPv4 Request Packet
5788 * 0x40 - Direct IPv4 Packet
5789 * 0x80 - Direct IPv6 Packet
5790 *
5791 * Settings not listed above will cause the sysctl to return an error.
5792 ************************************************************************/
5793 static int
5794 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5795 {
5796 struct sysctlnode node = *rnode;
5797 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5798 int error = 0;
5799 u32 new_wufc;
5800
5801 /*
5802 * It's not required to check recovery mode because this function never
5803 * touches hardware.
5804 */
5805 new_wufc = adapter->wufc;
5806 node.sysctl_data = &new_wufc;
5807 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5808 if ((error) || (newp == NULL))
5809 return (error);
5810 if (new_wufc == adapter->wufc)
5811 return (0);
5812
5813 if (new_wufc & 0xffffff00)
5814 return (EINVAL);
5815
5816 new_wufc &= 0xff;
5817 new_wufc |= (0xffffff & adapter->wufc);
5818 adapter->wufc = new_wufc;
5819
5820 return (0);
5821 } /* ixgbe_sysctl_wufc */
5822
5823 #ifdef IXGBE_DEBUG
5824 /************************************************************************
5825 * ixgbe_sysctl_print_rss_config
5826 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): FreeBSD-derived body, excluded from the build
	 * ("notyet"); it still uses sbuf(9) and the FreeBSD sysctl
	 * handle "req", neither of which exists in the NetBSD kernel.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* No register access while firmware recovery is in progress. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has the extended 128-entry table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			/* First 32 entries live in RETA ... */
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* ... the remainder in the extended ERETA */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5883 #endif /* IXGBE_DEBUG */
5884
5885 /************************************************************************
5886 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5887 *
5888 * For X552/X557-AT devices using an external PHY
5889 ************************************************************************/
5890 static int
5891 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5892 {
5893 struct sysctlnode node = *rnode;
5894 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5895 struct ixgbe_hw *hw = &adapter->hw;
5896 int val;
5897 u16 reg;
5898 int error;
5899
5900 if (ixgbe_fw_recovery_mode_swflag(adapter))
5901 return (EPERM);
5902
5903 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5904 device_printf(adapter->dev,
5905 "Device has no supported external thermal sensor.\n");
5906 return (ENODEV);
5907 }
5908
5909 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5910 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5911 device_printf(adapter->dev,
5912 "Error reading from PHY's current temperature register\n");
5913 return (EAGAIN);
5914 }
5915
5916 node.sysctl_data = &val;
5917
5918 /* Shift temp for output */
5919 val = reg >> 8;
5920
5921 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5922 if ((error) || (newp == NULL))
5923 return (error);
5924
5925 return (0);
5926 } /* ixgbe_sysctl_phy_temp */
5927
5928 /************************************************************************
5929 * ixgbe_sysctl_phy_overtemp_occurred
5930 *
5931 * Reports (directly from the PHY) whether the current PHY
5932 * temperature is over the overtemp threshold.
5933 ************************************************************************/
5934 static int
5935 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5936 {
5937 struct sysctlnode node = *rnode;
5938 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5939 struct ixgbe_hw *hw = &adapter->hw;
5940 int val, error;
5941 u16 reg;
5942
5943 if (ixgbe_fw_recovery_mode_swflag(adapter))
5944 return (EPERM);
5945
5946 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5947 device_printf(adapter->dev,
5948 "Device has no supported external thermal sensor.\n");
5949 return (ENODEV);
5950 }
5951
5952 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5953 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5954 device_printf(adapter->dev,
5955 "Error reading from PHY's temperature status register\n");
5956 return (EAGAIN);
5957 }
5958
5959 node.sysctl_data = &val;
5960
5961 /* Get occurrence bit */
5962 val = !!(reg & 0x4000);
5963
5964 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5965 if ((error) || (newp == NULL))
5966 return (error);
5967
5968 return (0);
5969 } /* ixgbe_sysctl_phy_overtemp_occurred */
5970
5971 /************************************************************************
5972 * ixgbe_sysctl_eee_state
5973 *
5974 * Sysctl to set EEE power saving feature
5975 * Values:
5976 * 0 - disable EEE
5977 * 1 - enable EEE
5978 * (none) - get current device EEE state
5979 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	int curr_eee, new_eee, error = 0;
	s32 retval;

	/* EEE setup touches the hardware; refuse in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Export the cached enable state (0/1) to the sysctl node. */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
6030
/*
 * PRINTQS(adapter, regname) - dump the per-queue register "regname" for
 * every configured queue on one console line, prefixed with the device
 * name.  Used by ixgbe_print_debug_info() below.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
6044
6045 /************************************************************************
6046 * ixgbe_print_debug_info
6047 *
6048 * Called only when em_display_debug_stats is enabled.
6049 * Provides a way to take a look at important statistics
6050 * maintained by the driver and hardware.
6051 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* X550 family has the extended 128-entry RSS table. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the RSS redirection table: RETA first, then ERETA. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Header row of queue indices for the PRINTQS lines below. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	/* Per-queue RX descriptor ring registers. */
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* RQSMR covers four queues per register. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter of how often each queue was disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; EIMS_EX exists on 82599 and later only. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
} /* ixgbe_print_debug_info */
6117
6118 /************************************************************************
6119 * ixgbe_sysctl_debug
6120 ************************************************************************/
6121 static int
6122 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6123 {
6124 struct sysctlnode node = *rnode;
6125 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6126 int error, result = 0;
6127
6128 if (ixgbe_fw_recovery_mode_swflag(adapter))
6129 return (EPERM);
6130
6131 node.sysctl_data = &result;
6132 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6133
6134 if (error || newp == NULL)
6135 return error;
6136
6137 if (result == 1)
6138 ixgbe_print_debug_info(adapter);
6139
6140 return 0;
6141 } /* ixgbe_sysctl_debug */
6142
6143 /************************************************************************
6144 * ixgbe_init_device_features
6145 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Baseline capabilities common to every supported MAC. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass capability exists only on function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot use INTx. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass capability exists only on function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* The QSFP variant cannot use INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		/* RSS and SR-IOV both require MSI-X. */
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6263
6264 /************************************************************************
6265 * ixgbe_probe - Device identification routine
6266 *
6267 * Determines if the driver should be loaded on
6268 * adapter based on its PCI vendor/device ID.
6269 *
6270 * return BUS_PROBE_DEFAULT on success, positive on failure
6271 ************************************************************************/
6272 static int
6273 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6274 {
6275 const struct pci_attach_args *pa = aux;
6276
6277 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6278 }
6279
6280 static const ixgbe_vendor_info_t *
6281 ixgbe_lookup(const struct pci_attach_args *pa)
6282 {
6283 const ixgbe_vendor_info_t *ent;
6284 pcireg_t subid;
6285
6286 INIT_DEBUGOUT("ixgbe_lookup: begin");
6287
6288 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6289 return NULL;
6290
6291 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6292
6293 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6294 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6295 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6296 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6297 (ent->subvendor_id == 0)) &&
6298 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6299 (ent->subdevice_id == 0))) {
6300 return ent;
6301 }
6302 }
6303 return NULL;
6304 }
6305
6306 static int
6307 ixgbe_ifflags_cb(struct ethercom *ec)
6308 {
6309 struct ifnet *ifp = &ec->ec_if;
6310 struct adapter *adapter = ifp->if_softc;
6311 u_short change;
6312 int rv = 0;
6313
6314 IXGBE_CORE_LOCK(adapter);
6315
6316 change = ifp->if_flags ^ adapter->if_flags;
6317 if (change != 0)
6318 adapter->if_flags = ifp->if_flags;
6319
6320 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6321 rv = ENETRESET;
6322 goto out;
6323 } else if ((change & IFF_PROMISC) != 0)
6324 ixgbe_set_rxfilter(adapter);
6325
6326 /* Check for ec_capenable. */
6327 change = ec->ec_capenable ^ adapter->ec_capenable;
6328 adapter->ec_capenable = ec->ec_capenable;
6329 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6330 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6331 rv = ENETRESET;
6332 goto out;
6333 }
6334
6335 /*
6336 * Special handling is not required for ETHERCAP_VLAN_MTU.
6337 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
6338 */
6339
6340 /* Set up VLAN support and filter */
6341 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6342 ixgbe_setup_vlan_hw_support(adapter);
6343
6344 out:
6345 IXGBE_CORE_UNLOCK(adapter);
6346
6347 return rv;
6348 }
6349
6350 /************************************************************************
6351 * ixgbe_ioctl - Ioctl entry point
6352 *
6353 * Called when the user wants to configure the interface.
6354 *
6355 * return 0 on success, positive on failure
6356 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must toggle as a unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* No configuration changes while firmware recovery is active. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* First switch: debug tracing only (plus SIOCZIFDATA side effect). */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Clear both hardware and software counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual command handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only SFP EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Everything else funnels through ether_ioctl() first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: reprogram hardware only if already running. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6491
6492 /************************************************************************
6493 * ixgbe_check_fan_failure
6494 ************************************************************************/
6495 static void
6496 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6497 {
6498 u32 mask;
6499
6500 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6501 IXGBE_ESDP_SDP1;
6502
6503 if (reg & mask)
6504 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6505 } /* ixgbe_check_fan_failure */
6506
6507 /************************************************************************
6508 * ixgbe_handle_que
6509 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	/* Count deferred-processing invocations for this queue. */
	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Drain RX, then TX completions under the TX lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue path: push any packets queued on this ring. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule instead of re-enabling IRQ. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* No per-queue vector (legacy/MSI): unmask everything. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6547
6548 /************************************************************************
6549 * ixgbe_handle_que_work
6550 ************************************************************************/
6551 static void
6552 ixgbe_handle_que_work(struct work *wk, void *context)
6553 {
6554 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6555
6556 /*
6557 * "enqueued flag" is not required here.
6558 * See ixgbe_msix_que().
6559 */
6560 ixgbe_handle_que(que);
6561 }
6562
6563 /************************************************************************
6564 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6565 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char intrbuf[PCI_INTRSTR_LEN];
	char wqname[MAXCOMLEN];
	const char *intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/*
		 * Establish failed: release the vector and, if this was
		 * MSI, retry once with plain INTx (when capable).
		 */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record which interrupt type we actually ended up with. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		/* Multiqueue TX: softint plus workqueue for deferred start. */
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any of the deferred-processing contexts is missing. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6671
6672 /************************************************************************
6673 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6674 ************************************************************************/
6675 static int
6676 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6677 {
6678 device_t dev = adapter->dev;
6679 struct ix_queue *que = adapter->queues;
6680 struct tx_ring *txr = adapter->tx_rings;
6681 pci_chipset_tag_t pc;
6682 char intrbuf[PCI_INTRSTR_LEN];
6683 char intr_xname[32];
6684 char wqname[MAXCOMLEN];
6685 const char *intrstr = NULL;
6686 int error, vector = 0;
6687 int cpu_id = 0;
6688 kcpuset_t *affinity;
6689 #ifdef RSS
6690 unsigned int rss_buckets = 0;
6691 kcpuset_t cpu_mask;
6692 #endif
6693
6694 pc = adapter->osdep.pc;
6695 #ifdef RSS
6696 /*
6697 * If we're doing RSS, the number of queues needs to
6698 * match the number of RSS buckets that are configured.
6699 *
6700 * + If there's more queues than RSS buckets, we'll end
6701 * up with queues that get no traffic.
6702 *
6703 * + If there's more RSS buckets than queues, we'll end
6704 * up having multiple RSS buckets map to the same queue,
6705 * so there'll be some contention.
6706 */
6707 rss_buckets = rss_getnumbuckets();
6708 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6709 (adapter->num_queues != rss_buckets)) {
6710 device_printf(dev,
6711 "%s: number of queues (%d) != number of RSS buckets (%d)"
6712 "; performance will be impacted.\n",
6713 __func__, adapter->num_queues, rss_buckets);
6714 }
6715 #endif
6716
6717 adapter->osdep.nintrs = adapter->num_queues + 1;
6718 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6719 adapter->osdep.nintrs) != 0) {
6720 aprint_error_dev(dev,
6721 "failed to allocate MSI-X interrupt\n");
6722 return (ENXIO);
6723 }
6724
6725 kcpuset_create(&affinity, false);
6726 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6727 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6728 device_xname(dev), i);
6729 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6730 sizeof(intrbuf));
6731 #ifdef IXGBE_MPSAFE
6732 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6733 true);
6734 #endif
6735 /* Set the handler function */
6736 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6737 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6738 intr_xname);
6739 if (que->res == NULL) {
6740 aprint_error_dev(dev,
6741 "Failed to register QUE handler\n");
6742 error = ENXIO;
6743 goto err_out;
6744 }
6745 que->msix = vector;
6746 adapter->active_queues |= 1ULL << que->msix;
6747
6748 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6749 #ifdef RSS
6750 /*
6751 * The queue ID is used as the RSS layer bucket ID.
6752 * We look up the queue ID -> RSS CPU ID and select
6753 * that.
6754 */
6755 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6756 CPU_SETOF(cpu_id, &cpu_mask);
6757 #endif
6758 } else {
6759 /*
6760 * Bind the MSI-X vector, and thus the
6761 * rings to the corresponding CPU.
6762 *
6763 * This just happens to match the default RSS
6764 * round-robin bucket -> queue -> CPU allocation.
6765 */
6766 if (adapter->num_queues > 1)
6767 cpu_id = i;
6768 }
6769 /* Round-robin affinity */
6770 kcpuset_zero(affinity);
6771 kcpuset_set(affinity, cpu_id % ncpu);
6772 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6773 NULL);
6774 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6775 intrstr);
6776 if (error == 0) {
6777 #if 1 /* def IXGBE_DEBUG */
6778 #ifdef RSS
6779 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6780 cpu_id % ncpu);
6781 #else
6782 aprint_normal(", bound queue %d to cpu %d", i,
6783 cpu_id % ncpu);
6784 #endif
6785 #endif /* IXGBE_DEBUG */
6786 }
6787 aprint_normal("\n");
6788
6789 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6790 txr->txr_si = softint_establish(
6791 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6792 ixgbe_deferred_mq_start, txr);
6793 if (txr->txr_si == NULL) {
6794 aprint_error_dev(dev,
6795 "couldn't establish software interrupt\n");
6796 error = ENXIO;
6797 goto err_out;
6798 }
6799 }
6800 que->que_si
6801 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6802 ixgbe_handle_que, que);
6803 if (que->que_si == NULL) {
6804 aprint_error_dev(dev,
6805 "couldn't establish software interrupt\n");
6806 error = ENXIO;
6807 goto err_out;
6808 }
6809 }
6810 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6811 error = workqueue_create(&adapter->txr_wq, wqname,
6812 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6813 IXGBE_WORKQUEUE_FLAGS);
6814 if (error) {
6815 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6816 goto err_out;
6817 }
6818 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6819
6820 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6821 error = workqueue_create(&adapter->que_wq, wqname,
6822 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6823 IXGBE_WORKQUEUE_FLAGS);
6824 if (error) {
6825 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6826 goto err_out;
6827 }
6828
6829 /* and Link */
6830 cpu_id++;
6831 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6832 adapter->vector = vector;
6833 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6834 sizeof(intrbuf));
6835 #ifdef IXGBE_MPSAFE
6836 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6837 true);
6838 #endif
6839 /* Set the link handler function */
6840 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6841 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6842 intr_xname);
6843 if (adapter->osdep.ihs[vector] == NULL) {
6844 aprint_error_dev(dev, "Failed to register LINK handler\n");
6845 error = ENXIO;
6846 goto err_out;
6847 }
6848 /* Round-robin affinity */
6849 kcpuset_zero(affinity);
6850 kcpuset_set(affinity, cpu_id % ncpu);
6851 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6852 NULL);
6853
6854 aprint_normal_dev(dev,
6855 "for link, interrupting at %s", intrstr);
6856 if (error == 0)
6857 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6858 else
6859 aprint_normal("\n");
6860
6861 kcpuset_destroy(affinity);
6862 aprint_normal_dev(dev,
6863 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6864
6865 return (0);
6866
6867 err_out:
6868 kcpuset_destroy(affinity);
6869 ixgbe_free_workqueue(adapter);
6870 ixgbe_free_pciintr_resources(adapter);
6871 return (error);
6872 } /* ixgbe_allocate_msix */
6873
6874 /************************************************************************
6875 * ixgbe_configure_interrupts
6876 *
6877 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6878 * This will also depend on user settings.
6879 ************************************************************************/
6880 static int
6881 ixgbe_configure_interrupts(struct adapter *adapter)
6882 {
6883 device_t dev = adapter->dev;
6884 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6885 int want, queues, msgs;
6886
6887 /* Default to 1 queue if MSI-X setup fails */
6888 adapter->num_queues = 1;
6889
6890 /* Override by tuneable */
6891 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6892 goto msi;
6893
6894 /*
6895 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6896 * interrupt slot.
6897 */
6898 if (ncpu == 1)
6899 goto msi;
6900
6901 /* First try MSI-X */
6902 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6903 msgs = MIN(msgs, IXG_MAX_NINTR);
6904 if (msgs < 2)
6905 goto msi;
6906
6907 adapter->msix_mem = (void *)1; /* XXX */
6908
6909 /* Figure out a reasonable auto config value */
6910 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6911
6912 #ifdef RSS
6913 /* If we're doing RSS, clamp at the number of RSS buckets */
6914 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6915 queues = uimin(queues, rss_getnumbuckets());
6916 #endif
6917 if (ixgbe_num_queues > queues) {
6918 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6919 ixgbe_num_queues = queues;
6920 }
6921
6922 if (ixgbe_num_queues != 0)
6923 queues = ixgbe_num_queues;
6924 else
6925 queues = uimin(queues,
6926 uimin(mac->max_tx_queues, mac->max_rx_queues));
6927
6928 /* reflect correct sysctl value */
6929 ixgbe_num_queues = queues;
6930
6931 /*
6932 * Want one vector (RX/TX pair) per queue
6933 * plus an additional for Link.
6934 */
6935 want = queues + 1;
6936 if (msgs >= want)
6937 msgs = want;
6938 else {
6939 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6940 "%d vectors but %d queues wanted!\n",
6941 msgs, want);
6942 goto msi;
6943 }
6944 adapter->num_queues = queues;
6945 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6946 return (0);
6947
6948 /*
6949 * MSI-X allocation failed or provided us with
6950 * less vectors than needed. Free MSI-X resources
6951 * and we'll try enabling MSI.
6952 */
6953 msi:
6954 /* Without MSI-X, some features are no longer supported */
6955 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6956 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6957 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6958 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6959
6960 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6961 adapter->msix_mem = NULL; /* XXX */
6962 if (msgs > 1)
6963 msgs = 1;
6964 if (msgs != 0) {
6965 msgs = 1;
6966 adapter->feat_en |= IXGBE_FEATURE_MSI;
6967 return (0);
6968 }
6969
6970 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6971 aprint_error_dev(dev,
6972 "Device does not support legacy interrupts.\n");
6973 return 1;
6974 }
6975
6976 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6977
6978 return (0);
6979 } /* ixgbe_configure_interrupts */
6980
6981
6982 /************************************************************************
6983 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6984 *
6985 * Done outside of interrupt context since the driver might sleep
6986 ************************************************************************/
6987 static void
6988 ixgbe_handle_link(void *context)
6989 {
6990 struct adapter *adapter = context;
6991 struct ixgbe_hw *hw = &adapter->hw;
6992
6993 ++adapter->link_workev.ev_count;
6994 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6995 ixgbe_update_link_status(adapter);
6996
6997 /* Re-enable link interrupts */
6998 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6999 } /* ixgbe_handle_link */
7000
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 *   Software-trigger (rearm) the interrupts for the queues named in the
 *   'queues' bitmap by setting the corresponding bits in the EICS
 *   (Extended Interrupt Cause Set) register(s).
 *
 *   NOTE: currently compiled out (#if 0) — kept for reference.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single 32-bit EICS register. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Later MACs split the 64 queue bits over two EICS_EX regs. */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
7030