/* $NetBSD: ixgbe.c,v 1.245 2020/08/27 00:07:56 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_quirks(struct adapter *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_workqueue(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_schedule_admin_tasklet(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_cage_full(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile-time option
 * right now since it's applied during attach;
 * set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* NetBSD always uses the key from the RSS framework. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}
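	/* (RETA holds entries 0-127; the X550 ERETA registers hold 128-511) */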

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
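		/*
		 * E.g. with four queues (and the RSS feature off), the
		 * indirection entries 0-3 accumulate to 0x03020100 before
		 * the register write below.
		 */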
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
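	/* e.g. a 2048-byte RX buffer rounds to bufsz = 2 (SRRCTL 1 KB units) */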

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* one byte per queue within the register */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
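		/* e.g. queue 5's statistics map via RQSMR(1), byte 1 (bits 15:8) */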

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* one byte per queue within the register */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	const char *vendor, *product;

	/* Quirk for inverted logic of SFP+'s MOD_ABS */
	vendor = pmf_get_platform("system-vendor");
	product = pmf_get_platform("system-product");

	if ((vendor == NULL) || (product == NULL))
		return;

	if ((strcmp(vendor, "GIGABYTE") == 0) &&
	    (strcmp(product, "MA10-ST0") == 0)) {
		aprint_verbose_dev(dev, "Enable SFP+ MOD_ABS inverse quirk\n");
		adapter->quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
	}
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter	*adapter;
	struct ixgbe_hw *hw;
	int		error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool		unsupported_sfp = false;
	const char	*str;
	char		wqname[MAXCOMLEN];
	char		buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Set quirk flags */
	ixgbe_quirks(adapter);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print the PHY ID only for copper PHYs. On devices which have an
	 * SFP(+) cage and a module inserted, phy.id is not an MII PHY ID
	 * but an SFF-8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_workqueue(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16		dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet	*ifp;
	int		rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
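	/* (ec_capenable was snapshotted above, so HWFILTER stays off by default) */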

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64		layer;

	layer = adapter->phy_layer;

#define	ADD(mm, dd)							\
	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);

	ADD(IFM_NONE, 0);

	/* Media types with matching NetBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		ADD(IFM_10G_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		ADD(IFM_1000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
		ADD(IFM_100_TX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
		ADD(IFM_10_T | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ADD(IFM_10G_LR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_LX | IFM_FDX, 0);
		}
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ADD(IFM_10G_SR | IFM_FDX, 0);
		if (hw->phy.multispeed_fiber) {
			ADD(IFM_1000_SX | IFM_FDX, 0);
		}
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
		ADD(IFM_1000_SX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
		ADD(IFM_10G_CX4 | IFM_FDX, 0);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		ADD(IFM_10G_KR | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		ADD(IFM_10G_KX4 | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		ADD(IFM_1000_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		ADD(IFM_2500_KX | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
		ADD(IFM_2500_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
		ADD(IFM_5000_T | IFM_FDX, 0);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		ADD(IFM_1000_BX10 | IFM_FDX, 0);
	/* XXX no ifmedia_set? */

	ADD(IFM_AUTO, 0);

#undef ADD
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (TRUE);
		return (FALSE);
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (TRUE);
		default:
			return (FALSE);
		}
	default:
		return (FALSE);
	}
} /* ixgbe_is_sfp */

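/************************************************************************
 * ixgbe_schedule_admin_tasklet
 *
 *   Enqueue the admin work at most once: the 0->1 compare-and-swap on
 *   admin_pending fails while a previous request is still pending.
 ************************************************************************/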
1531 static void
1532 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1533 {
1534
1535 if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
1536 workqueue_enqueue(adapter->admin_wq,
1537 &adapter->admin_wc, NULL);
1538 }
1539
1540 /************************************************************************
1541 * ixgbe_config_link
1542 ************************************************************************/
1543 static void
1544 ixgbe_config_link(struct adapter *adapter)
1545 {
1546 struct ixgbe_hw *hw = &adapter->hw;
1547 u32 autoneg, err = 0;
1548 u32 task_requests = 0;
1549 bool sfp, negotiate = false;
1550
1551 sfp = ixgbe_is_sfp(hw);
1552
1553 if (sfp) {
1554 if (hw->phy.multispeed_fiber) {
1555 ixgbe_enable_tx_laser(hw);
1556 task_requests |= IXGBE_REQUEST_TASK_MSF;
1557 }
1558 task_requests |= IXGBE_REQUEST_TASK_MOD;
1559 atomic_or_32(&adapter->task_requests, task_requests);
1560 ixgbe_schedule_admin_tasklet(adapter);
1561 } else {
1562 struct ifmedia *ifm = &adapter->media;
1563
1564 if (hw->mac.ops.check_link)
1565 err = ixgbe_check_link(hw, &adapter->link_speed,
1566 &adapter->link_up, FALSE);
1567 if (err)
1568 return;
1569
1570 /*
1571 * Check if it's the first call. If it's the first call,
1572 * get value for auto negotiation.
1573 */
1574 autoneg = hw->phy.autoneg_advertised;
1575 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1576 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1577 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1578 &negotiate);
1579 if (err)
1580 return;
1581 if (hw->mac.ops.setup_link)
1582 err = hw->mac.ops.setup_link(hw, autoneg,
1583 adapter->link_up);
1584 }
1585
1586 } /* ixgbe_config_link */
1587
1588 /************************************************************************
1589 * ixgbe_update_stats_counters - Update board statistics counters.
1590 ************************************************************************/
1591 static void
1592 ixgbe_update_stats_counters(struct adapter *adapter)
1593 {
1594 struct ifnet *ifp = adapter->ifp;
1595 struct ixgbe_hw *hw = &adapter->hw;
1596 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1597 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1598 u64 total_missed_rx = 0;
1599 uint64_t crcerrs, rlec;
1600 unsigned int queue_counters;
1601 int i;
1602
1603 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1604 stats->crcerrs.ev_count += crcerrs;
1605 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1606 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1607 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1608 if (hw->mac.type >= ixgbe_mac_X550)
1609 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1610
1611 /* 16 registers exist */
1612 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1613 for (i = 0; i < queue_counters; i++) {
1614 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1615 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1616 if (hw->mac.type >= ixgbe_mac_82599EB) {
1617 stats->qprdc[i].ev_count
1618 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1619 }
1620 }
1621
1622 /* 8 registers exist */
1623 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1624 uint32_t mp;
1625
1626 /* MPC */
1627 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1628 		/* running total per traffic class */
1629 stats->mpc[i].ev_count += mp;
1630 /* running comprehensive total for stats display */
1631 total_missed_rx += mp;
1632
1633 if (hw->mac.type == ixgbe_mac_82598EB)
1634 stats->rnbc[i].ev_count
1635 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1636
1637 stats->pxontxc[i].ev_count
1638 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1639 stats->pxofftxc[i].ev_count
1640 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1641 if (hw->mac.type >= ixgbe_mac_82599EB) {
1642 stats->pxonrxc[i].ev_count
1643 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1644 stats->pxoffrxc[i].ev_count
1645 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1646 stats->pxon2offc[i].ev_count
1647 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1648 } else {
1649 stats->pxonrxc[i].ev_count
1650 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1651 stats->pxoffrxc[i].ev_count
1652 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1653 }
1654 }
1655 stats->mpctotal.ev_count += total_missed_rx;
1656
1657 	/* The datasheet says MLFC and MRFC are valid only when link is up at 10Gb/s */
1658 if ((adapter->link_active == LINK_STATE_UP)
1659 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1660 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1661 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1662 }
1663 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1664 stats->rlec.ev_count += rlec;
1665
1666 	/* Hardware workaround: GPRC also counts missed packets, so subtract them */
1667 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1668
1669 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1670 stats->lxontxc.ev_count += lxon;
1671 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1672 stats->lxofftxc.ev_count += lxoff;
1673 total = lxon + lxoff;
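	/*
	 * XON/XOFF pause frames are minimum-sized (64 byte) frames that
	 * the hardware also counts as good traffic, which is the usual
	 * rationale for this adjustment: "total" frames and
	 * total * ETHER_MIN_LEN octets are subtracted from the
	 * GOTC/GPTC/MPTC/PTC64 counts below.
	 */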
1674
1675 if (hw->mac.type != ixgbe_mac_82598EB) {
1676 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1677 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1678 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1679 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1680 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1681 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1682 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1683 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1684 } else {
1685 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1686 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1687 /* 82598 only has a counter in the high register */
1688 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1689 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1690 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1691 }
1692
1693 /*
1694 * Workaround: mprc hardware is incorrectly counting
1695 * broadcasts, so for now we subtract those.
1696 */
1697 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1698 stats->bprc.ev_count += bprc;
1699 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1700 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1701
1702 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1703 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1704 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1705 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1706 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1707 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1708
1709 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1710 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1711 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1712
1713 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1714 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1715 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1716 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1717 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1718 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1719 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1720 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1721 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1722 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1723 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1724 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1725 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1726 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1727 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1728 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1729 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1730 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1731 	/* FCoE counters exist only on 82599 and newer; 82598 lacks them */
1732 if (hw->mac.type != ixgbe_mac_82598EB) {
1733 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1734 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1735 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1736 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1737 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1738 }
1739
1740 /*
1741 * Fill out the OS statistics structure. Only RX errors are required
1742 * here because all TX counters are incremented in the TX path and
1743 * normal RX counters are prepared in ether_input().
1744 */
1745 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1746 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1747 if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
1748 IF_STAT_PUTREF(ifp);
1749 } /* ixgbe_update_stats_counters */
1750
1751 /************************************************************************
1752 * ixgbe_add_hw_stats
1753 *
1754 * Add sysctl variables, one per statistic, to the system.
1755 ************************************************************************/
1756 static void
1757 ixgbe_add_hw_stats(struct adapter *adapter)
1758 {
1759 device_t dev = adapter->dev;
1760 const struct sysctlnode *rnode, *cnode;
1761 struct sysctllog **log = &adapter->sysctllog;
1762 struct tx_ring *txr = adapter->tx_rings;
1763 struct rx_ring *rxr = adapter->rx_rings;
1764 struct ixgbe_hw *hw = &adapter->hw;
1765 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1766 const char *xname = device_xname(dev);
1767 int i;
1768
1769 /* Driver Statistics */
1770 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1771 NULL, xname, "Driver tx dma soft fail EFBIG");
1772 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1773 NULL, xname, "m_defrag() failed");
1774 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1775 NULL, xname, "Driver tx dma hard fail EFBIG");
1776 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1777 NULL, xname, "Driver tx dma hard fail EINVAL");
1778 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1779 NULL, xname, "Driver tx dma hard fail other");
1780 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1781 NULL, xname, "Driver tx dma soft fail EAGAIN");
1782 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1783 NULL, xname, "Driver tx dma soft fail ENOMEM");
1784 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1785 NULL, xname, "Watchdog timeouts");
1786 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1787 NULL, xname, "TSO errors");
1788 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1789 NULL, xname, "Admin MSI-X IRQ Handled");
1790 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1791 NULL, xname, "Link event");
1792 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1793 NULL, xname, "SFP+ module event");
1794 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1795 NULL, xname, "Multispeed event");
1796 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1797 NULL, xname, "External PHY event");
1798
1799 	/* The maximum number of traffic classes is 8 */
1800 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1801 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1802 snprintf(adapter->tcs[i].evnamebuf,
1803 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1804 xname, i);
1805 if (i < __arraycount(stats->mpc)) {
1806 evcnt_attach_dynamic(&stats->mpc[i],
1807 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1808 "RX Missed Packet Count");
1809 if (hw->mac.type == ixgbe_mac_82598EB)
1810 evcnt_attach_dynamic(&stats->rnbc[i],
1811 EVCNT_TYPE_MISC, NULL,
1812 adapter->tcs[i].evnamebuf,
1813 "Receive No Buffers");
1814 }
1815 if (i < __arraycount(stats->pxontxc)) {
1816 evcnt_attach_dynamic(&stats->pxontxc[i],
1817 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1818 "pxontxc");
1819 evcnt_attach_dynamic(&stats->pxonrxc[i],
1820 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1821 "pxonrxc");
1822 evcnt_attach_dynamic(&stats->pxofftxc[i],
1823 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1824 "pxofftxc");
1825 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1826 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1827 "pxoffrxc");
1828 if (hw->mac.type >= ixgbe_mac_82599EB)
1829 evcnt_attach_dynamic(&stats->pxon2offc[i],
1830 EVCNT_TYPE_MISC, NULL,
1831 adapter->tcs[i].evnamebuf,
1832 "pxon2offc");
1833 }
1834 }
1835
1836 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1837 #ifdef LRO
1838 struct lro_ctrl *lro = &rxr->lro;
1839 #endif /* LRO */
1840
1841 snprintf(adapter->queues[i].evnamebuf,
1842 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1843 xname, i);
1844 snprintf(adapter->queues[i].namebuf,
1845 sizeof(adapter->queues[i].namebuf), "q%d", i);
1846
1847 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1848 aprint_error_dev(dev, "could not create sysctl root\n");
1849 break;
1850 }
1851
1852 if (sysctl_createv(log, 0, &rnode, &rnode,
1853 0, CTLTYPE_NODE,
1854 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1855 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1856 break;
1857
1858 if (sysctl_createv(log, 0, &rnode, &cnode,
1859 CTLFLAG_READWRITE, CTLTYPE_INT,
1860 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1861 ixgbe_sysctl_interrupt_rate_handler, 0,
1862 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1863 break;
1864
1865 if (sysctl_createv(log, 0, &rnode, &cnode,
1866 CTLFLAG_READONLY, CTLTYPE_INT,
1867 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1868 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1869 0, CTL_CREATE, CTL_EOL) != 0)
1870 break;
1871
1872 if (sysctl_createv(log, 0, &rnode, &cnode,
1873 CTLFLAG_READONLY, CTLTYPE_INT,
1874 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1875 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1876 0, CTL_CREATE, CTL_EOL) != 0)
1877 break;
1878
1879 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1880 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1881 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1882 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1883 "Handled queue in softint");
1884 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1885 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1886 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1887 NULL, adapter->queues[i].evnamebuf, "TSO");
1888 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1889 NULL, adapter->queues[i].evnamebuf,
1890 "Queue No Descriptor Available");
1891 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1892 NULL, adapter->queues[i].evnamebuf,
1893 "Queue Packets Transmitted");
1894 #ifndef IXGBE_LEGACY_TX
1895 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1896 NULL, adapter->queues[i].evnamebuf,
1897 "Packets dropped in pcq");
1898 #endif
1899
1900 if (sysctl_createv(log, 0, &rnode, &cnode,
1901 CTLFLAG_READONLY,
1902 CTLTYPE_INT,
1903 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1904 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1905 CTL_CREATE, CTL_EOL) != 0)
1906 break;
1907
1908 if (sysctl_createv(log, 0, &rnode, &cnode,
1909 CTLFLAG_READONLY,
1910 CTLTYPE_INT,
1911 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1912 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1913 CTL_CREATE, CTL_EOL) != 0)
1914 break;
1915
1916 if (sysctl_createv(log, 0, &rnode, &cnode,
1917 CTLFLAG_READONLY,
1918 CTLTYPE_INT,
1919 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1920 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1921 CTL_CREATE, CTL_EOL) != 0)
1922 break;
1923
1924 if (i < __arraycount(stats->qprc)) {
1925 evcnt_attach_dynamic(&stats->qprc[i],
1926 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1927 "qprc");
1928 evcnt_attach_dynamic(&stats->qptc[i],
1929 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1930 "qptc");
1931 evcnt_attach_dynamic(&stats->qbrc[i],
1932 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1933 "qbrc");
1934 evcnt_attach_dynamic(&stats->qbtc[i],
1935 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1936 "qbtc");
1937 if (hw->mac.type >= ixgbe_mac_82599EB)
1938 evcnt_attach_dynamic(&stats->qprdc[i],
1939 EVCNT_TYPE_MISC, NULL,
1940 adapter->queues[i].evnamebuf, "qprdc");
1941 }
1942
1943 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1944 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1945 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1946 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1947 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1948 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1949 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1950 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1951 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1952 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1953 #ifdef LRO
1954 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1955 CTLFLAG_RD, &lro->lro_queued, 0,
1956 "LRO Queued");
1957 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1958 CTLFLAG_RD, &lro->lro_flushed, 0,
1959 "LRO Flushed");
1960 #endif /* LRO */
1961 }
1962
1963 /* MAC stats get their own sub node */
1964
1965 snprintf(stats->namebuf,
1966 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1967
1968 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "rx csum offload - IP");
1970 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "rx csum offload - L4");
1972 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "rx csum offload - IP bad");
1974 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "rx csum offload - L4 bad");
1976 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Interrupt conditions zero");
1978 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Legacy interrupts");
1980
1981 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "CRC Errors");
1983 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Illegal Byte Errors");
1985 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Byte Errors");
1987 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "MAC Short Packets Discarded");
1989 if (hw->mac.type >= ixgbe_mac_X550)
1990 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Bad SFD");
1992 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Total Packets Missed");
1994 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "MAC Local Faults");
1996 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "MAC Remote Faults");
1998 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Receive Length Errors");
2000 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Link XON Transmitted");
2002 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Link XON Received");
2004 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Link XOFF Transmitted");
2006 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Link XOFF Received");
2008
2009 /* Packet Reception Stats */
2010 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Total Octets Received");
2012 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Good Octets Received");
2014 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Total Packets Received");
2016 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Good Packets Received");
2018 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Multicast Packets Received");
2020 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Broadcast Packets Received");
2022 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2023 	    stats->namebuf, "64 byte frames received");
2024 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "65-127 byte frames received");
2026 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "128-255 byte frames received");
2028 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "256-511 byte frames received");
2030 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "512-1023 byte frames received");
2032 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2033 	    stats->namebuf, "1024-1522 byte frames received");
2034 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Receive Undersized");
2036 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2037 	    stats->namebuf, "Fragmented Packets Received");
2038 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "Oversized Packets Received");
2040 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "Received Jabber");
2042 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "Management Packets Received");
2044 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "Management Packets Dropped");
2046 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "Checksum Errors");
2048
2049 /* Packet Transmission Stats */
2050 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Good Octets Transmitted");
2052 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Total Packets Transmitted");
2054 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Good Packets Transmitted");
2056 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Broadcast Packets Transmitted");
2058 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Multicast Packets Transmitted");
2060 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Management Packets Transmitted");
2062 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2063 	    stats->namebuf, "64 byte frames transmitted");
2064 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2065 stats->namebuf, "65-127 byte frames transmitted");
2066 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "128-255 byte frames transmitted");
2068 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "256-511 byte frames transmitted");
2070 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "512-1023 byte frames transmitted");
2072 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "1024-1522 byte frames transmitted");
2074 } /* ixgbe_add_hw_stats */
2075
2076 static void
2077 ixgbe_clear_evcnt(struct adapter *adapter)
2078 {
2079 struct tx_ring *txr = adapter->tx_rings;
2080 struct rx_ring *rxr = adapter->rx_rings;
2081 struct ixgbe_hw *hw = &adapter->hw;
2082 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2083 int i;
2084
2085 adapter->efbig_tx_dma_setup.ev_count = 0;
2086 adapter->mbuf_defrag_failed.ev_count = 0;
2087 adapter->efbig2_tx_dma_setup.ev_count = 0;
2088 adapter->einval_tx_dma_setup.ev_count = 0;
2089 adapter->other_tx_dma_setup.ev_count = 0;
2090 adapter->eagain_tx_dma_setup.ev_count = 0;
2091 adapter->enomem_tx_dma_setup.ev_count = 0;
2092 adapter->tso_err.ev_count = 0;
2093 adapter->watchdog_events.ev_count = 0;
2094 adapter->admin_irqev.ev_count = 0;
2095 adapter->link_workev.ev_count = 0;
2096 adapter->mod_workev.ev_count = 0;
2097 adapter->msf_workev.ev_count = 0;
2098 adapter->phy_workev.ev_count = 0;
2099
2100 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2101 if (i < __arraycount(stats->mpc)) {
2102 stats->mpc[i].ev_count = 0;
2103 if (hw->mac.type == ixgbe_mac_82598EB)
2104 stats->rnbc[i].ev_count = 0;
2105 }
2106 if (i < __arraycount(stats->pxontxc)) {
2107 stats->pxontxc[i].ev_count = 0;
2108 stats->pxonrxc[i].ev_count = 0;
2109 stats->pxofftxc[i].ev_count = 0;
2110 stats->pxoffrxc[i].ev_count = 0;
2111 if (hw->mac.type >= ixgbe_mac_82599EB)
2112 stats->pxon2offc[i].ev_count = 0;
2113 }
2114 }
2115
2116 txr = adapter->tx_rings;
2117 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2118 adapter->queues[i].irqs.ev_count = 0;
2119 adapter->queues[i].handleq.ev_count = 0;
2120 adapter->queues[i].req.ev_count = 0;
2121 txr->no_desc_avail.ev_count = 0;
2122 txr->total_packets.ev_count = 0;
2123 txr->tso_tx.ev_count = 0;
2124 #ifndef IXGBE_LEGACY_TX
2125 txr->pcq_drops.ev_count = 0;
2126 #endif
2127 txr->q_efbig_tx_dma_setup = 0;
2128 txr->q_mbuf_defrag_failed = 0;
2129 txr->q_efbig2_tx_dma_setup = 0;
2130 txr->q_einval_tx_dma_setup = 0;
2131 txr->q_other_tx_dma_setup = 0;
2132 txr->q_eagain_tx_dma_setup = 0;
2133 txr->q_enomem_tx_dma_setup = 0;
2134 txr->q_tso_err = 0;
2135
2136 if (i < __arraycount(stats->qprc)) {
2137 stats->qprc[i].ev_count = 0;
2138 stats->qptc[i].ev_count = 0;
2139 stats->qbrc[i].ev_count = 0;
2140 stats->qbtc[i].ev_count = 0;
2141 if (hw->mac.type >= ixgbe_mac_82599EB)
2142 stats->qprdc[i].ev_count = 0;
2143 }
2144
2145 rxr->rx_packets.ev_count = 0;
2146 rxr->rx_bytes.ev_count = 0;
2147 rxr->rx_copies.ev_count = 0;
2148 rxr->no_jmbuf.ev_count = 0;
2149 rxr->rx_discarded.ev_count = 0;
2150 }
2151 stats->ipcs.ev_count = 0;
2152 stats->l4cs.ev_count = 0;
2153 stats->ipcs_bad.ev_count = 0;
2154 stats->l4cs_bad.ev_count = 0;
2155 stats->intzero.ev_count = 0;
2156 stats->legint.ev_count = 0;
2157 stats->crcerrs.ev_count = 0;
2158 stats->illerrc.ev_count = 0;
2159 stats->errbc.ev_count = 0;
2160 stats->mspdc.ev_count = 0;
2161 if (hw->mac.type >= ixgbe_mac_X550)
2162 stats->mbsdc.ev_count = 0;
2163 stats->mpctotal.ev_count = 0;
2164 stats->mlfc.ev_count = 0;
2165 stats->mrfc.ev_count = 0;
2166 stats->rlec.ev_count = 0;
2167 stats->lxontxc.ev_count = 0;
2168 stats->lxonrxc.ev_count = 0;
2169 stats->lxofftxc.ev_count = 0;
2170 stats->lxoffrxc.ev_count = 0;
2171
2172 /* Packet Reception Stats */
2173 stats->tor.ev_count = 0;
2174 stats->gorc.ev_count = 0;
2175 stats->tpr.ev_count = 0;
2176 stats->gprc.ev_count = 0;
2177 stats->mprc.ev_count = 0;
2178 stats->bprc.ev_count = 0;
2179 stats->prc64.ev_count = 0;
2180 stats->prc127.ev_count = 0;
2181 stats->prc255.ev_count = 0;
2182 stats->prc511.ev_count = 0;
2183 stats->prc1023.ev_count = 0;
2184 stats->prc1522.ev_count = 0;
2185 stats->ruc.ev_count = 0;
2186 stats->rfc.ev_count = 0;
2187 stats->roc.ev_count = 0;
2188 stats->rjc.ev_count = 0;
2189 stats->mngprc.ev_count = 0;
2190 stats->mngpdc.ev_count = 0;
2191 stats->xec.ev_count = 0;
2192
2193 /* Packet Transmission Stats */
2194 stats->gotc.ev_count = 0;
2195 stats->tpt.ev_count = 0;
2196 stats->gptc.ev_count = 0;
2197 stats->bptc.ev_count = 0;
2198 stats->mptc.ev_count = 0;
2199 stats->mngptc.ev_count = 0;
2200 stats->ptc64.ev_count = 0;
2201 stats->ptc127.ev_count = 0;
2202 stats->ptc255.ev_count = 0;
2203 stats->ptc511.ev_count = 0;
2204 stats->ptc1023.ev_count = 0;
2205 stats->ptc1522.ev_count = 0;
2206 }
2207
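/*
 * The sysctl handlers below all follow the same NetBSD idiom: copy the
 * node, point node.sysctl_data at a local variable holding the current
 * value, and let sysctl_lookup() perform the actual copy in/out.
 */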
2208 /************************************************************************
2209 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2210 *
2211 * Retrieves the TDH value from the hardware
2212 ************************************************************************/
2213 static int
2214 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2215 {
2216 struct sysctlnode node = *rnode;
2217 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2218 struct adapter *adapter;
2219 uint32_t val;
2220
2221 if (!txr)
2222 return (0);
2223
2224 adapter = txr->adapter;
2225 if (ixgbe_fw_recovery_mode_swflag(adapter))
2226 return (EPERM);
2227
2228 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2229 node.sysctl_data = &val;
2230 return sysctl_lookup(SYSCTLFN_CALL(&node));
2231 } /* ixgbe_sysctl_tdh_handler */
2232
2233 /************************************************************************
2234 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2235 *
2236 * Retrieves the TDT value from the hardware
2237 ************************************************************************/
2238 static int
2239 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2240 {
2241 struct sysctlnode node = *rnode;
2242 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2243 struct adapter *adapter;
2244 uint32_t val;
2245
2246 if (!txr)
2247 return (0);
2248
2249 adapter = txr->adapter;
2250 if (ixgbe_fw_recovery_mode_swflag(adapter))
2251 return (EPERM);
2252
2253 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2254 node.sysctl_data = &val;
2255 return sysctl_lookup(SYSCTLFN_CALL(&node));
2256 } /* ixgbe_sysctl_tdt_handler */
2257
2258 /************************************************************************
2259 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2260 * handler function
2261 *
2262 * Retrieves the next_to_check value
2263 ************************************************************************/
2264 static int
2265 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2266 {
2267 struct sysctlnode node = *rnode;
2268 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2269 struct adapter *adapter;
2270 uint32_t val;
2271
2272 if (!rxr)
2273 return (0);
2274
2275 adapter = rxr->adapter;
2276 if (ixgbe_fw_recovery_mode_swflag(adapter))
2277 return (EPERM);
2278
2279 val = rxr->next_to_check;
2280 node.sysctl_data = &val;
2281 return sysctl_lookup(SYSCTLFN_CALL(&node));
2282 } /* ixgbe_sysctl_next_to_check_handler */
2283
2284 /************************************************************************
2285 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2286 *
2287 * Retrieves the RDH value from the hardware
2288 ************************************************************************/
2289 static int
2290 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2291 {
2292 struct sysctlnode node = *rnode;
2293 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2294 struct adapter *adapter;
2295 uint32_t val;
2296
2297 if (!rxr)
2298 return (0);
2299
2300 adapter = rxr->adapter;
2301 if (ixgbe_fw_recovery_mode_swflag(adapter))
2302 return (EPERM);
2303
2304 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2305 node.sysctl_data = &val;
2306 return sysctl_lookup(SYSCTLFN_CALL(&node));
2307 } /* ixgbe_sysctl_rdh_handler */
2308
2309 /************************************************************************
2310 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2311 *
2312 * Retrieves the RDT value from the hardware
2313 ************************************************************************/
2314 static int
2315 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2316 {
2317 struct sysctlnode node = *rnode;
2318 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2319 struct adapter *adapter;
2320 uint32_t val;
2321
2322 if (!rxr)
2323 return (0);
2324
2325 adapter = rxr->adapter;
2326 if (ixgbe_fw_recovery_mode_swflag(adapter))
2327 return (EPERM);
2328
2329 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2330 node.sysctl_data = &val;
2331 return sysctl_lookup(SYSCTLFN_CALL(&node));
2332 } /* ixgbe_sysctl_rdt_handler */
2333
2334 static int
2335 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2336 {
2337 struct ifnet *ifp = &ec->ec_if;
2338 struct adapter *adapter = ifp->if_softc;
2339 int rv;
2340
2341 if (set)
2342 rv = ixgbe_register_vlan(adapter, vid);
2343 else
2344 rv = ixgbe_unregister_vlan(adapter, vid);
2345
2346 if (rv != 0)
2347 return rv;
2348
2349 /*
2350 	 * Reconfigure VLAN HW tagging when ec_nvlans changes between 0
2351 	 * and 1 (first VLAN attached or last VLAN detached).
2352 */
2353 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2354 ixgbe_setup_vlan_hw_tagging(adapter);
2355
2356 return rv;
2357 }
2358
2359 /************************************************************************
2360 * ixgbe_register_vlan
2361 *
2362  *   Run via the vlan config EVENT. It enables us to use the
2363  *   HW filter table because it supplies the VLAN ID. This only
2364  *   creates the entry in the soft version of the VFTA; init
2365  *   will repopulate the real table.
2366 ************************************************************************/
2367 static int
2368 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2369 {
2370 u16 index, bit;
2371 int error;
2372
2373 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2374 return EINVAL;
2375
2376 IXGBE_CORE_LOCK(adapter);
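	/*
	 * The 4096-bit VFTA is kept as 128 32-bit words: the word index
	 * is vtag / 32 (vtag >> 5) and the bit within that word is
	 * vtag % 32 (vtag & 0x1F). For example, VLAN ID 100 maps to
	 * word 3, bit 4.
	 */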
2377 index = (vtag >> 5) & 0x7F;
2378 bit = vtag & 0x1F;
2379 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2380 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2381 true);
2382 IXGBE_CORE_UNLOCK(adapter);
2383 if (error != 0)
2384 error = EACCES;
2385
2386 return error;
2387 } /* ixgbe_register_vlan */
2388
2389 /************************************************************************
2390 * ixgbe_unregister_vlan
2391 *
2392 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2393 ************************************************************************/
2394 static int
2395 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2396 {
2397 u16 index, bit;
2398 int error;
2399
2400 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2401 return EINVAL;
2402
2403 IXGBE_CORE_LOCK(adapter);
2404 index = (vtag >> 5) & 0x7F;
2405 bit = vtag & 0x1F;
2406 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2407 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2408 true);
2409 IXGBE_CORE_UNLOCK(adapter);
2410 if (error != 0)
2411 error = EACCES;
2412
2413 return error;
2414 } /* ixgbe_unregister_vlan */
2415
2416 static void
2417 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2418 {
2419 struct ethercom *ec = &adapter->osdep.ec;
2420 struct ixgbe_hw *hw = &adapter->hw;
2421 struct rx_ring *rxr;
2422 u32 ctrl;
2423 int i;
2424 bool hwtagging;
2425
2426 /* Enable HW tagging only if any vlan is attached */
2427 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2428 && VLAN_ATTACHED(ec);
2429
2430 /* Setup the queues for vlans */
2431 for (i = 0; i < adapter->num_queues; i++) {
2432 rxr = &adapter->rx_rings[i];
2433 /*
2434 		 * On 82599 and later, the VLAN enable bit is per-queue, in RXDCTL.
2435 */
2436 if (hw->mac.type != ixgbe_mac_82598EB) {
2437 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2438 if (hwtagging)
2439 ctrl |= IXGBE_RXDCTL_VME;
2440 else
2441 ctrl &= ~IXGBE_RXDCTL_VME;
2442 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2443 }
2444 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2445 }
2446
2447 /* VLAN hw tagging for 82598 */
2448 if (hw->mac.type == ixgbe_mac_82598EB) {
2449 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2450 if (hwtagging)
2451 ctrl |= IXGBE_VLNCTRL_VME;
2452 else
2453 ctrl &= ~IXGBE_VLNCTRL_VME;
2454 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2455 }
2456 } /* ixgbe_setup_vlan_hw_tagging */
2457
2458 static void
2459 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2460 {
2461 struct ethercom *ec = &adapter->osdep.ec;
2462 struct ixgbe_hw *hw = &adapter->hw;
2463 int i;
2464 u32 ctrl;
2465 struct vlanid_list *vlanidp;
2466
2467 /*
2468 	 * This function is called from both if_init() and ifflags_cb()
2469 * on NetBSD.
2470 */
2471
2472 /*
2473 * Part 1:
2474 * Setup VLAN HW tagging
2475 */
2476 ixgbe_setup_vlan_hw_tagging(adapter);
2477
2478 /*
2479 * Part 2:
2480 * Setup VLAN HW filter
2481 */
2482 	/* Clear shadow_vfta */
2483 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2484 adapter->shadow_vfta[i] = 0;
2485 /* Generate shadow_vfta from ec_vids */
2486 ETHER_LOCK(ec);
2487 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2488 uint32_t idx;
2489
2490 idx = vlanidp->vid / 32;
2491 KASSERT(idx < IXGBE_VFTA_SIZE);
2492 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2493 }
2494 ETHER_UNLOCK(ec);
2495 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2496 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2497
2498 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2499 	/* Enable the VLAN filter table if HW filtering is configured */
2500 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2501 ctrl |= IXGBE_VLNCTRL_VFE;
2502 else
2503 ctrl &= ~IXGBE_VLNCTRL_VFE;
2504 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2505 } /* ixgbe_setup_vlan_hw_support */
2506
2507 /************************************************************************
2508 * ixgbe_get_slot_info
2509 *
2510 * Get the width and transaction speed of
2511 * the slot this adapter is plugged into.
2512 ************************************************************************/
2513 static void
2514 ixgbe_get_slot_info(struct adapter *adapter)
2515 {
2516 device_t dev = adapter->dev;
2517 struct ixgbe_hw *hw = &adapter->hw;
2518 u32 offset;
2519 u16 link;
2520 int bus_info_valid = TRUE;
2521
2522 /* Some devices are behind an internal bridge */
2523 switch (hw->device_id) {
2524 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2525 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2526 goto get_parent_info;
2527 default:
2528 break;
2529 }
2530
2531 ixgbe_get_bus_info(hw);
2532
2533 /*
2534 	 * Some devices don't use PCI-E; for those, simply return rather
2535 	 * than display "Unknown" for bus speed and width.
2536 */
2537 switch (hw->mac.type) {
2538 case ixgbe_mac_X550EM_x:
2539 case ixgbe_mac_X550EM_a:
2540 return;
2541 default:
2542 goto display;
2543 }
2544
2545 get_parent_info:
2546 /*
2547 	 * For the quad-port adapter we need to walk back up the PCI
2548 	 * tree to find the speed of the expansion slot into which this
2549 	 * adapter is plugged. A bit more work.
2550 */
2551 dev = device_parent(device_parent(dev));
2552 #if 0
2553 #ifdef IXGBE_DEBUG
2554 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2555 pci_get_slot(dev), pci_get_function(dev));
2556 #endif
2557 dev = device_parent(device_parent(dev));
2558 #ifdef IXGBE_DEBUG
2559 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2560 pci_get_slot(dev), pci_get_function(dev));
2561 #endif
2562 #endif
2563 /* Now get the PCI Express Capabilities offset */
2564 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2565 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2566 /*
2567 		 * Hmm...can't find the PCI Express capability.
2568 		 * Fall back to the default method.
2569 */
2570 bus_info_valid = FALSE;
2571 ixgbe_get_bus_info(hw);
2572 goto display;
2573 }
2574 /* ...and read the Link Status Register */
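	/*
	 * PCIE_LCSR holds the Link Control register in its low 16 bits
	 * and the Link Status register in its high 16 bits, hence the
	 * shift by 16.
	 */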
2575 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2576 offset + PCIE_LCSR) >> 16;
2577 ixgbe_set_pci_config_data_generic(hw, link);
2578
2579 display:
2580 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2581 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2582 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2583 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2584 "Unknown"),
2585 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2586 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2587 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2588 "Unknown"));
2589
2590 if (bus_info_valid) {
2591 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2592 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2593 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2594 			device_printf(dev, "PCI-Express bandwidth available"
2595 			    " for this card is not sufficient for"
2596 			    " optimal performance.\n");
2597 			device_printf(dev, "For optimal performance an x8 "
2598 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
2599 }
2600 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2601 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2602 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2603 			device_printf(dev, "PCI-Express bandwidth available"
2604 			    " for this card is not sufficient for"
2605 			    " optimal performance.\n");
2606 			device_printf(dev, "For optimal performance an x8 "
2607 			    "PCIE Gen3 slot is required.\n");
2608 }
2609 } else
2610 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
2611
2612 return;
2613 } /* ixgbe_get_slot_info */
2614
2615 /************************************************************************
2616 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2617 ************************************************************************/
2618 static inline void
2619 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2620 {
2621 struct ixgbe_hw *hw = &adapter->hw;
2622 struct ix_queue *que = &adapter->queues[vector];
2623 u64 queue = 1ULL << vector;
2624 u32 mask;
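	/*
	 * On everything but 82598 the 64-bit queue mask is split across
	 * two 32-bit registers: bits 0-31 go to EIMS_EX(0) and bits
	 * 32-63 to EIMS_EX(1). For example, vector 35 sets bit 3 of
	 * EIMS_EX(1).
	 */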
2625
2626 mutex_enter(&que->dc_mtx);
2627 if (que->disabled_count > 0 && --que->disabled_count > 0)
2628 goto out;
2629
2630 if (hw->mac.type == ixgbe_mac_82598EB) {
2631 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2632 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2633 } else {
2634 mask = (queue & 0xFFFFFFFF);
2635 if (mask)
2636 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2637 mask = (queue >> 32);
2638 if (mask)
2639 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2640 }
2641 out:
2642 mutex_exit(&que->dc_mtx);
2643 } /* ixgbe_enable_queue */
2644
2645 /************************************************************************
2646 * ixgbe_disable_queue_internal
2647 ************************************************************************/
2648 static inline void
2649 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2650 {
2651 struct ixgbe_hw *hw = &adapter->hw;
2652 struct ix_queue *que = &adapter->queues[vector];
2653 u64 queue = 1ULL << vector;
2654 u32 mask;
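	/*
	 * disabled_count implements nested disables: each disable
	 * increments it (nested disables only count when nestok is
	 * true) and each enable decrements it, so the queue interrupt
	 * is unmasked again only once every disable has been balanced
	 * by an enable.
	 */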
2655
2656 mutex_enter(&que->dc_mtx);
2657
2658 if (que->disabled_count > 0) {
2659 if (nestok)
2660 que->disabled_count++;
2661 goto out;
2662 }
2663 que->disabled_count++;
2664
2665 if (hw->mac.type == ixgbe_mac_82598EB) {
2666 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2667 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2668 } else {
2669 mask = (queue & 0xFFFFFFFF);
2670 if (mask)
2671 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2672 mask = (queue >> 32);
2673 if (mask)
2674 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2675 }
2676 out:
2677 mutex_exit(&que->dc_mtx);
2678 } /* ixgbe_disable_queue_internal */
2679
2680 /************************************************************************
2681 * ixgbe_disable_queue
2682 ************************************************************************/
2683 static inline void
2684 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2685 {
2686
2687 ixgbe_disable_queue_internal(adapter, vector, true);
2688 } /* ixgbe_disable_queue */
2689
2690 /************************************************************************
2691 * ixgbe_sched_handle_que - schedule deferred packet processing
2692 ************************************************************************/
2693 static inline void
2694 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2695 {
2696
2697 if (que->txrx_use_workqueue) {
2698 		/*
2699 		 * adapter->que_wq is bound to each CPU instead of to each
2700 		 * NIC queue in order to reduce the number of workqueue
2701 		 * kthreads. Since interrupt affinity matters here, the
2702 		 * workqueue kthread must be WQ_PERCPU. If a WQ_PERCPU
2703 		 * workqueue kthread were created for each NIC queue
2704 		 * instead, the number of kthreads would be
2705 		 * (number of used NIC queues) * (number of CPUs), which
2706 		 * is (number of CPUs) ^ 2 most often.
2707 		 *
2708 		 * Re-entry for the same NIC queue is prevented by masking
2709 		 * the queue's interrupt, and different NIC queues use
2710 		 * different struct work (que->wq_cookie), so an "enqueued"
2711 		 * flag to guard against calling workqueue_enqueue() twice
2712 		 * is not required.
2713 		 */
2714 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2715 } else {
2716 softint_schedule(que->que_si);
2717 }
2718 }
2719
2720 /************************************************************************
2721 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2722 ************************************************************************/
2723 static int
2724 ixgbe_msix_que(void *arg)
2725 {
2726 struct ix_queue *que = arg;
2727 struct adapter *adapter = que->adapter;
2728 struct ifnet *ifp = adapter->ifp;
2729 struct tx_ring *txr = que->txr;
2730 struct rx_ring *rxr = que->rxr;
2731 bool more;
2732 u32 newitr = 0;
2733
2734 /* Protect against spurious interrupts */
2735 if ((ifp->if_flags & IFF_RUNNING) == 0)
2736 return 0;
2737
2738 ixgbe_disable_queue(adapter, que->msix);
2739 ++que->irqs.ev_count;
2740
2741 /*
2742 	 * Latch adapter->txrx_use_workqueue now so the softint/workqueue
2743 	 * choice cannot flip-flop within one deferred-processing pass.
2744 */
2745 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2746
2747 #ifdef __NetBSD__
2748 /* Don't run ixgbe_rxeof in interrupt context */
2749 more = true;
2750 #else
2751 more = ixgbe_rxeof(que);
2752 #endif
2753
2754 IXGBE_TX_LOCK(txr);
2755 ixgbe_txeof(txr);
2756 IXGBE_TX_UNLOCK(txr);
2757
2758 /* Do AIM now? */
2759
2760 if (adapter->enable_aim == false)
2761 goto no_calc;
2762 /*
2763 * Do Adaptive Interrupt Moderation:
2764 * - Write out last calculated setting
2765 * - Calculate based on average size over
2766 * the last interval.
2767 */
2768 if (que->eitr_setting)
2769 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2770
2771 que->eitr_setting = 0;
2772
2773 /* Idle, do nothing */
2774 if ((txr->bytes == 0) && (rxr->bytes == 0))
2775 goto no_calc;
2776
2777 if ((txr->bytes) && (txr->packets))
2778 newitr = txr->bytes/txr->packets;
2779 if ((rxr->bytes) && (rxr->packets))
2780 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2781 newitr += 24; /* account for hardware frame, crc */
2782
2783 /* set an upper boundary */
2784 newitr = uimin(newitr, 3000);
2785
2786 /* Be nice to the mid range */
2787 if ((newitr > 300) && (newitr < 1200))
2788 newitr = (newitr / 3);
2789 else
2790 newitr = (newitr / 2);
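	/*
	 * For example, an average frame of 1500 bytes gives
	 * newitr = 1500 + 24 = 1524; that is above the 300-1200 mid
	 * range, so the final value is 1524 / 2 = 762.
	 */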
2791
2792 /*
2793 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY,
2794 	 * for which we currently use 2us. The hardware minimum is already
2795 	 * greater than 2us at 100M (and presumably 10M, though that is not
2796 	 * documented), but not at 1G and higher, so enforce it here.
2797 */
2798 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2799 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2800 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2801 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2802 }
2803
2804 /* save for next interrupt */
2805 que->eitr_setting = newitr;
2806
2807 /* Reset state */
2808 txr->bytes = 0;
2809 txr->packets = 0;
2810 rxr->bytes = 0;
2811 rxr->packets = 0;
2812
2813 no_calc:
2814 if (more)
2815 ixgbe_sched_handle_que(adapter, que);
2816 else
2817 ixgbe_enable_queue(adapter, que->msix);
2818
2819 return 1;
2820 } /* ixgbe_msix_que */
2821
2822 /************************************************************************
2823 * ixgbe_media_status - Media Ioctl callback
2824 *
2825 * Called whenever the user queries the status of
2826 * the interface using ifconfig.
2827 ************************************************************************/
2828 static void
2829 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2830 {
2831 struct adapter *adapter = ifp->if_softc;
2832 struct ixgbe_hw *hw = &adapter->hw;
2833 int layer;
2834
2835 INIT_DEBUGOUT("ixgbe_media_status: begin");
2836 ixgbe_update_link_status(adapter);
2837
2838 ifmr->ifm_status = IFM_AVALID;
2839 ifmr->ifm_active = IFM_ETHER;
2840
2841 if (adapter->link_active != LINK_STATE_UP) {
2842 ifmr->ifm_active |= IFM_NONE;
2843 return;
2844 }
2845
2846 ifmr->ifm_status |= IFM_ACTIVE;
2847 layer = adapter->phy_layer;
2848
2849 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2850 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2851 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2852 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2853 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2854 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2855 switch (adapter->link_speed) {
2856 case IXGBE_LINK_SPEED_10GB_FULL:
2857 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2858 break;
2859 case IXGBE_LINK_SPEED_5GB_FULL:
2860 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2861 break;
2862 case IXGBE_LINK_SPEED_2_5GB_FULL:
2863 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2864 break;
2865 case IXGBE_LINK_SPEED_1GB_FULL:
2866 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2867 break;
2868 case IXGBE_LINK_SPEED_100_FULL:
2869 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2870 break;
2871 case IXGBE_LINK_SPEED_10_FULL:
2872 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2873 break;
2874 }
2875 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2876 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2877 switch (adapter->link_speed) {
2878 case IXGBE_LINK_SPEED_10GB_FULL:
2879 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2880 break;
2881 }
2882 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2883 switch (adapter->link_speed) {
2884 case IXGBE_LINK_SPEED_10GB_FULL:
2885 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2886 break;
2887 case IXGBE_LINK_SPEED_1GB_FULL:
2888 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2889 break;
2890 }
2891 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2892 switch (adapter->link_speed) {
2893 case IXGBE_LINK_SPEED_10GB_FULL:
2894 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2895 break;
2896 case IXGBE_LINK_SPEED_1GB_FULL:
2897 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2898 break;
2899 }
2900 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2901 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2902 switch (adapter->link_speed) {
2903 case IXGBE_LINK_SPEED_10GB_FULL:
2904 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2905 break;
2906 case IXGBE_LINK_SPEED_1GB_FULL:
2907 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2908 break;
2909 }
2910 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2911 switch (adapter->link_speed) {
2912 case IXGBE_LINK_SPEED_10GB_FULL:
2913 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2914 break;
2915 }
2916 /*
2917 * XXX: These need to use the proper media types once
2918 * they're added.
2919 */
2920 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2921 switch (adapter->link_speed) {
2922 case IXGBE_LINK_SPEED_10GB_FULL:
2923 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2924 break;
2925 case IXGBE_LINK_SPEED_2_5GB_FULL:
2926 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2927 break;
2928 case IXGBE_LINK_SPEED_1GB_FULL:
2929 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2930 break;
2931 }
2932 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2933 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2934 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2935 switch (adapter->link_speed) {
2936 case IXGBE_LINK_SPEED_10GB_FULL:
2937 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2938 break;
2939 case IXGBE_LINK_SPEED_2_5GB_FULL:
2940 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2941 break;
2942 case IXGBE_LINK_SPEED_1GB_FULL:
2943 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2944 break;
2945 }
2946
2947 /* If nothing is recognized... */
2948 #if 0
2949 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2950 ifmr->ifm_active |= IFM_UNKNOWN;
2951 #endif
2952
2953 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2954
2955 /* Display current flow control setting used on link */
2956 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2957 hw->fc.current_mode == ixgbe_fc_full)
2958 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2959 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2960 hw->fc.current_mode == ixgbe_fc_full)
2961 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2962
2963 return;
2964 } /* ixgbe_media_status */
2965
2966 /************************************************************************
2967 * ixgbe_media_change - Media Ioctl callback
2968 *
2969 * Called when the user changes speed/duplex using
2970  *   media/mediaopt option with ifconfig.
2971 ************************************************************************/
2972 static int
2973 ixgbe_media_change(struct ifnet *ifp)
2974 {
2975 struct adapter *adapter = ifp->if_softc;
2976 struct ifmedia *ifm = &adapter->media;
2977 struct ixgbe_hw *hw = &adapter->hw;
2978 ixgbe_link_speed speed = 0;
2979 ixgbe_link_speed link_caps = 0;
2980 bool negotiate = false;
2981 s32 err = IXGBE_NOT_IMPLEMENTED;
2982
2983 INIT_DEBUGOUT("ixgbe_media_change: begin");
2984
2985 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2986 return (EINVAL);
2987
2988 if (hw->phy.media_type == ixgbe_media_type_backplane)
2989 return (EPERM);
2990
2991 /*
2992 * We don't actually need to check against the supported
2993 * media types of the adapter; ifmedia will take care of
2994 * that for us.
2995 */
2996 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2997 case IFM_AUTO:
2998 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
2999 &negotiate);
3000 if (err != IXGBE_SUCCESS) {
3001 device_printf(adapter->dev, "Unable to determine "
3002 "supported advertise speeds\n");
3003 return (ENODEV);
3004 }
3005 speed |= link_caps;
3006 break;
3007 case IFM_10G_T:
3008 case IFM_10G_LRM:
3009 case IFM_10G_LR:
3010 case IFM_10G_TWINAX:
3011 case IFM_10G_SR:
3012 case IFM_10G_CX4:
3013 case IFM_10G_KR:
3014 case IFM_10G_KX4:
3015 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3016 break;
3017 case IFM_5000_T:
3018 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3019 break;
3020 case IFM_2500_T:
3021 case IFM_2500_KX:
3022 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3023 break;
3024 case IFM_1000_T:
3025 case IFM_1000_LX:
3026 case IFM_1000_SX:
3027 case IFM_1000_KX:
3028 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3029 break;
3030 case IFM_100_TX:
3031 speed |= IXGBE_LINK_SPEED_100_FULL;
3032 break;
3033 case IFM_10_T:
3034 speed |= IXGBE_LINK_SPEED_10_FULL;
3035 break;
3036 case IFM_NONE:
3037 break;
3038 default:
3039 goto invalid;
3040 }
3041
3042 hw->mac.autotry_restart = TRUE;
3043 hw->mac.ops.setup_link(hw, speed, TRUE);
3044 adapter->advertise = 0;
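	/*
	 * adapter->advertise is a bitmap of the speeds to advertise:
	 * 0x01 = 100M, 0x02 = 1G, 0x04 = 10G, 0x08 = 10M,
	 * 0x10 = 2.5G, 0x20 = 5G.
	 */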
3045 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3046 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3047 adapter->advertise |= 1 << 2;
3048 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3049 adapter->advertise |= 1 << 1;
3050 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3051 adapter->advertise |= 1 << 0;
3052 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3053 adapter->advertise |= 1 << 3;
3054 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3055 adapter->advertise |= 1 << 4;
3056 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3057 adapter->advertise |= 1 << 5;
3058 }
3059
3060 return (0);
3061
3062 invalid:
3063 device_printf(adapter->dev, "Invalid media type!\n");
3064
3065 return (EINVAL);
3066 } /* ixgbe_media_change */
3067
3068 /************************************************************************
3069 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3070 ************************************************************************/
3071 static int
3072 ixgbe_msix_admin(void *arg)
3073 {
3074 struct adapter *adapter = arg;
3075 struct ixgbe_hw *hw = &adapter->hw;
3076 u32 eicr, eicr_mask;
3077 u32 task_requests = 0;
3078 s32 retval;
3079
3080 ++adapter->admin_irqev.ev_count;
3081
3082 /* Pause other interrupts */
3083 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3084
3085 /* First get the cause */
3086 /*
3087 	 * The 82598, 82599, X540 and X550 specifications say the EICS
3088 	 * register is write-only. However, Linux reads EICS instead of EICR
3089 	 * to get the interrupt cause, as a workaround for silicon errata;
3090 	 * there appears to be a problem with EICR's read-clear mechanism.
3091 */
3092 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3093 /* Be sure the queue bits are not cleared */
3094 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3095 /* Clear interrupt with write */
3096 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3097
3098 if (ixgbe_is_sfp(hw)) {
3099 /* Pluggable optics-related interrupt */
3100 if (hw->mac.type >= ixgbe_mac_X540)
3101 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3102 else
3103 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3104
3105 /*
3106 * An interrupt might not arrive when a module is inserted.
3107 		 * When a link status change interrupt occurs and the driver
3108 		 * still regards the SFP as unplugged, issue the module
3109 		 * softint and then the LSC interrupt.
3110 */
3111 if ((eicr & eicr_mask)
3112 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3113 && (eicr & IXGBE_EICR_LSC))) {
3114 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3115 task_requests |= IXGBE_REQUEST_TASK_MOD;
3116 }
3117
3118 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3119 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3120 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3121 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3122 task_requests |= IXGBE_REQUEST_TASK_MSF;
3123 }
3124 }
3125
3126 /* Link status change */
3127 if (eicr & IXGBE_EICR_LSC) {
3128 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3129 task_requests |= IXGBE_REQUEST_TASK_LSC;
3130 }
3131
3132 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3133 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3134 (eicr & IXGBE_EICR_FLOW_DIR)) {
3135 /* This is probably overkill :) */
3136 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3137 return 1;
3138 /* Disable the interrupt */
3139 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3140 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3141 }
3142
3143 if (eicr & IXGBE_EICR_ECC) {
3144 device_printf(adapter->dev,
3145 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3146 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3147 }
3148
3149 /* Check for over temp condition */
3150 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3151 switch (adapter->hw.mac.type) {
3152 case ixgbe_mac_X550EM_a:
3153 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3154 break;
3155 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3156 IXGBE_EICR_GPI_SDP0_X550EM_a);
3157 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3158 IXGBE_EICR_GPI_SDP0_X550EM_a);
3159 retval = hw->phy.ops.check_overtemp(hw);
3160 if (retval != IXGBE_ERR_OVERTEMP)
3161 break;
3162 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3163 device_printf(adapter->dev, "System shutdown required!\n");
3164 break;
3165 default:
3166 if (!(eicr & IXGBE_EICR_TS))
3167 break;
3168 retval = hw->phy.ops.check_overtemp(hw);
3169 if (retval != IXGBE_ERR_OVERTEMP)
3170 break;
3171 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3172 device_printf(adapter->dev, "System shutdown required!\n");
3173 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3174 break;
3175 }
3176 }
3177
3178 /* Check for VF message */
3179 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3180 (eicr & IXGBE_EICR_MAILBOX)) {
3181 task_requests |= IXGBE_REQUEST_TASK_MBX;
3182 }
3183 }
3184
3185 /* Check for fan failure */
3186 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3187 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3188 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3189 }
3190
3191 /* External PHY interrupt */
3192 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3193 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3194 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3195 task_requests |= IXGBE_REQUEST_TASK_PHY;
3196 }
3197
3198 if (task_requests != 0) {
3199 /* Re-enabling other interrupts is done in the admin task */
3200 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
3201 atomic_or_32(&adapter->task_requests, task_requests);
3202 ixgbe_schedule_admin_tasklet(adapter);
3203 } else {
3204 /* Re-enable other interrupts */
3205 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3206 }
3207
3208 return 1;
3209 } /* ixgbe_msix_admin */
3210
3211 static void
3212 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3213 {
3214
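	/*
	 * 82598 wants the interval written to both the low and high
	 * 16-bit halves of EITR (the Linux driver mirrors the value the
	 * same way); later MACs take the interval in the low half only,
	 * and CNT_WDIS is set so the write does not disturb the running
	 * ITR counter.
	 */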
3215 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3216 itr |= itr << 16;
3217 else
3218 itr |= IXGBE_EITR_CNT_WDIS;
3219
3220 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3221 }
3222
3223
3224 /************************************************************************
3225 * ixgbe_sysctl_interrupt_rate_handler
3226 ************************************************************************/
3227 static int
3228 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3229 {
3230 struct sysctlnode node = *rnode;
3231 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3232 struct adapter *adapter;
3233 uint32_t reg, usec, rate;
3234 int error;
3235
3236 if (que == NULL)
3237 return 0;
3238
3239 adapter = que->adapter;
3240 if (ixgbe_fw_recovery_mode_swflag(adapter))
3241 return (EPERM);
3242
3243 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3244 usec = ((reg & 0x0FF8) >> 3);
3245 if (usec > 0)
3246 rate = 500000 / usec;
3247 else
3248 rate = 0;
3249 node.sysctl_data = &rate;
3250 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3251 if (error || newp == NULL)
3252 return error;
3253 reg &= ~0xfff; /* default, no limitation */
3254 if (rate > 0 && rate < 500000) {
3255 if (rate < 1000)
3256 rate = 1000;
3257 reg |= ((4000000 / rate) & 0xff8);
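		/*
		 * A worked example of the encoding used above and in the
		 * read path: the interval lives in bits [11:3] of EITR,
		 * in 2us units, so field = (1000000 / rate) / 2 and the
		 * raw register value is (4000000 / rate) & 0xff8. For
		 * rate = 10000: 4000000 / 10000 = 400 = 0x190, field =
		 * 0x190 >> 3 = 50, interval = 50 * 2us = 100us, i.e.
		 * 10000 interrupts/s. Note that the "usec" variable in
		 * the read path above therefore holds 2us units, not 1us.
		 */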
3258 /*
3259 * When RSC is used, ITR interval must be larger than
3260 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3261 		 * The minimum value is always greater than 2us on 100M
3262 		 * (and 10M? (not documented)), but not on 1G and higher.
3263 */
3264 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3265 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3266 if ((adapter->num_queues > 1)
3267 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3268 return EINVAL;
3269 }
3270 ixgbe_max_interrupt_rate = rate;
3271 } else
3272 ixgbe_max_interrupt_rate = 0;
3273 ixgbe_eitr_write(adapter, que->msix, reg);
3274
3275 return (0);
3276 } /* ixgbe_sysctl_interrupt_rate_handler */
3277
3278 const struct sysctlnode *
3279 ixgbe_sysctl_instance(struct adapter *adapter)
3280 {
3281 const char *dvname;
3282 struct sysctllog **log;
3283 int rc;
3284 const struct sysctlnode *rnode;
3285
3286 if (adapter->sysctltop != NULL)
3287 return adapter->sysctltop;
3288
3289 log = &adapter->sysctllog;
3290 dvname = device_xname(adapter->dev);
3291
3292 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3293 0, CTLTYPE_NODE, dvname,
3294 SYSCTL_DESCR("ixgbe information and settings"),
3295 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3296 goto err;
3297
3298 return rnode;
3299 err:
3300 device_printf(adapter->dev,
3301 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3302 return NULL;
3303 }
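/*
 * The node created above is the per-device sysctl root, so for a first
 * instance named "ixg0" the leaves added by ixgbe_add_device_sysctls()
 * below appear as, for example (illustrative invocations only):
 *
 *	sysctl hw.ixg0.num_queues
 *	sysctl -w hw.ixg0.fc=3
 */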
3304
3305 /************************************************************************
3306 * ixgbe_add_device_sysctls
3307 ************************************************************************/
3308 static void
3309 ixgbe_add_device_sysctls(struct adapter *adapter)
3310 {
3311 device_t dev = adapter->dev;
3312 struct ixgbe_hw *hw = &adapter->hw;
3313 struct sysctllog **log;
3314 const struct sysctlnode *rnode, *cnode;
3315
3316 log = &adapter->sysctllog;
3317
3318 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3319 aprint_error_dev(dev, "could not create sysctl root\n");
3320 return;
3321 }
3322
3323 if (sysctl_createv(log, 0, &rnode, &cnode,
3324 CTLFLAG_READWRITE, CTLTYPE_INT,
3325 "debug", SYSCTL_DESCR("Debug Info"),
3326 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3327 aprint_error_dev(dev, "could not create sysctl\n");
3328
3329 if (sysctl_createv(log, 0, &rnode, &cnode,
3330 CTLFLAG_READONLY, CTLTYPE_INT,
3331 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3332 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3333 aprint_error_dev(dev, "could not create sysctl\n");
3334
3335 if (sysctl_createv(log, 0, &rnode, &cnode,
3336 CTLFLAG_READONLY, CTLTYPE_INT,
3337 "num_queues", SYSCTL_DESCR("Number of queues"),
3338 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3339 aprint_error_dev(dev, "could not create sysctl\n");
3340
3341 /* Sysctls for all devices */
3342 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3343 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3344 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3345 CTL_EOL) != 0)
3346 aprint_error_dev(dev, "could not create sysctl\n");
3347
3348 adapter->enable_aim = ixgbe_enable_aim;
3349 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3350 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3351 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3352 aprint_error_dev(dev, "could not create sysctl\n");
3353
3354 if (sysctl_createv(log, 0, &rnode, &cnode,
3355 CTLFLAG_READWRITE, CTLTYPE_INT,
3356 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3357 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3358 CTL_EOL) != 0)
3359 aprint_error_dev(dev, "could not create sysctl\n");
3360
3361 	/*
3362 	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3363 	 * handler, it could flip-flop between softint and workqueue mode
3364 	 * within one deferred processing run. Preventing that would
3365 	 * require preempt_disable()/preempt_enable() around
3366 	 * ixgbe_sched_handle_que() to avoid the KASSERT in
3367 	 * softint_schedule(). Changing "que->txrx_use_workqueue" in the
3368 	 * interrupt handler instead is lighter than doing
3369 	 * preempt_disable()/preempt_enable() in every call.
3370 	 */
3371 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3372 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3373 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3374 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3375 aprint_error_dev(dev, "could not create sysctl\n");
3376
3377 #ifdef IXGBE_DEBUG
3378 /* testing sysctls (for all devices) */
3379 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3380 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3381 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3382 CTL_EOL) != 0)
3383 aprint_error_dev(dev, "could not create sysctl\n");
3384
3385 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3386 CTLTYPE_STRING, "print_rss_config",
3387 SYSCTL_DESCR("Prints RSS Configuration"),
3388 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3389 CTL_EOL) != 0)
3390 aprint_error_dev(dev, "could not create sysctl\n");
3391 #endif
3392 /* for X550 series devices */
3393 if (hw->mac.type >= ixgbe_mac_X550)
3394 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3395 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3396 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3397 CTL_EOL) != 0)
3398 aprint_error_dev(dev, "could not create sysctl\n");
3399
3400 /* for WoL-capable devices */
3401 if (adapter->wol_support) {
3402 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3403 CTLTYPE_BOOL, "wol_enable",
3404 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3405 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3406 CTL_EOL) != 0)
3407 aprint_error_dev(dev, "could not create sysctl\n");
3408
3409 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3410 CTLTYPE_INT, "wufc",
3411 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3412 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3413 CTL_EOL) != 0)
3414 aprint_error_dev(dev, "could not create sysctl\n");
3415 }
3416
3417 /* for X552/X557-AT devices */
3418 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3419 const struct sysctlnode *phy_node;
3420
3421 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3422 "phy", SYSCTL_DESCR("External PHY sysctls"),
3423 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3424 aprint_error_dev(dev, "could not create sysctl\n");
3425 return;
3426 }
3427
3428 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3429 CTLTYPE_INT, "temp",
3430 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3431 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3432 CTL_EOL) != 0)
3433 aprint_error_dev(dev, "could not create sysctl\n");
3434
3435 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3436 CTLTYPE_INT, "overtemp_occurred",
3437 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3438 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3439 CTL_CREATE, CTL_EOL) != 0)
3440 aprint_error_dev(dev, "could not create sysctl\n");
3441 }
3442
3443 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3444 && (hw->phy.type == ixgbe_phy_fw))
3445 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3446 CTLTYPE_BOOL, "force_10_100_autonego",
3447 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3448 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3449 CTL_CREATE, CTL_EOL) != 0)
3450 aprint_error_dev(dev, "could not create sysctl\n");
3451
3452 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3453 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3454 CTLTYPE_INT, "eee_state",
3455 SYSCTL_DESCR("EEE Power Save State"),
3456 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3457 CTL_EOL) != 0)
3458 aprint_error_dev(dev, "could not create sysctl\n");
3459 }
3460 } /* ixgbe_add_device_sysctls */
3461
3462 /************************************************************************
3463 * ixgbe_allocate_pci_resources
3464 ************************************************************************/
3465 static int
3466 ixgbe_allocate_pci_resources(struct adapter *adapter,
3467 const struct pci_attach_args *pa)
3468 {
3469 pcireg_t memtype, csr;
3470 device_t dev = adapter->dev;
3471 bus_addr_t addr;
3472 int flags;
3473
3474 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3475 switch (memtype) {
3476 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3477 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3478 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3479 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3480 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3481 goto map_err;
3482 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3483 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3484 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3485 }
3486 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3487 adapter->osdep.mem_size, flags,
3488 &adapter->osdep.mem_bus_space_handle) != 0) {
3489 map_err:
3490 adapter->osdep.mem_size = 0;
3491 aprint_error_dev(dev, "unable to map BAR0\n");
3492 return ENXIO;
3493 }
3494 /*
3495 		 * Enable address decoding for the memory range in case the
3496 		 * BIOS or UEFI didn't set it.
3497 */
3498 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3499 PCI_COMMAND_STATUS_REG);
3500 csr |= PCI_COMMAND_MEM_ENABLE;
3501 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3502 csr);
3503 break;
3504 default:
3505 aprint_error_dev(dev, "unexpected type on BAR0\n");
3506 return ENXIO;
3507 }
3508
3509 return (0);
3510 } /* ixgbe_allocate_pci_resources */
3511
3512 static void
3513 ixgbe_free_workqueue(struct adapter *adapter)
3514 {
3515 struct ix_queue *que = adapter->queues;
3516 struct tx_ring *txr = adapter->tx_rings;
3517 int i;
3518
3519 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3520 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3521 if (txr->txr_si != NULL)
3522 softint_disestablish(txr->txr_si);
3523 }
3524 if (que->que_si != NULL)
3525 softint_disestablish(que->que_si);
3526 }
3527 if (adapter->txr_wq != NULL)
3528 workqueue_destroy(adapter->txr_wq);
3529 if (adapter->txr_wq_enqueued != NULL)
3530 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3531 if (adapter->que_wq != NULL)
3532 workqueue_destroy(adapter->que_wq);
3533
3534 if (adapter->admin_wq != NULL) {
3535 workqueue_destroy(adapter->admin_wq);
3536 adapter->admin_wq = NULL;
3537 }
3538 if (adapter->timer_wq != NULL) {
3539 workqueue_destroy(adapter->timer_wq);
3540 adapter->timer_wq = NULL;
3541 }
3542 if (adapter->recovery_mode_timer_wq != NULL) {
3543 /*
3544 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3545 * the recovery_mode_timer workqueue, so call it here.
3546 */
3547 workqueue_wait(adapter->recovery_mode_timer_wq,
3548 &adapter->recovery_mode_timer_wc);
3549 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3550 workqueue_destroy(adapter->recovery_mode_timer_wq);
3551 adapter->recovery_mode_timer_wq = NULL;
3552 }
3553 } /* ixgbe_free_workqueue */
3554
3555 /************************************************************************
3556 * ixgbe_detach - Device removal routine
3557 *
3558 * Called when the driver is being removed.
3559 * Stops the adapter and deallocates all the resources
3560 * that were allocated for driver operation.
3561 *
3562 * return 0 on success, positive on failure
3563 ************************************************************************/
3564 static int
3565 ixgbe_detach(device_t dev, int flags)
3566 {
3567 struct adapter *adapter = device_private(dev);
3568 struct rx_ring *rxr = adapter->rx_rings;
3569 struct tx_ring *txr = adapter->tx_rings;
3570 struct ixgbe_hw *hw = &adapter->hw;
3571 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3572 u32 ctrl_ext;
3573 int i;
3574
3575 INIT_DEBUGOUT("ixgbe_detach: begin");
3576 if (adapter->osdep.attached == false)
3577 return 0;
3578
3579 if (ixgbe_pci_iov_detach(dev) != 0) {
3580 device_printf(dev, "SR-IOV in use; detach first.\n");
3581 return (EBUSY);
3582 }
3583
3584 #if NVLAN > 0
3585 /* Make sure VLANs are not using driver */
3586 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3587 ; /* nothing to do: no VLANs */
3588 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3589 vlan_ifdetach(adapter->ifp);
3590 else {
3591 aprint_error_dev(dev, "VLANs in use, detach first\n");
3592 return (EBUSY);
3593 }
3594 #endif
3595
3596 /*
3597 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3598 * so it's not required to call ixgbe_stop() directly.
3599 */
3600 IXGBE_CORE_LOCK(adapter);
3601 ixgbe_setup_low_power_mode(adapter);
3602 IXGBE_CORE_UNLOCK(adapter);
3603
3604 callout_halt(&adapter->timer, NULL);
3605 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
3606 callout_stop(&adapter->recovery_mode_timer);
3607 callout_halt(&adapter->recovery_mode_timer, NULL);
3608 }
3609
3610 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3611 atomic_store_relaxed(&adapter->admin_pending, 0);
3612 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3613 atomic_store_relaxed(&adapter->timer_pending, 0);
3614
3615 pmf_device_deregister(dev);
3616
3617 ether_ifdetach(adapter->ifp);
3618
3619 ixgbe_free_workqueue(adapter);
3620
3621 /* let hardware know driver is unloading */
3622 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3623 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3625
3626 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3627 netmap_detach(adapter->ifp);
3628
3629 ixgbe_free_pci_resources(adapter);
3630 #if 0 /* XXX the NetBSD port is probably missing something here */
3631 bus_generic_detach(dev);
3632 #endif
3633 if_detach(adapter->ifp);
3634 ifmedia_fini(&adapter->media);
3635 if_percpuq_destroy(adapter->ipq);
3636
3637 sysctl_teardown(&adapter->sysctllog);
3638 evcnt_detach(&adapter->efbig_tx_dma_setup);
3639 evcnt_detach(&adapter->mbuf_defrag_failed);
3640 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3641 evcnt_detach(&adapter->einval_tx_dma_setup);
3642 evcnt_detach(&adapter->other_tx_dma_setup);
3643 evcnt_detach(&adapter->eagain_tx_dma_setup);
3644 evcnt_detach(&adapter->enomem_tx_dma_setup);
3645 evcnt_detach(&adapter->watchdog_events);
3646 evcnt_detach(&adapter->tso_err);
3647 evcnt_detach(&adapter->admin_irqev);
3648 evcnt_detach(&adapter->link_workev);
3649 evcnt_detach(&adapter->mod_workev);
3650 evcnt_detach(&adapter->msf_workev);
3651 evcnt_detach(&adapter->phy_workev);
3652
3653 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3654 if (i < __arraycount(stats->mpc)) {
3655 evcnt_detach(&stats->mpc[i]);
3656 if (hw->mac.type == ixgbe_mac_82598EB)
3657 evcnt_detach(&stats->rnbc[i]);
3658 }
3659 if (i < __arraycount(stats->pxontxc)) {
3660 evcnt_detach(&stats->pxontxc[i]);
3661 evcnt_detach(&stats->pxonrxc[i]);
3662 evcnt_detach(&stats->pxofftxc[i]);
3663 evcnt_detach(&stats->pxoffrxc[i]);
3664 if (hw->mac.type >= ixgbe_mac_82599EB)
3665 evcnt_detach(&stats->pxon2offc[i]);
3666 }
3667 }
3668
3669 txr = adapter->tx_rings;
3670 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3671 evcnt_detach(&adapter->queues[i].irqs);
3672 evcnt_detach(&adapter->queues[i].handleq);
3673 evcnt_detach(&adapter->queues[i].req);
3674 evcnt_detach(&txr->no_desc_avail);
3675 evcnt_detach(&txr->total_packets);
3676 evcnt_detach(&txr->tso_tx);
3677 #ifndef IXGBE_LEGACY_TX
3678 evcnt_detach(&txr->pcq_drops);
3679 #endif
3680
3681 if (i < __arraycount(stats->qprc)) {
3682 evcnt_detach(&stats->qprc[i]);
3683 evcnt_detach(&stats->qptc[i]);
3684 evcnt_detach(&stats->qbrc[i]);
3685 evcnt_detach(&stats->qbtc[i]);
3686 if (hw->mac.type >= ixgbe_mac_82599EB)
3687 evcnt_detach(&stats->qprdc[i]);
3688 }
3689
3690 evcnt_detach(&rxr->rx_packets);
3691 evcnt_detach(&rxr->rx_bytes);
3692 evcnt_detach(&rxr->rx_copies);
3693 evcnt_detach(&rxr->no_jmbuf);
3694 evcnt_detach(&rxr->rx_discarded);
3695 }
3696 evcnt_detach(&stats->ipcs);
3697 evcnt_detach(&stats->l4cs);
3698 evcnt_detach(&stats->ipcs_bad);
3699 evcnt_detach(&stats->l4cs_bad);
3700 evcnt_detach(&stats->intzero);
3701 evcnt_detach(&stats->legint);
3702 evcnt_detach(&stats->crcerrs);
3703 evcnt_detach(&stats->illerrc);
3704 evcnt_detach(&stats->errbc);
3705 evcnt_detach(&stats->mspdc);
3706 if (hw->mac.type >= ixgbe_mac_X550)
3707 evcnt_detach(&stats->mbsdc);
3708 evcnt_detach(&stats->mpctotal);
3709 evcnt_detach(&stats->mlfc);
3710 evcnt_detach(&stats->mrfc);
3711 evcnt_detach(&stats->rlec);
3712 evcnt_detach(&stats->lxontxc);
3713 evcnt_detach(&stats->lxonrxc);
3714 evcnt_detach(&stats->lxofftxc);
3715 evcnt_detach(&stats->lxoffrxc);
3716
3717 /* Packet Reception Stats */
3718 evcnt_detach(&stats->tor);
3719 evcnt_detach(&stats->gorc);
3720 evcnt_detach(&stats->tpr);
3721 evcnt_detach(&stats->gprc);
3722 evcnt_detach(&stats->mprc);
3723 evcnt_detach(&stats->bprc);
3724 evcnt_detach(&stats->prc64);
3725 evcnt_detach(&stats->prc127);
3726 evcnt_detach(&stats->prc255);
3727 evcnt_detach(&stats->prc511);
3728 evcnt_detach(&stats->prc1023);
3729 evcnt_detach(&stats->prc1522);
3730 evcnt_detach(&stats->ruc);
3731 evcnt_detach(&stats->rfc);
3732 evcnt_detach(&stats->roc);
3733 evcnt_detach(&stats->rjc);
3734 evcnt_detach(&stats->mngprc);
3735 evcnt_detach(&stats->mngpdc);
3736 evcnt_detach(&stats->xec);
3737
3738 /* Packet Transmission Stats */
3739 evcnt_detach(&stats->gotc);
3740 evcnt_detach(&stats->tpt);
3741 evcnt_detach(&stats->gptc);
3742 evcnt_detach(&stats->bptc);
3743 evcnt_detach(&stats->mptc);
3744 evcnt_detach(&stats->mngptc);
3745 evcnt_detach(&stats->ptc64);
3746 evcnt_detach(&stats->ptc127);
3747 evcnt_detach(&stats->ptc255);
3748 evcnt_detach(&stats->ptc511);
3749 evcnt_detach(&stats->ptc1023);
3750 evcnt_detach(&stats->ptc1522);
3751
3752 ixgbe_free_queues(adapter);
3753 free(adapter->mta, M_DEVBUF);
3754
3755 IXGBE_CORE_LOCK_DESTROY(adapter);
3756
3757 return (0);
3758 } /* ixgbe_detach */
3759
3760 /************************************************************************
3761 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3762 *
3763 * Prepare the adapter/port for LPLU and/or WoL
3764 ************************************************************************/
3765 static int
3766 ixgbe_setup_low_power_mode(struct adapter *adapter)
3767 {
3768 struct ixgbe_hw *hw = &adapter->hw;
3769 device_t dev = adapter->dev;
3770 s32 error = 0;
3771
3772 KASSERT(mutex_owned(&adapter->core_mtx));
3773
3774 /* Limit power management flow to X550EM baseT */
3775 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3776 hw->phy.ops.enter_lplu) {
3777 /* X550EM baseT adapters need a special LPLU flow */
3778 hw->phy.reset_disable = true;
3779 ixgbe_stop(adapter);
3780 error = hw->phy.ops.enter_lplu(hw);
3781 if (error)
3782 device_printf(dev,
3783 "Error entering LPLU: %d\n", error);
3784 hw->phy.reset_disable = false;
3785 } else {
3786 /* Just stop for other adapters */
3787 ixgbe_stop(adapter);
3788 }
3789
3790 if (!hw->wol_enabled) {
3791 ixgbe_set_phy_power(hw, FALSE);
3792 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3793 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3794 } else {
3795 /* Turn off support for APM wakeup. (Using ACPI instead) */
3796 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3797 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3798
3799 /*
3800 * Clear Wake Up Status register to prevent any previous wakeup
3801 * events from waking us up immediately after we suspend.
3802 */
3803 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3804
3805 /*
3806 * Program the Wakeup Filter Control register with user filter
3807 * settings
3808 */
3809 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3810
3811 /* Enable wakeups and power management in Wakeup Control */
3812 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3813 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3814
3815 }
3816
3817 return error;
3818 } /* ixgbe_setup_low_power_mode */
3819
3820 /************************************************************************
3821 * ixgbe_shutdown - Shutdown entry point
3822 ************************************************************************/
3823 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3824 static int
3825 ixgbe_shutdown(device_t dev)
3826 {
3827 struct adapter *adapter = device_private(dev);
3828 int error = 0;
3829
3830 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3831
3832 IXGBE_CORE_LOCK(adapter);
3833 error = ixgbe_setup_low_power_mode(adapter);
3834 IXGBE_CORE_UNLOCK(adapter);
3835
3836 return (error);
3837 } /* ixgbe_shutdown */
3838 #endif
3839
3840 /************************************************************************
3841 * ixgbe_suspend
3842 *
3843 * From D0 to D3
3844 ************************************************************************/
3845 static bool
3846 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3847 {
3848 struct adapter *adapter = device_private(dev);
3849 int error = 0;
3850
3851 INIT_DEBUGOUT("ixgbe_suspend: begin");
3852
3853 IXGBE_CORE_LOCK(adapter);
3854
3855 error = ixgbe_setup_low_power_mode(adapter);
3856
3857 IXGBE_CORE_UNLOCK(adapter);
3858
3859 	return (error == 0);	/* pmf(9) expects true on success */
3860 } /* ixgbe_suspend */
3861
3862 /************************************************************************
3863 * ixgbe_resume
3864 *
3865 * From D3 to D0
3866 ************************************************************************/
3867 static bool
3868 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3869 {
3870 struct adapter *adapter = device_private(dev);
3871 struct ifnet *ifp = adapter->ifp;
3872 struct ixgbe_hw *hw = &adapter->hw;
3873 u32 wus;
3874
3875 INIT_DEBUGOUT("ixgbe_resume: begin");
3876
3877 IXGBE_CORE_LOCK(adapter);
3878
3879 /* Read & clear WUS register */
3880 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3881 	if (wus)
3882 		device_printf(dev, "Woken up by (WUS): %#010x\n",
3883 		    wus);
3884 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3885 /* And clear WUFC until next low-power transition */
3886 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3887
3888 /*
3889 * Required after D3->D0 transition;
3890 	 * will re-advertise all previously advertised speeds
3891 */
3892 if (ifp->if_flags & IFF_UP)
3893 ixgbe_init_locked(adapter);
3894
3895 IXGBE_CORE_UNLOCK(adapter);
3896
3897 return true;
3898 } /* ixgbe_resume */
3899
3900 /*
3901 * Set the various hardware offload abilities.
3902 *
3903 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3904 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3905 * mbuf offload flags the driver will understand.
3906 */
3907 static void
3908 ixgbe_set_if_hwassist(struct adapter *adapter)
3909 {
3910 	/* XXX Not yet implemented in the NetBSD port. */
3911 }
3912
3913 /************************************************************************
3914 * ixgbe_init_locked - Init entry point
3915 *
3916  * Used in two ways: by the stack as the init entry point in
3917  * the network interface structure, and by the driver as a
3918  * hw/sw initialization routine to get to a consistent state.
3919  *
3920  * Returns nothing; on failure, the adapter is stopped again
3921  * via ixgbe_stop().
3922 ************************************************************************/
3923 static void
3924 ixgbe_init_locked(struct adapter *adapter)
3925 {
3926 struct ifnet *ifp = adapter->ifp;
3927 device_t dev = adapter->dev;
3928 struct ixgbe_hw *hw = &adapter->hw;
3929 struct ix_queue *que;
3930 struct tx_ring *txr;
3931 struct rx_ring *rxr;
3932 u32 txdctl, mhadd;
3933 u32 rxdctl, rxctrl;
3934 u32 ctrl_ext;
3935 bool unsupported_sfp = false;
3936 int i, j, err;
3937
3938 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3939
3940 KASSERT(mutex_owned(&adapter->core_mtx));
3941 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3942
3943 hw->need_unsupported_sfp_recovery = false;
3944 hw->adapter_stopped = FALSE;
3945 ixgbe_stop_adapter(hw);
3946 callout_stop(&adapter->timer);
3947 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3948 callout_stop(&adapter->recovery_mode_timer);
3949 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3950 que->disabled_count = 0;
3951
3952 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3953 adapter->max_frame_size =
3954 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3955
3956 /* Queue indices may change with IOV mode */
3957 ixgbe_align_all_queue_indices(adapter);
3958
3959 /* reprogram the RAR[0] in case user changed it. */
3960 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3961
3962 	/* Get the latest MAC address; the user may have set an LAA */
3963 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3964 IXGBE_ETH_LENGTH_OF_ADDRESS);
3965 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3966 hw->addr_ctrl.rar_used_count = 1;
3967
3968 /* Set hardware offload abilities from ifnet flags */
3969 ixgbe_set_if_hwassist(adapter);
3970
3971 /* Prepare transmit descriptors and buffers */
3972 if (ixgbe_setup_transmit_structures(adapter)) {
3973 device_printf(dev, "Could not setup transmit structures\n");
3974 ixgbe_stop(adapter);
3975 return;
3976 }
3977
3978 ixgbe_init_hw(hw);
3979
3980 ixgbe_initialize_iov(adapter);
3981
3982 ixgbe_initialize_transmit_units(adapter);
3983
3984 /* Setup Multicast table */
3985 ixgbe_set_rxfilter(adapter);
3986
3987 /* Determine the correct mbuf pool, based on frame size */
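	/*
	 * (MCLBYTES is the standard 2 KB mbuf cluster on NetBSD;
	 * MJUMPAGESIZE is a page-sized jumbo cluster.)
	 */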
3988 if (adapter->max_frame_size <= MCLBYTES)
3989 adapter->rx_mbuf_sz = MCLBYTES;
3990 else
3991 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3992
3993 /* Prepare receive descriptors and buffers */
3994 if (ixgbe_setup_receive_structures(adapter)) {
3995 device_printf(dev, "Could not setup receive structures\n");
3996 ixgbe_stop(adapter);
3997 return;
3998 }
3999
4000 /* Configure RX settings */
4001 ixgbe_initialize_receive_units(adapter);
4002
4003 	/* Initialize the variable holding task requests from interrupts */
4004 adapter->task_requests = 0;
4005
4006 /* Enable SDP & MSI-X interrupts based on adapter */
4007 ixgbe_config_gpie(adapter);
4008
4009 /* Set MTU size */
4010 if (ifp->if_mtu > ETHERMTU) {
4011 /* aka IXGBE_MAXFRS on 82599 and newer */
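		/*
		 * The max-frame-size field occupies MHADD[31:16]
		 * (IXGBE_MHADD_MFS_SHIFT is 16), so e.g. a 9000-byte MTU
		 * programs 9018 (MTU + Ethernet header + CRC, as computed
		 * for adapter->max_frame_size above).
		 */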
4012 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4013 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4014 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4015 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4016 }
4017
4018 /* Now enable all the queues */
4019 for (i = 0; i < adapter->num_queues; i++) {
4020 txr = &adapter->tx_rings[i];
4021 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4022 txdctl |= IXGBE_TXDCTL_ENABLE;
4023 /* Set WTHRESH to 8, burst writeback */
4024 txdctl |= (8 << 16);
4025 /*
4026 * When the internal queue falls below PTHRESH (32),
4027 * start prefetching as long as there are at least
4028 * HTHRESH (1) buffers ready. The values are taken
4029 * from the Intel linux driver 3.8.21.
4030 * Prefetching enables tx line rate even with 1 queue.
4031 */
4032 txdctl |= (32 << 0) | (1 << 8);
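		/*
		 * Resulting TXDCTL thresholds, per the shifts above:
		 *   PTHRESH (bits  6:0)  = 32
		 *   HTHRESH (bits 14:8)  =  1
		 *   WTHRESH (bits 22:16) =  8
		 */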
4033 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4034 }
4035
4036 for (i = 0; i < adapter->num_queues; i++) {
4037 rxr = &adapter->rx_rings[i];
4038 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4039 if (hw->mac.type == ixgbe_mac_82598EB) {
4040 /*
4041 			 * PTHRESH = 32 (0x20)
4042 * HTHRESH = 4
4043 * WTHRESH = 8
4044 */
4045 rxdctl &= ~0x3FFFFF;
4046 rxdctl |= 0x080420;
4047 }
4048 rxdctl |= IXGBE_RXDCTL_ENABLE;
4049 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4050 for (j = 0; j < 10; j++) {
4051 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4052 IXGBE_RXDCTL_ENABLE)
4053 break;
4054 else
4055 msec_delay(1);
4056 }
4057 IXGBE_WRITE_BARRIER(hw);
4058
4059 /*
4060 * In netmap mode, we must preserve the buffers made
4061 * available to userspace before the if_init()
4062 * (this is true by default on the TX side, because
4063 * init makes all buffers available to userspace).
4064 *
4065 * netmap_reset() and the device specific routines
4066 * (e.g. ixgbe_setup_receive_rings()) map these
4067 * buffers at the end of the NIC ring, so here we
4068 * must set the RDT (tail) register to make sure
4069 * they are not overwritten.
4070 *
4071 * In this driver the NIC ring starts at RDH = 0,
4072 * RDT points to the last slot available for reception (?),
4073 * so RDT = num_rx_desc - 1 means the whole ring is available.
4074 */
4075 #ifdef DEV_NETMAP
4076 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4077 (ifp->if_capenable & IFCAP_NETMAP)) {
4078 struct netmap_adapter *na = NA(adapter->ifp);
4079 struct netmap_kring *kring = na->rx_rings[i];
4080 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4081
4082 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4083 } else
4084 #endif /* DEV_NETMAP */
4085 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4086 adapter->num_rx_desc - 1);
4087 }
4088
4089 /* Enable Receive engine */
4090 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4091 if (hw->mac.type == ixgbe_mac_82598EB)
4092 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4093 rxctrl |= IXGBE_RXCTRL_RXEN;
4094 ixgbe_enable_rx_dma(hw, rxctrl);
4095
4096 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4097 atomic_store_relaxed(&adapter->timer_pending, 0);
4098 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4099 callout_reset(&adapter->recovery_mode_timer, hz,
4100 ixgbe_recovery_mode_timer, adapter);
4101
4102 /* Set up MSI/MSI-X routing */
4103 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4104 ixgbe_configure_ivars(adapter);
4105 /* Set up auto-mask */
4106 if (hw->mac.type == ixgbe_mac_82598EB)
4107 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4108 else {
4109 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4110 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4111 }
4112 } else { /* Simple settings for Legacy/MSI */
4113 ixgbe_set_ivar(adapter, 0, 0, 0);
4114 ixgbe_set_ivar(adapter, 0, 0, 1);
4115 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4116 }
4117
4118 ixgbe_init_fdir(adapter);
4119
4120 /*
4121 * Check on any SFP devices that
4122 * need to be kick-started
4123 */
4124 if (hw->phy.type == ixgbe_phy_none) {
4125 err = hw->phy.ops.identify(hw);
4126 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4127 unsupported_sfp = true;
4128 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4129 unsupported_sfp = true;
4130
4131 if (unsupported_sfp)
4132 device_printf(dev,
4133 "Unsupported SFP+ module type was detected.\n");
4134
4135 /* Set moderation on the Link interrupt */
4136 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4137
4138 /* Enable EEE power saving */
4139 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4140 hw->mac.ops.setup_eee(hw,
4141 adapter->feat_en & IXGBE_FEATURE_EEE);
4142
4143 /* Enable power to the phy. */
4144 if (!unsupported_sfp) {
4145 ixgbe_set_phy_power(hw, TRUE);
4146
4147 /* Config/Enable Link */
4148 ixgbe_config_link(adapter);
4149 }
4150
4151 /* Hardware Packet Buffer & Flow Control setup */
4152 ixgbe_config_delay_values(adapter);
4153
4154 /* Initialize the FC settings */
4155 ixgbe_start_hw(hw);
4156
4157 /* Set up VLAN support and filter */
4158 ixgbe_setup_vlan_hw_support(adapter);
4159
4160 /* Setup DMA Coalescing */
4161 ixgbe_config_dmac(adapter);
4162
4163 /* OK to schedule workqueues. */
4164 adapter->schedule_wqs_ok = true;
4165
4166 /* And now turn on interrupts */
4167 ixgbe_enable_intr(adapter);
4168
4169 /* Enable the use of the MBX by the VF's */
4170 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4171 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4172 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4173 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4174 }
4175
4176 /* Update saved flags. See ixgbe_ifflags_cb() */
4177 adapter->if_flags = ifp->if_flags;
4178 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4179
4180 /* Now inform the stack we're ready */
4181 ifp->if_flags |= IFF_RUNNING;
4182
4183 return;
4184 } /* ixgbe_init_locked */
4185
4186 /************************************************************************
4187 * ixgbe_init
4188 ************************************************************************/
4189 static int
4190 ixgbe_init(struct ifnet *ifp)
4191 {
4192 struct adapter *adapter = ifp->if_softc;
4193
4194 IXGBE_CORE_LOCK(adapter);
4195 ixgbe_init_locked(adapter);
4196 IXGBE_CORE_UNLOCK(adapter);
4197
4198 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4199 } /* ixgbe_init */
4200
4201 /************************************************************************
4202 * ixgbe_set_ivar
4203 *
4204 * Setup the correct IVAR register for a particular MSI-X interrupt
4205 * (yes this is all very magic and confusing :)
4206 * - entry is the register array entry
4207 * - vector is the MSI-X vector for this queue
4208 * - type is RX/TX/MISC
4209 ************************************************************************/
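/*
 * A sketch of the resulting layout, derived from the index math below
 * (not quoted from the datasheet): on 82599 and newer, each 32-bit
 * IVAR register holds the vectors for one queue pair,
 *
 *	IVAR(entry >> 1):  [7:0]   RX of even entry  [15:8]  TX of even
 *	                   [23:16] RX of odd entry   [31:24] TX of odd
 *
 * so e.g. entry = 5, type = 0 (RX), vector = 3 sets bits [23:16] of
 * IVAR(2) to 0x83 (vector | IXGBE_IVAR_ALLOC_VAL). On 82598, RX
 * entries start at 0 and TX entries at 64, with four 8-bit fields per
 * register, so TX queue 5 (entry 69) lands in byte 1 of IVAR(17).
 */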
4210 static void
4211 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4212 {
4213 struct ixgbe_hw *hw = &adapter->hw;
4214 u32 ivar, index;
4215
4216 vector |= IXGBE_IVAR_ALLOC_VAL;
4217
4218 switch (hw->mac.type) {
4219 case ixgbe_mac_82598EB:
4220 if (type == -1)
4221 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4222 else
4223 entry += (type * 64);
4224 index = (entry >> 2) & 0x1F;
4225 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4226 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4227 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4228 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4229 break;
4230 case ixgbe_mac_82599EB:
4231 case ixgbe_mac_X540:
4232 case ixgbe_mac_X550:
4233 case ixgbe_mac_X550EM_x:
4234 case ixgbe_mac_X550EM_a:
4235 if (type == -1) { /* MISC IVAR */
4236 index = (entry & 1) * 8;
4237 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4238 ivar &= ~(0xffUL << index);
4239 ivar |= ((u32)vector << index);
4240 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4241 } else { /* RX/TX IVARS */
4242 index = (16 * (entry & 1)) + (8 * type);
4243 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4244 ivar &= ~(0xffUL << index);
4245 ivar |= ((u32)vector << index);
4246 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4247 }
4248 break;
4249 default:
4250 break;
4251 }
4252 } /* ixgbe_set_ivar */
4253
4254 /************************************************************************
4255 * ixgbe_configure_ivars
4256 ************************************************************************/
4257 static void
4258 ixgbe_configure_ivars(struct adapter *adapter)
4259 {
4260 struct ix_queue *que = adapter->queues;
4261 u32 newitr;
4262
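	/*
	 * Same EITR encoding as in ixgbe_sysctl_interrupt_rate_handler():
	 * the interval is held in bits [11:3] in 2us units, hence
	 * (4000000 / rate) & 0x0FF8.
	 */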
4263 if (ixgbe_max_interrupt_rate > 0)
4264 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4265 else {
4266 /*
4267 * Disable DMA coalescing if interrupt moderation is
4268 * disabled.
4269 */
4270 adapter->dmac = 0;
4271 newitr = 0;
4272 }
4273
4274 for (int i = 0; i < adapter->num_queues; i++, que++) {
4275 struct rx_ring *rxr = &adapter->rx_rings[i];
4276 struct tx_ring *txr = &adapter->tx_rings[i];
4277 /* First the RX queue entry */
4278 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4279 /* ... and the TX */
4280 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4281 /* Set an Initial EITR value */
4282 ixgbe_eitr_write(adapter, que->msix, newitr);
4283 /*
4284 		 * Clear any EITR state left over from the previous run.
4285 		 * At this point the Tx/Rx interrupt handler
4286 		 * (ixgbe_msix_que()) cannot be called, so neither
4287 		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4288 */
4289 que->eitr_setting = 0;
4290 }
4291
4292 /* For the Link interrupt */
4293 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4294 } /* ixgbe_configure_ivars */
4295
4296 /************************************************************************
4297 * ixgbe_config_gpie
4298 ************************************************************************/
4299 static void
4300 ixgbe_config_gpie(struct adapter *adapter)
4301 {
4302 struct ixgbe_hw *hw = &adapter->hw;
4303 u32 gpie;
4304
4305 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4306
4307 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4308 /* Enable Enhanced MSI-X mode */
4309 gpie |= IXGBE_GPIE_MSIX_MODE
4310 | IXGBE_GPIE_EIAME
4311 | IXGBE_GPIE_PBA_SUPPORT
4312 | IXGBE_GPIE_OCD;
4313 }
4314
4315 /* Fan Failure Interrupt */
4316 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4317 gpie |= IXGBE_SDP1_GPIEN;
4318
4319 /* Thermal Sensor Interrupt */
4320 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4321 gpie |= IXGBE_SDP0_GPIEN_X540;
4322
4323 /* Link detection */
4324 switch (hw->mac.type) {
4325 case ixgbe_mac_82599EB:
4326 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4327 break;
4328 case ixgbe_mac_X550EM_x:
4329 case ixgbe_mac_X550EM_a:
4330 gpie |= IXGBE_SDP0_GPIEN_X540;
4331 break;
4332 default:
4333 break;
4334 }
4335
4336 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4337
4338 } /* ixgbe_config_gpie */
4339
4340 /************************************************************************
4341 * ixgbe_config_delay_values
4342 *
4343 * Requires adapter->max_frame_size to be set.
4344 ************************************************************************/
4345 static void
4346 ixgbe_config_delay_values(struct adapter *adapter)
4347 {
4348 struct ixgbe_hw *hw = &adapter->hw;
4349 u32 rxpb, frame, size, tmp;
4350
4351 frame = adapter->max_frame_size;
4352
4353 /* Calculate High Water */
4354 switch (hw->mac.type) {
4355 case ixgbe_mac_X540:
4356 case ixgbe_mac_X550:
4357 case ixgbe_mac_X550EM_x:
4358 case ixgbe_mac_X550EM_a:
4359 tmp = IXGBE_DV_X540(frame, frame);
4360 break;
4361 default:
4362 tmp = IXGBE_DV(frame, frame);
4363 break;
4364 }
4365 size = IXGBE_BT2KB(tmp);
4366 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4367 hw->fc.high_water[0] = rxpb - size;
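	/*
	 * rxpb is the RX packet buffer size in KB (the RXPBSIZE size
	 * field appears to start at bit 10, hence the >> 10). The
	 * high-water mark leaves room for one delay value: the
	 * worst-case bytes in flight before the link partner reacts
	 * to XOFF, converted to KB by IXGBE_BT2KB.
	 */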
4368
4369 /* Now calculate Low Water */
4370 switch (hw->mac.type) {
4371 case ixgbe_mac_X540:
4372 case ixgbe_mac_X550:
4373 case ixgbe_mac_X550EM_x:
4374 case ixgbe_mac_X550EM_a:
4375 tmp = IXGBE_LOW_DV_X540(frame);
4376 break;
4377 default:
4378 tmp = IXGBE_LOW_DV(frame);
4379 break;
4380 }
4381 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4382
4383 hw->fc.pause_time = IXGBE_FC_PAUSE;
4384 hw->fc.send_xon = TRUE;
4385 } /* ixgbe_config_delay_values */
4386
4387 /************************************************************************
4388 * ixgbe_set_rxfilter - Multicast Update
4389 *
4390 * Called whenever multicast address list is updated.
4391 ************************************************************************/
4392 static void
4393 ixgbe_set_rxfilter(struct adapter *adapter)
4394 {
4395 struct ixgbe_mc_addr *mta;
4396 struct ifnet *ifp = adapter->ifp;
4397 u8 *update_ptr;
4398 int mcnt = 0;
4399 u32 fctrl;
4400 struct ethercom *ec = &adapter->osdep.ec;
4401 struct ether_multi *enm;
4402 struct ether_multistep step;
4403
4404 KASSERT(mutex_owned(&adapter->core_mtx));
4405 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4406
4407 mta = adapter->mta;
4408 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4409
4410 ETHER_LOCK(ec);
4411 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4412 ETHER_FIRST_MULTI(step, ec, enm);
4413 while (enm != NULL) {
4414 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4415 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4416 ETHER_ADDR_LEN) != 0)) {
4417 ec->ec_flags |= ETHER_F_ALLMULTI;
4418 break;
4419 }
4420 bcopy(enm->enm_addrlo,
4421 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4422 mta[mcnt].vmdq = adapter->pool;
4423 mcnt++;
4424 ETHER_NEXT_MULTI(step, enm);
4425 }
4426
4427 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4428 if (ifp->if_flags & IFF_PROMISC)
4429 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4430 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4431 fctrl |= IXGBE_FCTRL_MPE;
4432 fctrl &= ~IXGBE_FCTRL_UPE;
4433 } else
4434 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4435
4436 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4437
4438 /* Update multicast filter entries only when it's not ALLMULTI */
4439 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4440 ETHER_UNLOCK(ec);
4441 update_ptr = (u8 *)mta;
4442 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4443 ixgbe_mc_array_itr, TRUE);
4444 } else
4445 ETHER_UNLOCK(ec);
4446 } /* ixgbe_set_rxfilter */
4447
4448 /************************************************************************
4449 * ixgbe_mc_array_itr
4450 *
4451 * An iterator function needed by the multicast shared code.
4452 * It feeds the shared code routine the addresses in the
4453 * array of ixgbe_set_rxfilter() one by one.
4454 ************************************************************************/
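/*
 * A minimal sketch of how the shared code consumes this iterator (not
 * the exact ixgbe_update_mc_addr_list() source):
 *
 *	u8 *addr;
 *	u32 vmdq;
 *	for (u32 i = 0; i < mc_addr_count; i++) {
 *		addr = next(hw, &update_ptr, &vmdq);
 *		... hash/program addr into the MTA, tagged with vmdq ...
 *	}
 */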
4455 static u8 *
4456 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4457 {
4458 struct ixgbe_mc_addr *mta;
4459
4460 mta = (struct ixgbe_mc_addr *)*update_ptr;
4461 *vmdq = mta->vmdq;
4462
4463 *update_ptr = (u8*)(mta + 1);
4464
4465 return (mta->addr);
4466 } /* ixgbe_mc_array_itr */
4467
4468 /************************************************************************
4469 * ixgbe_local_timer - Timer routine
4470 *
4471 * Checks for link status, updates statistics,
4472 * and runs the watchdog check.
4473 ************************************************************************/
4474 static void
4475 ixgbe_local_timer(void *arg)
4476 {
4477 struct adapter *adapter = arg;
4478
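	/*
	 * Enqueue the work at most once: timer_pending goes 0 -> 1 here
	 * and is reset to 0 by ixgbe_handle_timer() when it finishes.
	 */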
4479 if (adapter->schedule_wqs_ok) {
4480 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4481 workqueue_enqueue(adapter->timer_wq,
4482 &adapter->timer_wc, NULL);
4483 }
4484 }
4485
4486 static void
4487 ixgbe_handle_timer(struct work *wk, void *context)
4488 {
4489 struct adapter *adapter = context;
4490 struct ixgbe_hw *hw = &adapter->hw;
4491 device_t dev = adapter->dev;
4492 struct ix_queue *que = adapter->queues;
4493 u64 queues = 0;
4494 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4495 int hung = 0;
4496 int i;
4497
4498 IXGBE_CORE_LOCK(adapter);
4499
4500 /* Check for pluggable optics */
4501 if (ixgbe_is_sfp(hw)) {
4502 bool was_full = hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4503 		bool is_full = ixgbe_sfp_cage_full(adapter);
4504
4505 /* do probe if cage state changed */
4506 if (was_full ^ is_full) {
4507 atomic_or_32(&adapter->task_requests,
4508 IXGBE_REQUEST_TASK_MOD);
4509 ixgbe_schedule_admin_tasklet(adapter);
4510 }
4511 }
4512
4513 ixgbe_update_link_status(adapter);
4514 ixgbe_update_stats_counters(adapter);
4515
4516 /* Update some event counters */
4517 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4518 que = adapter->queues;
4519 for (i = 0; i < adapter->num_queues; i++, que++) {
4520 struct tx_ring *txr = que->txr;
4521
4522 v0 += txr->q_efbig_tx_dma_setup;
4523 v1 += txr->q_mbuf_defrag_failed;
4524 v2 += txr->q_efbig2_tx_dma_setup;
4525 v3 += txr->q_einval_tx_dma_setup;
4526 v4 += txr->q_other_tx_dma_setup;
4527 v5 += txr->q_eagain_tx_dma_setup;
4528 v6 += txr->q_enomem_tx_dma_setup;
4529 v7 += txr->q_tso_err;
4530 }
4531 adapter->efbig_tx_dma_setup.ev_count = v0;
4532 adapter->mbuf_defrag_failed.ev_count = v1;
4533 adapter->efbig2_tx_dma_setup.ev_count = v2;
4534 adapter->einval_tx_dma_setup.ev_count = v3;
4535 adapter->other_tx_dma_setup.ev_count = v4;
4536 adapter->eagain_tx_dma_setup.ev_count = v5;
4537 adapter->enomem_tx_dma_setup.ev_count = v6;
4538 adapter->tso_err.ev_count = v7;
4539
4540 /*
4541 * Check the TX queues status
4542 * - mark hung queues so we don't schedule on them
4543 * - watchdog only if all queues show hung
4544 */
4545 que = adapter->queues;
4546 for (i = 0; i < adapter->num_queues; i++, que++) {
4547 /* Keep track of queues with work for soft irq */
4548 if (que->txr->busy)
4549 queues |= 1ULL << que->me;
4550 /*
4551 		 * Each time txeof runs without cleaning while there
4552 		 * are uncleaned descriptors, it increments "busy". If
4553 		 * it reaches the MAX we declare the queue hung.
4554 */
4555 if (que->busy == IXGBE_QUEUE_HUNG) {
4556 ++hung;
4557 /* Mark the queue as inactive */
4558 adapter->active_queues &= ~(1ULL << que->me);
4559 continue;
4560 } else {
4561 /* Check if we've come back from hung */
4562 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4563 adapter->active_queues |= 1ULL << que->me;
4564 }
4565 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4566 device_printf(dev,
4567 "Warning queue %d appears to be hung!\n", i);
4568 que->txr->busy = IXGBE_QUEUE_HUNG;
4569 ++hung;
4570 }
4571 }
4572
4573 /* Only truly watchdog if all queues show hung */
4574 if (hung == adapter->num_queues)
4575 goto watchdog;
4576 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4577 else if (queues != 0) { /* Force an IRQ on queues with work */
4578 que = adapter->queues;
4579 for (i = 0; i < adapter->num_queues; i++, que++) {
4580 mutex_enter(&que->dc_mtx);
4581 if (que->disabled_count == 0)
4582 ixgbe_rearm_queues(adapter,
4583 queues & ((u64)1 << i));
4584 mutex_exit(&que->dc_mtx);
4585 }
4586 }
4587 #endif
4588
4589 atomic_store_relaxed(&adapter->timer_pending, 0);
4590 IXGBE_CORE_UNLOCK(adapter);
4591 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4592 return;
4593
4594 watchdog:
4595 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4596 adapter->ifp->if_flags &= ~IFF_RUNNING;
4597 adapter->watchdog_events.ev_count++;
4598 ixgbe_init_locked(adapter);
4599 IXGBE_CORE_UNLOCK(adapter);
4600 } /* ixgbe_handle_timer */
4601
4602 /************************************************************************
4603 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4604 ************************************************************************/
4605 static void
4606 ixgbe_recovery_mode_timer(void *arg)
4607 {
4608 struct adapter *adapter = arg;
4609
4610 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0)
4611 {
4612 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4613 &adapter->recovery_mode_timer_wc, NULL);
4614 }
4615 }
4616
4617 static void
4618 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4619 {
4620 struct adapter *adapter = context;
4621 struct ixgbe_hw *hw = &adapter->hw;
4622
4623 IXGBE_CORE_LOCK(adapter);
4624 if (ixgbe_fw_recovery_mode(hw)) {
4625 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1) == 0) {
4626 /* Firmware error detected, entering recovery mode */
4627 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4628
4629 if (hw->adapter_stopped == FALSE)
4630 ixgbe_stop(adapter);
4631 }
4632 } else
4633 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4634
4635 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4636 callout_reset(&adapter->recovery_mode_timer, hz,
4637 ixgbe_recovery_mode_timer, adapter);
4638 IXGBE_CORE_UNLOCK(adapter);
4639 } /* ixgbe_handle_recovery_mode_timer */
4640
4641 /************************************************************************
4642 * ixgbe_sfp_cage_full
4643 *
4644  * Determine whether a port has optics inserted.
4645 ************************************************************************/
4646 static bool
4647 ixgbe_sfp_cage_full(struct adapter *adapter)
4648 {
4649 struct ixgbe_hw *hw = &adapter->hw;
4650 uint32_t mask;
4651 int rv;
4652
4653 if (hw->mac.type >= ixgbe_mac_X540)
4654 mask = IXGBE_ESDP_SDP0;
4655 else
4656 mask = IXGBE_ESDP_SDP2;
4657
4658 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask;
4659 if ((adapter->quirks & IXGBE_QUIRK_MOD_ABS_INVERT) != 0)
4660 rv = !rv;
4661
4662 if (hw->mac.type == ixgbe_mac_X550EM_a) {
4663 		/* X550EM_a's SDP0 is inverted compared to the others. */
4664 return !rv;
4665 }
4666
4667 return rv;
4668 } /* ixgbe_sfp_cage_full */
4669
4670 /************************************************************************
4671 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4672 ************************************************************************/
4673 static void
4674 ixgbe_handle_mod(void *context)
4675 {
4676 struct adapter *adapter = context;
4677 struct ixgbe_hw *hw = &adapter->hw;
4678 device_t dev = adapter->dev;
4679 u32 err, cage_full = 0;
4680
4681 ++adapter->mod_workev.ev_count;
4682 if (adapter->hw.need_crosstalk_fix) {
4683 switch (hw->mac.type) {
4684 case ixgbe_mac_82599EB:
4685 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4686 IXGBE_ESDP_SDP2;
4687 break;
4688 case ixgbe_mac_X550EM_x:
4689 case ixgbe_mac_X550EM_a:
4690 /*
4691 * XXX See ixgbe_sfp_cage_full(). It seems the bit is
4692 * inverted on X550EM_a, so I think this is incorrect.
4693 */
4694 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4695 IXGBE_ESDP_SDP0;
4696 break;
4697 default:
4698 break;
4699 }
4700
4701 if (!cage_full)
4702 goto out;
4703 }
4704
4705 err = hw->phy.ops.identify_sfp(hw);
4706 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4707 device_printf(dev,
4708 "Unsupported SFP+ module type was detected.\n");
4709 goto out;
4710 }
4711
4712 if (hw->need_unsupported_sfp_recovery) {
4713 device_printf(dev, "Recovering from unsupported SFP\n");
4714 /*
4715 * We could recover the status by calling setup_sfp(),
4716 * setup_link() and some others. It's complex and might not
4717 		 * work correctly in some unknown cases. To avoid that kind
4718 		 * of problem, call ixgbe_init_locked(); it's a simple and
4719 		 * safe approach.
4720 */
4721 ixgbe_init_locked(adapter);
4722 } else {
4723 if (hw->mac.type == ixgbe_mac_82598EB)
4724 err = hw->phy.ops.reset(hw);
4725 else {
4726 err = hw->mac.ops.setup_sfp(hw);
4727 hw->phy.sfp_setup_needed = FALSE;
4728 }
4729 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4730 device_printf(dev,
4731 "Setup failure - unsupported SFP+ module type.\n");
4732 goto out;
4733 }
4734 }
4735
4736 out:
4737 	/* ixgbe_get_supported_physical_layer() calls hw->phy.ops.identify_sfp() */
4738 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4739
4740 /* Adjust media types shown in ifconfig */
4741 IXGBE_CORE_UNLOCK(adapter);
4742 ifmedia_removeall(&adapter->media);
4743 ixgbe_add_media_types(adapter);
4744 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4745 IXGBE_CORE_LOCK(adapter);
4746
4747 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
4748 /*
4749 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4750 * the workqueue now.
4751 */
4752 } /* ixgbe_handle_mod */
4753
4754
4755 /************************************************************************
4756 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4757 ************************************************************************/
4758 static void
4759 ixgbe_handle_msf(void *context)
4760 {
4761 struct adapter *adapter = context;
4762 struct ixgbe_hw *hw = &adapter->hw;
4763 u32 autoneg;
4764 bool negotiate;
4765
4766 ++adapter->msf_workev.ev_count;
4767
4768 autoneg = hw->phy.autoneg_advertised;
4769 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4770 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4771 if (hw->mac.ops.setup_link)
4772 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4773 } /* ixgbe_handle_msf */
4774
4775 /************************************************************************
4776 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4777 ************************************************************************/
4778 static void
4779 ixgbe_handle_phy(void *context)
4780 {
4781 struct adapter *adapter = context;
4782 struct ixgbe_hw *hw = &adapter->hw;
4783 int error;
4784
4785 ++adapter->phy_workev.ev_count;
4786 error = hw->phy.ops.handle_lasi(hw);
4787 if (error == IXGBE_ERR_OVERTEMP)
4788 device_printf(adapter->dev,
4789 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4790 		    "PHY will downshift to lower power state!\n");
4791 else if (error)
4792 device_printf(adapter->dev,
4793 "Error handling LASI interrupt: %d\n", error);
4794 } /* ixgbe_handle_phy */
4795
4796 static void
4797 ixgbe_handle_admin(struct work *wk, void *context)
4798 {
4799 struct adapter *adapter = context;
4800 struct ifnet *ifp = adapter->ifp;
4801 struct ixgbe_hw *hw = &adapter->hw;
4802 u32 req;
4803
4804 /*
4805 * Hold the IFNET_LOCK across this entire call. This will
4806 * prevent additional changes to adapter->phy_layer
4807 * and serialize calls to this tasklet. We cannot hold the
4808 	 * CORE_LOCK while calling into the ifmedia functions, as
4809 	 * they take ifmedia_lock() and that lock is the CORE_LOCK.
4810 */
4811 IFNET_LOCK(ifp);
4812 IXGBE_CORE_LOCK(adapter);
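	/*
	 * Loop until no request bits remain: requests posted while a
	 * handler runs are picked up on the next pass, before other
	 * interrupts are re-enabled below.
	 */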
4813 while ((req =
4814 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR))
4815 != 0) {
4816 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
4817 ixgbe_handle_link(adapter);
4818 atomic_and_32(&adapter->task_requests,
4819 ~IXGBE_REQUEST_TASK_LSC);
4820 }
4821 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
4822 ixgbe_handle_mod(adapter);
4823 atomic_and_32(&adapter->task_requests,
4824 ~IXGBE_REQUEST_TASK_MOD);
4825 }
4826 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
4827 ixgbe_handle_msf(adapter);
4828 atomic_and_32(&adapter->task_requests,
4829 ~IXGBE_REQUEST_TASK_MSF);
4830 }
4831 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
4832 ixgbe_handle_phy(adapter);
4833 atomic_and_32(&adapter->task_requests,
4834 ~IXGBE_REQUEST_TASK_PHY);
4835 }
4836 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
4837 ixgbe_reinit_fdir(adapter);
4838 atomic_and_32(&adapter->task_requests,
4839 ~IXGBE_REQUEST_TASK_FDIR);
4840 }
4841 #if 0 /* notyet */
4842 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
4843 ixgbe_handle_mbx(adapter);
4844 atomic_and_32(&adapter->task_requests,
4845 ~IXGBE_REQUEST_TASK_MBX);
4846 }
4847 #endif
4848 }
4849 atomic_store_relaxed(&adapter->admin_pending, 0);
4850 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
4851 atomic_and_32(&adapter->task_requests,
4852 ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
4853 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
4854 /* Re-enable other interrupts */
4855 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4856 } else
4857 ixgbe_enable_intr(adapter);
4858 }
4859
4860 IXGBE_CORE_UNLOCK(adapter);
4861 IFNET_UNLOCK(ifp);
4862 } /* ixgbe_handle_admin */
4863
4864 static void
4865 ixgbe_ifstop(struct ifnet *ifp, int disable)
4866 {
4867 struct adapter *adapter = ifp->if_softc;
4868
4869 IXGBE_CORE_LOCK(adapter);
4870 ixgbe_stop(adapter);
4871 IXGBE_CORE_UNLOCK(adapter);
4872
4873 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4874 atomic_store_relaxed(&adapter->timer_pending, 0);
4875 }
4876
4877 /************************************************************************
4878 * ixgbe_stop - Stop the hardware
4879 *
4880 * Disables all traffic on the adapter by issuing a
4881 * global reset on the MAC and deallocates TX/RX buffers.
4882 ************************************************************************/
4883 static void
4884 ixgbe_stop(void *arg)
4885 {
4886 struct ifnet *ifp;
4887 struct adapter *adapter = arg;
4888 struct ixgbe_hw *hw = &adapter->hw;
4889
4890 ifp = adapter->ifp;
4891
4892 KASSERT(mutex_owned(&adapter->core_mtx));
4893
4894 	INIT_DEBUGOUT("ixgbe_stop: begin");
4895 ixgbe_disable_intr(adapter);
4896 callout_stop(&adapter->timer);
4897
4898 /* Don't schedule workqueues. */
4899 adapter->schedule_wqs_ok = false;
4900
4901 /* Let the stack know...*/
4902 ifp->if_flags &= ~IFF_RUNNING;
4903
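	/*
	 * Reset, then force a full stop: clearing adapter_stopped just
	 * before ixgbe_stop_adapter() makes the shared code run the stop
	 * sequence even if it believes the adapter is already stopped.
	 */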
4904 ixgbe_reset_hw(hw);
4905 hw->adapter_stopped = FALSE;
4906 ixgbe_stop_adapter(hw);
4907 if (hw->mac.type == ixgbe_mac_82599EB)
4908 ixgbe_stop_mac_link_on_d3_82599(hw);
4909 /* Turn off the laser - noop with no optics */
4910 ixgbe_disable_tx_laser(hw);
4911
4912 /* Update the stack */
4913 adapter->link_up = FALSE;
4914 ixgbe_update_link_status(adapter);
4915
4916 /* reprogram the RAR[0] in case user changed it. */
4917 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4918
4919 return;
4920 } /* ixgbe_stop */
4921
4922 /************************************************************************
4923 * ixgbe_update_link_status - Update OS on link state
4924 *
4925 * Note: Only updates the OS on the cached link state.
4926 * The real check of the hardware only happens with
4927 * a link interrupt.
4928 ************************************************************************/
4929 static void
4930 ixgbe_update_link_status(struct adapter *adapter)
4931 {
4932 struct ifnet *ifp = adapter->ifp;
4933 device_t dev = adapter->dev;
4934 struct ixgbe_hw *hw = &adapter->hw;
4935
4936 KASSERT(mutex_owned(&adapter->core_mtx));
4937
4938 if (adapter->link_up) {
4939 if (adapter->link_active != LINK_STATE_UP) {
4940 /*
4941 			 * Clear the EITR settings to eliminate the influence
4942 			 * of the previous state, as ixgbe_init_locked() does.
4943 */
4944 struct ix_queue *que = adapter->queues;
4945 for (int i = 0; i < adapter->num_queues; i++, que++)
4946 que->eitr_setting = 0;
4947
4948 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
4949 /*
4950 * Discard count for both MAC Local Fault and
4951 * Remote Fault because those registers are
4952 * valid only when the link speed is up and
4953 * 10Gbps.
4954 */
4955 IXGBE_READ_REG(hw, IXGBE_MLFC);
4956 IXGBE_READ_REG(hw, IXGBE_MRFC);
4957 }
4958
4959 if (bootverbose) {
4960 const char *bpsmsg;
4961
4962 switch (adapter->link_speed) {
4963 case IXGBE_LINK_SPEED_10GB_FULL:
4964 bpsmsg = "10 Gbps";
4965 break;
4966 case IXGBE_LINK_SPEED_5GB_FULL:
4967 bpsmsg = "5 Gbps";
4968 break;
4969 case IXGBE_LINK_SPEED_2_5GB_FULL:
4970 bpsmsg = "2.5 Gbps";
4971 break;
4972 case IXGBE_LINK_SPEED_1GB_FULL:
4973 bpsmsg = "1 Gbps";
4974 break;
4975 case IXGBE_LINK_SPEED_100_FULL:
4976 bpsmsg = "100 Mbps";
4977 break;
4978 case IXGBE_LINK_SPEED_10_FULL:
4979 bpsmsg = "10 Mbps";
4980 break;
4981 default:
4982 bpsmsg = "unknown speed";
4983 break;
4984 }
4985 				device_printf(dev, "Link is up %s Full Duplex\n",
4986 				    bpsmsg);
4987 }
4988 adapter->link_active = LINK_STATE_UP;
4989 /* Update any Flow Control changes */
4990 ixgbe_fc_enable(&adapter->hw);
4991 /* Update DMA coalescing config */
4992 ixgbe_config_dmac(adapter);
4993 if_link_state_change(ifp, LINK_STATE_UP);
4994
4995 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4996 ixgbe_ping_all_vfs(adapter);
4997 }
4998 } else {
4999 /*
5000 		 * Act only when link_active changes to DOWN, i.e.:
5001 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5002 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5003 */
5004 if (adapter->link_active != LINK_STATE_DOWN) {
5005 if (bootverbose)
5006 device_printf(dev, "Link is Down\n");
5007 if_link_state_change(ifp, LINK_STATE_DOWN);
5008 adapter->link_active = LINK_STATE_DOWN;
5009 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5010 ixgbe_ping_all_vfs(adapter);
5011 ixgbe_drain_all(adapter);
5012 }
5013 }
5014 } /* ixgbe_update_link_status */
5015
5016 /************************************************************************
5017 * ixgbe_config_dmac - Configure DMA Coalescing
5018 ************************************************************************/
5019 static void
5020 ixgbe_config_dmac(struct adapter *adapter)
5021 {
5022 struct ixgbe_hw *hw = &adapter->hw;
5023 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5024
5025 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5026 return;
5027
5028 	if (dcfg->watchdog_timer != adapter->dmac ||
5029 	    dcfg->link_speed != adapter->link_speed) {
5030 dcfg->watchdog_timer = adapter->dmac;
5031 dcfg->fcoe_en = false;
5032 dcfg->link_speed = adapter->link_speed;
5033 dcfg->num_tcs = 1;
5034
5035 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5036 dcfg->watchdog_timer, dcfg->link_speed);
5037
5038 hw->mac.ops.dmac_config(hw);
5039 }
5040 } /* ixgbe_config_dmac */
5041
5042 /************************************************************************
5043 * ixgbe_enable_intr
5044 ************************************************************************/
5045 static void
5046 ixgbe_enable_intr(struct adapter *adapter)
5047 {
5048 struct ixgbe_hw *hw = &adapter->hw;
5049 struct ix_queue *que = adapter->queues;
5050 u32 mask, fwsm;
5051
5052 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5053
5054 switch (adapter->hw.mac.type) {
5055 case ixgbe_mac_82599EB:
5056 mask |= IXGBE_EIMS_ECC;
5057 /* Temperature sensor on some adapters */
5058 mask |= IXGBE_EIMS_GPI_SDP0;
5059 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5060 mask |= IXGBE_EIMS_GPI_SDP1;
5061 mask |= IXGBE_EIMS_GPI_SDP2;
5062 break;
5063 case ixgbe_mac_X540:
5064 /* Detect if Thermal Sensor is enabled */
5065 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5066 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5067 mask |= IXGBE_EIMS_TS;
5068 mask |= IXGBE_EIMS_ECC;
5069 break;
5070 case ixgbe_mac_X550:
5071 /* MAC thermal sensor is automatically enabled */
5072 mask |= IXGBE_EIMS_TS;
5073 mask |= IXGBE_EIMS_ECC;
5074 break;
5075 case ixgbe_mac_X550EM_x:
5076 case ixgbe_mac_X550EM_a:
5077 /* Some devices use SDP0 for important information */
5078 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5079 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5080 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5081 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5082 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5083 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5084 mask |= IXGBE_EICR_GPI_SDP0_X540;
5085 mask |= IXGBE_EIMS_ECC;
5086 break;
5087 default:
5088 break;
5089 }
5090
5091 /* Enable Fan Failure detection */
5092 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5093 mask |= IXGBE_EIMS_GPI_SDP1;
5094 /* Enable SR-IOV */
5095 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5096 mask |= IXGBE_EIMS_MAILBOX;
5097 /* Enable Flow Director */
5098 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5099 mask |= IXGBE_EIMS_FLOW_DIR;
5100
5101 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5102
5103 /* With MSI-X we use auto clear */
5104 if (adapter->msix_mem) {
5105 mask = IXGBE_EIMS_ENABLE_MASK;
5106 /* Don't autoclear Link */
5107 mask &= ~IXGBE_EIMS_OTHER;
5108 mask &= ~IXGBE_EIMS_LSC;
5109 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5110 mask &= ~IXGBE_EIMS_MAILBOX;
5111 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5112 }
5113
5114 /*
5115 	 * Now enable all queues; this is done separately to
5116 * allow for handling the extended (beyond 32) MSI-X
5117 * vectors that can be used by 82599
5118 */
5119 for (int i = 0; i < adapter->num_queues; i++, que++)
5120 ixgbe_enable_queue(adapter, que->msix);
5121
5122 IXGBE_WRITE_FLUSH(hw);
5123
5124 } /* ixgbe_enable_intr */
5125
5126 /************************************************************************
5127 * ixgbe_disable_intr_internal
5128 ************************************************************************/
5129 static void
5130 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5131 {
5132 struct ix_queue *que = adapter->queues;
5133
5134 /* disable interrupts other than queues */
5135 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5136
5137 if (adapter->msix_mem)
5138 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5139
5140 for (int i = 0; i < adapter->num_queues; i++, que++)
5141 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5142
5143 IXGBE_WRITE_FLUSH(&adapter->hw);
5144
5145 } /* ixgbe_disable_intr_internal */
5146
5147 /************************************************************************
5148 * ixgbe_disable_intr
5149 ************************************************************************/
5150 static void
5151 ixgbe_disable_intr(struct adapter *adapter)
5152 {
5153
5154 ixgbe_disable_intr_internal(adapter, true);
5155 } /* ixgbe_disable_intr */
5156
5157 /************************************************************************
5158 * ixgbe_ensure_disabled_intr
5159 ************************************************************************/
5160 void
5161 ixgbe_ensure_disabled_intr(struct adapter *adapter)
5162 {
5163
5164 ixgbe_disable_intr_internal(adapter, false);
5165 } /* ixgbe_ensure_disabled_intr */
5166
5167 /************************************************************************
5168 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5169 ************************************************************************/
5170 static int
5171 ixgbe_legacy_irq(void *arg)
5172 {
5173 struct ix_queue *que = arg;
5174 struct adapter *adapter = que->adapter;
5175 struct ixgbe_hw *hw = &adapter->hw;
5176 struct ifnet *ifp = adapter->ifp;
5177 struct tx_ring *txr = adapter->tx_rings;
5178 bool more = false;
5179 bool reenable_intr = true;
5180 u32 eicr, eicr_mask;
5181 u32 task_requests = 0;
5182
5183 /* Silicon errata #26 on 82598 */
5184 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5185
5186 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5187
5188 adapter->stats.pf.legint.ev_count++;
5189 ++que->irqs.ev_count;
5190 if (eicr == 0) {
5191 adapter->stats.pf.intzero.ev_count++;
5192 if ((ifp->if_flags & IFF_UP) != 0)
5193 ixgbe_enable_intr(adapter);
5194 return 0;
5195 }
5196
5197 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5198 /*
5199 		 * Set que->txrx_use_workqueue as ixgbe_msix_que() does.
5200 */
5201 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5202
5203 #ifdef __NetBSD__
5204 /* Don't run ixgbe_rxeof in interrupt context */
5205 more = true;
5206 #else
5207 more = ixgbe_rxeof(que);
5208 #endif
5209
5210 IXGBE_TX_LOCK(txr);
5211 ixgbe_txeof(txr);
5212 #ifdef notyet
5213 if (!ixgbe_ring_empty(ifp, txr->br))
5214 ixgbe_start_locked(ifp, txr);
5215 #endif
5216 IXGBE_TX_UNLOCK(txr);
5217 }
5218
5219 /* Check for fan failure */
5220 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5221 ixgbe_check_fan_failure(adapter, eicr, true);
5222 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5223 }
5224
5225 /* Link status change */
5226 if (eicr & IXGBE_EICR_LSC)
5227 task_requests |= IXGBE_REQUEST_TASK_LSC;
5228
5229 if (ixgbe_is_sfp(hw)) {
5230 /* Pluggable optics-related interrupt */
5231 if (hw->mac.type >= ixgbe_mac_X540)
5232 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5233 else
5234 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5235
5236 if (eicr & eicr_mask) {
5237 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5238 task_requests |= IXGBE_REQUEST_TASK_MOD;
5239 }
5240
5241 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5242 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5243 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5244 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5245 task_requests |= IXGBE_REQUEST_TASK_MSF;
5246 }
5247 }
5248
5249 /* External PHY interrupt */
5250 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5251 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5252 task_requests |= IXGBE_REQUEST_TASK_PHY;
5253
5254 if (more) {
5255 que->req.ev_count++;
5256 ixgbe_sched_handle_que(adapter, que);
5257 reenable_intr = false;
5258 }
5259 if (task_requests != 0) {
5260 /* Re-enabling other interrupts is done in the admin task */
5261 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
5262 atomic_or_32(&adapter->task_requests, task_requests);
5263 ixgbe_schedule_admin_tasklet(adapter);
5264 reenable_intr = false;
5265 }
5266
5267 	if (reenable_intr)
5268 ixgbe_enable_intr(adapter);
5269
5270 return 1;
5271 } /* ixgbe_legacy_irq */
5272
5273 /************************************************************************
5274 * ixgbe_free_pciintr_resources
5275 ************************************************************************/
5276 static void
5277 ixgbe_free_pciintr_resources(struct adapter *adapter)
5278 {
5279 struct ix_queue *que = adapter->queues;
5280 int rid;
5281
5282 /*
5283 	 * Release all MSI-X queue resources.
5284 */
5285 for (int i = 0; i < adapter->num_queues; i++, que++) {
5286 if (que->res != NULL) {
5287 pci_intr_disestablish(adapter->osdep.pc,
5288 adapter->osdep.ihs[i]);
5289 adapter->osdep.ihs[i] = NULL;
5290 }
5291 }
5292
5293 /* Clean the Legacy or Link interrupt last */
5294 if (adapter->vector) /* we are doing MSIX */
5295 rid = adapter->vector;
5296 else
5297 rid = 0;
5298
5299 if (adapter->osdep.ihs[rid] != NULL) {
5300 pci_intr_disestablish(adapter->osdep.pc,
5301 adapter->osdep.ihs[rid]);
5302 adapter->osdep.ihs[rid] = NULL;
5303 }
5304
5305 if (adapter->osdep.intrs != NULL) {
5306 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5307 adapter->osdep.nintrs);
5308 adapter->osdep.intrs = NULL;
5309 }
5310 } /* ixgbe_free_pciintr_resources */
5311
5312 /************************************************************************
5313 * ixgbe_free_pci_resources
5314 ************************************************************************/
5315 static void
5316 ixgbe_free_pci_resources(struct adapter *adapter)
5317 {
5318
5319 ixgbe_free_pciintr_resources(adapter);
5320
5321 if (adapter->osdep.mem_size != 0) {
5322 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5323 adapter->osdep.mem_bus_space_handle,
5324 adapter->osdep.mem_size);
5325 }
5326
5327 } /* ixgbe_free_pci_resources */
5328
5329 /************************************************************************
5330 * ixgbe_set_sysctl_value
5331 ************************************************************************/
5332 static void
5333 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5334 const char *description, int *limit, int value)
5335 {
5336 device_t dev = adapter->dev;
5337 struct sysctllog **log;
5338 const struct sysctlnode *rnode, *cnode;
5339
5340 /*
5341 * It's not required to check recovery mode because this function never
5342 * touches hardware.
5343 */
5344
5345 log = &adapter->sysctllog;
5346 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5347 aprint_error_dev(dev, "could not create sysctl root\n");
5348 return;
5349 }
5350 if (sysctl_createv(log, 0, &rnode, &cnode,
5351 CTLFLAG_READWRITE, CTLTYPE_INT,
5352 name, SYSCTL_DESCR(description),
5353 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5354 aprint_error_dev(dev, "could not create sysctl\n");
5355 *limit = value;
5356 } /* ixgbe_set_sysctl_value */
5357
5358 /************************************************************************
5359 * ixgbe_sysctl_flowcntl
5360 *
5361 * SYSCTL wrapper around setting Flow Control
5362 ************************************************************************/
5363 static int
5364 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5365 {
5366 struct sysctlnode node = *rnode;
5367 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5368 int error, fc;
5369
5370 if (ixgbe_fw_recovery_mode_swflag(adapter))
5371 return (EPERM);
5372
5373 fc = adapter->hw.fc.current_mode;
5374 node.sysctl_data = &fc;
5375 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5376 if (error != 0 || newp == NULL)
5377 return error;
5378
5379 /* Don't bother if it's not changed */
5380 if (fc == adapter->hw.fc.current_mode)
5381 return (0);
5382
5383 return ixgbe_set_flowcntl(adapter, fc);
5384 } /* ixgbe_sysctl_flowcntl */
5385
5386 /************************************************************************
5387 * ixgbe_set_flowcntl - Set flow control
5388 *
5389 * Flow control values:
5390 * 0 - off
5391 * 1 - rx pause
5392 * 2 - tx pause
5393 * 3 - full
5394 ************************************************************************/
5395 static int
5396 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5397 {
5398 switch (fc) {
5399 case ixgbe_fc_rx_pause:
5400 case ixgbe_fc_tx_pause:
5401 case ixgbe_fc_full:
5402 adapter->hw.fc.requested_mode = fc;
5403 if (adapter->num_queues > 1)
5404 ixgbe_disable_rx_drop(adapter);
5405 break;
5406 case ixgbe_fc_none:
5407 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5408 if (adapter->num_queues > 1)
5409 ixgbe_enable_rx_drop(adapter);
5410 break;
5411 default:
5412 return (EINVAL);
5413 }
5414
5415 #if 0 /* XXX NetBSD */
5416 /* Don't autoneg if forcing a value */
5417 adapter->hw.fc.disable_fc_autoneg = TRUE;
5418 #endif
5419 ixgbe_fc_enable(&adapter->hw);
5420
5421 return (0);
5422 } /* ixgbe_set_flowcntl */
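/*
 * Illustrative sketch (compiled out): the sysctl values 0..3 above
 * map directly onto enum ixgbe_fc_mode, so a kernel caller can use
 * the enum constants instead of raw integers.
 */
#if 0 /* example only */
static int
ixgbe_example_enable_full_fc(struct adapter *adapter)
{
	/* Equivalent to writing 3 to the flow control sysctl */
	return ixgbe_set_flowcntl(adapter, ixgbe_fc_full);
}
#endif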
5423
5424 /************************************************************************
5425 * ixgbe_enable_rx_drop
5426 *
5427 * Enable the hardware to drop packets when the buffer is
5428 * full. This is useful with multiqueue, so that no single
5429 * queue being full stalls the entire RX engine. We only
5430 * enable this when Multiqueue is enabled AND Flow Control
5431 * is disabled.
5432 ************************************************************************/
5433 static void
5434 ixgbe_enable_rx_drop(struct adapter *adapter)
5435 {
5436 struct ixgbe_hw *hw = &adapter->hw;
5437 struct rx_ring *rxr;
5438 u32 srrctl;
5439
5440 for (int i = 0; i < adapter->num_queues; i++) {
5441 rxr = &adapter->rx_rings[i];
5442 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5443 srrctl |= IXGBE_SRRCTL_DROP_EN;
5444 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5445 }
5446
5447 /* enable drop for each vf */
5448 for (int i = 0; i < adapter->num_vfs; i++) {
5449 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5450 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5451 IXGBE_QDE_ENABLE));
5452 }
5453 } /* ixgbe_enable_rx_drop */
5454
5455 /************************************************************************
5456 * ixgbe_disable_rx_drop
5457 ************************************************************************/
5458 static void
5459 ixgbe_disable_rx_drop(struct adapter *adapter)
5460 {
5461 struct ixgbe_hw *hw = &adapter->hw;
5462 struct rx_ring *rxr;
5463 u32 srrctl;
5464
5465 for (int i = 0; i < adapter->num_queues; i++) {
5466 rxr = &adapter->rx_rings[i];
5467 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5468 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5469 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5470 }
5471
5472 /* disable drop for each vf */
5473 for (int i = 0; i < adapter->num_vfs; i++) {
5474 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5475 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5476 }
5477 } /* ixgbe_disable_rx_drop */
5478
5479 /************************************************************************
5480 * ixgbe_sysctl_advertise
5481 *
5482 * SYSCTL wrapper around setting advertised speed
5483 ************************************************************************/
5484 static int
5485 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5486 {
5487 struct sysctlnode node = *rnode;
5488 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5489 int error = 0, advertise;
5490
5491 if (ixgbe_fw_recovery_mode_swflag(adapter))
5492 return (EPERM);
5493
5494 advertise = adapter->advertise;
5495 node.sysctl_data = &advertise;
5496 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5497 if (error != 0 || newp == NULL)
5498 return error;
5499
5500 return ixgbe_set_advertise(adapter, advertise);
5501 } /* ixgbe_sysctl_advertise */
5502
5503 /************************************************************************
5504 * ixgbe_set_advertise - Control advertised link speed
5505 *
5506 * Flags:
5507 * 0x00 - Default (all capable link speed)
5508 * 0x01 - advertise 100 Mb
5509 * 0x02 - advertise 1G
5510 * 0x04 - advertise 10G
5511 * 0x08 - advertise 10 Mb
5512 * 0x10 - advertise 2.5G
5513 * 0x20 - advertise 5G
5514 ************************************************************************/
5515 static int
5516 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5517 {
5518 device_t dev;
5519 struct ixgbe_hw *hw;
5520 ixgbe_link_speed speed = 0;
5521 ixgbe_link_speed link_caps = 0;
5522 s32 err = IXGBE_NOT_IMPLEMENTED;
5523 bool negotiate = FALSE;
5524
5525 /* Checks to validate new value */
5526 if (adapter->advertise == advertise) /* no change */
5527 return (0);
5528
5529 dev = adapter->dev;
5530 hw = &adapter->hw;
5531
5532 /* No speed changes for backplane media */
5533 if (hw->phy.media_type == ixgbe_media_type_backplane)
5534 return (ENODEV);
5535
5536 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5537 (hw->phy.multispeed_fiber))) {
5538 device_printf(dev,
5539 "Advertised speed can only be set on copper or "
5540 "multispeed fiber media types.\n");
5541 return (EINVAL);
5542 }
5543
5544 	if (advertise < 0x0 || advertise > 0x3f) {
5545 		device_printf(dev,
5546 		    "Invalid advertised speed; valid flags are 0x0 through 0x3f\n");
5547 return (EINVAL);
5548 }
5549
5550 if (hw->mac.ops.get_link_capabilities) {
5551 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5552 &negotiate);
5553 if (err != IXGBE_SUCCESS) {
5554 device_printf(dev, "Unable to determine supported advertise speeds\n");
5555 return (ENODEV);
5556 }
5557 }
5558
5559 /* Set new value and report new advertised mode */
5560 if (advertise & 0x1) {
5561 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5562 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5563 return (EINVAL);
5564 }
5565 speed |= IXGBE_LINK_SPEED_100_FULL;
5566 }
5567 if (advertise & 0x2) {
5568 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5569 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5570 return (EINVAL);
5571 }
5572 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5573 }
5574 if (advertise & 0x4) {
5575 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5576 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5577 return (EINVAL);
5578 }
5579 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5580 }
5581 if (advertise & 0x8) {
5582 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5583 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5584 return (EINVAL);
5585 }
5586 speed |= IXGBE_LINK_SPEED_10_FULL;
5587 }
5588 if (advertise & 0x10) {
5589 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5590 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5591 return (EINVAL);
5592 }
5593 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5594 }
5595 if (advertise & 0x20) {
5596 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5597 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5598 return (EINVAL);
5599 }
5600 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5601 }
5602 if (advertise == 0)
5603 speed = link_caps; /* All capable link speed */
5604
5605 hw->mac.autotry_restart = TRUE;
5606 hw->mac.ops.setup_link(hw, speed, TRUE);
5607 adapter->advertise = advertise;
5608
5609 return (0);
5610 } /* ixgbe_set_advertise */
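/*
 * Illustrative sketch (compiled out): the advertise flags form a
 * bitmask, so several speeds may be requested at once.  0x02 | 0x04
 * restricts advertisement to 1G and 10G on a capable copper or
 * multispeed-fiber port.
 */
#if 0 /* example only */
static int
ixgbe_example_advertise_1g_10g(struct adapter *adapter)
{
	return ixgbe_set_advertise(adapter, 0x02 | 0x04);
}
#endif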
5611
5612 /************************************************************************
5613 * ixgbe_get_advertise - Get current advertised speed settings
5614 *
5615 * Formatted for sysctl usage.
5616 * Flags:
5617 * 0x01 - advertise 100 Mb
5618 * 0x02 - advertise 1G
5619 * 0x04 - advertise 10G
5620 * 0x08 - advertise 10 Mb (yes, Mb)
5621 * 0x10 - advertise 2.5G
5622 * 0x20 - advertise 5G
5623 ************************************************************************/
5624 static int
5625 ixgbe_get_advertise(struct adapter *adapter)
5626 {
5627 struct ixgbe_hw *hw = &adapter->hw;
5628 int speed;
5629 ixgbe_link_speed link_caps = 0;
5630 s32 err;
5631 bool negotiate = FALSE;
5632
5633 /*
5634 * Advertised speed means nothing unless it's copper or
5635 * multi-speed fiber
5636 */
5637 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5638 !(hw->phy.multispeed_fiber))
5639 return (0);
5640
5641 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5642 if (err != IXGBE_SUCCESS)
5643 return (0);
5644
5645 speed =
5646 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5647 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5648 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5649 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5650 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5651 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5652
5653 return speed;
5654 } /* ixgbe_get_advertise */
5655
5656 /************************************************************************
5657 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5658 *
5659 * Control values:
5660 * 0/1 - off / on (use default value of 1000)
5661 *
5662 * Legal timer values are:
5663 * 50,100,250,500,1000,2000,5000,10000
5664 *
5665 * Turning off interrupt moderation will also turn this off.
5666 ************************************************************************/
5667 static int
5668 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5669 {
5670 struct sysctlnode node = *rnode;
5671 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5672 struct ifnet *ifp = adapter->ifp;
5673 int error;
5674 int newval;
5675
5676 if (ixgbe_fw_recovery_mode_swflag(adapter))
5677 return (EPERM);
5678
5679 newval = adapter->dmac;
5680 node.sysctl_data = &newval;
5681 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5682 if ((error) || (newp == NULL))
5683 return (error);
5684
5685 switch (newval) {
5686 case 0:
5687 /* Disabled */
5688 adapter->dmac = 0;
5689 break;
5690 case 1:
5691 /* Enable and use default */
5692 adapter->dmac = 1000;
5693 break;
5694 case 50:
5695 case 100:
5696 case 250:
5697 case 500:
5698 case 1000:
5699 case 2000:
5700 case 5000:
5701 case 10000:
5702 /* Legal values - allow */
5703 adapter->dmac = newval;
5704 break;
5705 default:
5706 /* Do nothing, illegal value */
5707 return (EINVAL);
5708 }
5709
5710 /* Re-initialize hardware if it's already running */
5711 if (ifp->if_flags & IFF_RUNNING)
5712 ifp->if_init(ifp);
5713
5714 return (0);
5715 } /* ixgbe_sysctl_dmac */
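/*
 * Usage sketch (the sysctl path here is an assumption; the node is
 * created elsewhere in this driver):
 *
 *	# sysctl -w hw.ixg0.dmac=1000	(enable, 1000 usec watchdog)
 *	# sysctl -w hw.ixg0.dmac=0	(disable DMA coalescing)
 *
 * Values outside the legal timer list above are rejected with EINVAL.
 */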
5716
5717 #ifdef IXGBE_DEBUG
5718 /************************************************************************
5719 * ixgbe_sysctl_power_state
5720 *
5721 * Sysctl to test power states
5722 * Values:
5723 * 0 - set device to D0
5724 * 3 - set device to D3
5725 * (none) - get current device power state
5726 ************************************************************************/
5727 static int
5728 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5729 {
5730 #ifdef notyet
5731 struct sysctlnode node = *rnode;
5732 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5733 device_t dev = adapter->dev;
5734 int curr_ps, new_ps, error = 0;
5735
5736 if (ixgbe_fw_recovery_mode_swflag(adapter))
5737 return (EPERM);
5738
5739 curr_ps = new_ps = pci_get_powerstate(dev);
5740
5741 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5742 if ((error) || (req->newp == NULL))
5743 return (error);
5744
5745 if (new_ps == curr_ps)
5746 return (0);
5747
5748 if (new_ps == 3 && curr_ps == 0)
5749 error = DEVICE_SUSPEND(dev);
5750 else if (new_ps == 0 && curr_ps == 3)
5751 error = DEVICE_RESUME(dev);
5752 else
5753 return (EINVAL);
5754
5755 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5756
5757 return (error);
5758 #else
5759 return 0;
5760 #endif
5761 } /* ixgbe_sysctl_power_state */
5762 #endif
5763
5764 /************************************************************************
5765 * ixgbe_sysctl_wol_enable
5766 *
5767 * Sysctl to enable/disable the WoL capability,
5768 * if supported by the adapter.
5769 *
5770 * Values:
5771 * 0 - disabled
5772 * 1 - enabled
5773 ************************************************************************/
5774 static int
5775 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5776 {
5777 struct sysctlnode node = *rnode;
5778 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5779 struct ixgbe_hw *hw = &adapter->hw;
5780 bool new_wol_enabled;
5781 int error = 0;
5782
5783 /*
5784 * It's not required to check recovery mode because this function never
5785 * touches hardware.
5786 */
5787 new_wol_enabled = hw->wol_enabled;
5788 node.sysctl_data = &new_wol_enabled;
5789 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5790 if ((error) || (newp == NULL))
5791 return (error);
5792 if (new_wol_enabled == hw->wol_enabled)
5793 return (0);
5794
5795 if (new_wol_enabled && !adapter->wol_support)
5796 return (ENODEV);
5797 else
5798 hw->wol_enabled = new_wol_enabled;
5799
5800 return (0);
5801 } /* ixgbe_sysctl_wol_enable */
5802
5803 /************************************************************************
5804 * ixgbe_sysctl_wufc - Wake Up Filter Control
5805 *
5806 * Sysctl to enable/disable the types of packets that the
5807 * adapter will wake up on upon receipt.
5808 * Flags:
5809 * 0x1 - Link Status Change
5810 * 0x2 - Magic Packet
5811 * 0x4 - Direct Exact
5812 * 0x8 - Directed Multicast
5813 * 0x10 - Broadcast
5814 * 0x20 - ARP/IPv4 Request Packet
5815 * 0x40 - Direct IPv4 Packet
5816 * 0x80 - Direct IPv6 Packet
5817 *
5818 * Settings not listed above will cause the sysctl to return an error.
5819 ************************************************************************/
5820 static int
5821 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5822 {
5823 struct sysctlnode node = *rnode;
5824 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5825 int error = 0;
5826 u32 new_wufc;
5827
5828 /*
5829 * It's not required to check recovery mode because this function never
5830 * touches hardware.
5831 */
5832 new_wufc = adapter->wufc;
5833 node.sysctl_data = &new_wufc;
5834 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5835 if ((error) || (newp == NULL))
5836 return (error);
5837 if (new_wufc == adapter->wufc)
5838 return (0);
5839
5840 if (new_wufc & 0xffffff00)
5841 return (EINVAL);
5842
5843 new_wufc &= 0xff;
5844 new_wufc |= (0xffffff & adapter->wufc);
5845 adapter->wufc = new_wufc;
5846
5847 return (0);
5848 } /* ixgbe_sysctl_wufc */
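/*
 * Illustrative sketch (compiled out), using the flag values documented
 * above: wake only on link status change or magic packet.
 */
#if 0 /* example only */
static void
ixgbe_example_wufc_link_and_magic(struct adapter *adapter)
{
	adapter->wufc = 0x1 | 0x2;	/* LSC | Magic Packet */
}
#endif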
5849
5850 #ifdef IXGBE_DEBUG
5851 /************************************************************************
5852 * ixgbe_sysctl_print_rss_config
5853 ************************************************************************/
5854 static int
5855 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5856 {
5857 #ifdef notyet
5858 struct sysctlnode node = *rnode;
5859 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5860 struct ixgbe_hw *hw = &adapter->hw;
5861 device_t dev = adapter->dev;
5862 struct sbuf *buf;
5863 int error = 0, reta_size;
5864 u32 reg;
5865
5866 if (ixgbe_fw_recovery_mode_swflag(adapter))
5867 return (EPERM);
5868
5869 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5870 if (!buf) {
5871 device_printf(dev, "Could not allocate sbuf for output.\n");
5872 return (ENOMEM);
5873 }
5874
5875 // TODO: use sbufs to make a string to print out
5876 /* Set multiplier for RETA setup and table size based on MAC */
5877 switch (adapter->hw.mac.type) {
5878 case ixgbe_mac_X550:
5879 case ixgbe_mac_X550EM_x:
5880 case ixgbe_mac_X550EM_a:
5881 reta_size = 128;
5882 break;
5883 default:
5884 reta_size = 32;
5885 break;
5886 }
5887
5888 /* Print out the redirection table */
5889 sbuf_cat(buf, "\n");
5890 for (int i = 0; i < reta_size; i++) {
5891 if (i < 32) {
5892 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5893 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5894 } else {
5895 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5896 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5897 }
5898 }
5899
5900 // TODO: print more config
5901
5902 error = sbuf_finish(buf);
5903 if (error)
5904 device_printf(dev, "Error finishing sbuf: %d\n", error);
5905
5906 sbuf_delete(buf);
5907 #endif
5908 return (0);
5909 } /* ixgbe_sysctl_print_rss_config */
5910 #endif /* IXGBE_DEBUG */
5911
5912 /************************************************************************
5913 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5914 *
5915 * For X552/X557-AT devices using an external PHY
5916 ************************************************************************/
5917 static int
5918 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5919 {
5920 struct sysctlnode node = *rnode;
5921 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5922 struct ixgbe_hw *hw = &adapter->hw;
5923 int val;
5924 u16 reg;
5925 int error;
5926
5927 if (ixgbe_fw_recovery_mode_swflag(adapter))
5928 return (EPERM);
5929
5930 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5931 device_printf(adapter->dev,
5932 "Device has no supported external thermal sensor.\n");
5933 return (ENODEV);
5934 }
5935
5936 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5937 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5938 device_printf(adapter->dev,
5939 "Error reading from PHY's current temperature register\n");
5940 return (EAGAIN);
5941 }
5942
5943 node.sysctl_data = &val;
5944
5945 /* Shift temp for output */
5946 val = reg >> 8;
5947
5948 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5949 if ((error) || (newp == NULL))
5950 return (error);
5951
5952 return (0);
5953 } /* ixgbe_sysctl_phy_temp */
5954
5955 /************************************************************************
5956 * ixgbe_sysctl_phy_overtemp_occurred
5957 *
5958 * Reports (directly from the PHY) whether the current PHY
5959 * temperature is over the overtemp threshold.
5960 ************************************************************************/
5961 static int
5962 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5963 {
5964 struct sysctlnode node = *rnode;
5965 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5966 struct ixgbe_hw *hw = &adapter->hw;
5967 int val, error;
5968 u16 reg;
5969
5970 if (ixgbe_fw_recovery_mode_swflag(adapter))
5971 return (EPERM);
5972
5973 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5974 device_printf(adapter->dev,
5975 "Device has no supported external thermal sensor.\n");
5976 return (ENODEV);
5977 }
5978
5979 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5980 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5981 device_printf(adapter->dev,
5982 "Error reading from PHY's temperature status register\n");
5983 return (EAGAIN);
5984 }
5985
5986 node.sysctl_data = &val;
5987
5988 /* Get occurrence bit */
5989 val = !!(reg & 0x4000);
5990
5991 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5992 if ((error) || (newp == NULL))
5993 return (error);
5994
5995 return (0);
5996 } /* ixgbe_sysctl_phy_overtemp_occurred */
5997
5998 /************************************************************************
5999 * ixgbe_sysctl_eee_state
6000 *
6001 * Sysctl to set EEE power saving feature
6002 * Values:
6003 * 0 - disable EEE
6004 * 1 - enable EEE
6005 * (none) - get current device EEE state
6006 ************************************************************************/
6007 static int
6008 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6009 {
6010 struct sysctlnode node = *rnode;
6011 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6012 struct ifnet *ifp = adapter->ifp;
6013 device_t dev = adapter->dev;
6014 int curr_eee, new_eee, error = 0;
6015 s32 retval;
6016
6017 if (ixgbe_fw_recovery_mode_swflag(adapter))
6018 return (EPERM);
6019
6020 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6021 node.sysctl_data = &new_eee;
6022 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6023 if ((error) || (newp == NULL))
6024 return (error);
6025
6026 /* Nothing to do */
6027 if (new_eee == curr_eee)
6028 return (0);
6029
6030 /* Not supported */
6031 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6032 return (EINVAL);
6033
6034 /* Bounds checking */
6035 if ((new_eee < 0) || (new_eee > 1))
6036 return (EINVAL);
6037
6038 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6039 if (retval) {
6040 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6041 return (EINVAL);
6042 }
6043
6044 /* Restart auto-neg */
6045 ifp->if_init(ifp);
6046
6047 device_printf(dev, "New EEE state: %d\n", new_eee);
6048
6049 /* Cache new value */
6050 if (new_eee)
6051 adapter->feat_en |= IXGBE_FEATURE_EEE;
6052 else
6053 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6054
6055 return (error);
6056 } /* ixgbe_sysctl_eee_state */
6057
6058 #define PRINTQS(adapter, regname) \
6059 do { \
6060 struct ixgbe_hw *_hw = &(adapter)->hw; \
6061 int _i; \
6062 \
6063 printf("%s: %s", device_xname((adapter)->dev), #regname); \
6064 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
6065 printf((_i == 0) ? "\t" : " "); \
6066 printf("%08x", IXGBE_READ_REG(_hw, \
6067 IXGBE_##regname(_i))); \
6068 } \
6069 printf("\n"); \
6070 } while (0)
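/*
 * For example, PRINTQS(adapter, RDT) emits a single line of the form
 * (values illustrative):
 *
 *	ixg0: RDT	0000003f 0000003f ...
 *
 * i.e. IXGBE_RDT(i) for every configured queue, which is how the
 * per-queue register dumps below are produced.
 */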
6071
6072 /************************************************************************
6073 * ixgbe_print_debug_info
6074 *
6075  * Called via the "debug" sysctl (see ixgbe_sysctl_debug() below).
6076 * Provides a way to take a look at important statistics
6077 * maintained by the driver and hardware.
6078 ************************************************************************/
6079 static void
6080 ixgbe_print_debug_info(struct adapter *adapter)
6081 {
6082 device_t dev = adapter->dev;
6083 struct ixgbe_hw *hw = &adapter->hw;
6084 int table_size;
6085 int i;
6086
6087 switch (adapter->hw.mac.type) {
6088 case ixgbe_mac_X550:
6089 case ixgbe_mac_X550EM_x:
6090 case ixgbe_mac_X550EM_a:
6091 table_size = 128;
6092 break;
6093 default:
6094 table_size = 32;
6095 break;
6096 }
6097
6098 device_printf(dev, "[E]RETA:\n");
6099 for (i = 0; i < table_size; i++) {
6100 if (i < 32)
6101 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6102 IXGBE_RETA(i)));
6103 else
6104 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6105 IXGBE_ERETA(i - 32)));
6106 }
6107
6108 device_printf(dev, "queue:");
6109 for (i = 0; i < adapter->num_queues; i++) {
6110 printf((i == 0) ? "\t" : " ");
6111 printf("%8d", i);
6112 }
6113 printf("\n");
6114 PRINTQS(adapter, RDBAL);
6115 PRINTQS(adapter, RDBAH);
6116 PRINTQS(adapter, RDLEN);
6117 PRINTQS(adapter, SRRCTL);
6118 PRINTQS(adapter, RDH);
6119 PRINTQS(adapter, RDT);
6120 PRINTQS(adapter, RXDCTL);
6121
6122 device_printf(dev, "RQSMR:");
6123 for (i = 0; i < adapter->num_queues / 4; i++) {
6124 printf((i == 0) ? "\t" : " ");
6125 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6126 }
6127 printf("\n");
6128
6129 device_printf(dev, "disabled_count:");
6130 for (i = 0; i < adapter->num_queues; i++) {
6131 printf((i == 0) ? "\t" : " ");
6132 printf("%8d", adapter->queues[i].disabled_count);
6133 }
6134 printf("\n");
6135
6136 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6137 if (hw->mac.type != ixgbe_mac_82598EB) {
6138 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6139 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6140 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6141 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6142 }
6143 } /* ixgbe_print_debug_info */
6144
6145 /************************************************************************
6146 * ixgbe_sysctl_debug
6147 ************************************************************************/
6148 static int
6149 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6150 {
6151 struct sysctlnode node = *rnode;
6152 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6153 int error, result = 0;
6154
6155 if (ixgbe_fw_recovery_mode_swflag(adapter))
6156 return (EPERM);
6157
6158 node.sysctl_data = &result;
6159 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6160
6161 if (error || newp == NULL)
6162 return error;
6163
6164 if (result == 1)
6165 ixgbe_print_debug_info(adapter);
6166
6167 return 0;
6168 } /* ixgbe_sysctl_debug */
6169
6170 /************************************************************************
6171 * ixgbe_init_device_features
6172 ************************************************************************/
6173 static void
6174 ixgbe_init_device_features(struct adapter *adapter)
6175 {
6176 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6177 | IXGBE_FEATURE_RSS
6178 | IXGBE_FEATURE_MSI
6179 | IXGBE_FEATURE_MSIX
6180 | IXGBE_FEATURE_LEGACY_IRQ
6181 | IXGBE_FEATURE_LEGACY_TX;
6182
6183 /* Set capabilities first... */
6184 switch (adapter->hw.mac.type) {
6185 case ixgbe_mac_82598EB:
6186 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6187 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6188 break;
6189 case ixgbe_mac_X540:
6190 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6191 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6192 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6193 (adapter->hw.bus.func == 0))
6194 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6195 break;
6196 case ixgbe_mac_X550:
6197 /*
6198 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6199 * NVM Image version.
6200 */
6201 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6202 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6203 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6204 break;
6205 case ixgbe_mac_X550EM_x:
6206 /*
6207 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6208 * NVM Image version.
6209 */
6210 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6211 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6212 break;
6213 case ixgbe_mac_X550EM_a:
6214 /*
6215 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6216 * NVM Image version.
6217 */
6218 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6219 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6220 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6221 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6222 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6223 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6224 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6225 }
6226 break;
6227 case ixgbe_mac_82599EB:
6228 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6229 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6230 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6231 (adapter->hw.bus.func == 0))
6232 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6233 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6234 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6235 break;
6236 default:
6237 break;
6238 }
6239
6240 /* Enabled by default... */
6241 /* Fan failure detection */
6242 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6243 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6244 /* Netmap */
6245 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6246 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6247 /* EEE */
6248 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6249 adapter->feat_en |= IXGBE_FEATURE_EEE;
6250 /* Thermal Sensor */
6251 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6252 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6253 /*
6254 * Recovery mode:
6255 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6256 * NVM Image version.
6257 */
6258
6259 /* Enabled via global sysctl... */
6260 /* Flow Director */
6261 if (ixgbe_enable_fdir) {
6262 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6263 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6264 else
6265 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6266 }
6267 /* Legacy (single queue) transmit */
6268 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6269 ixgbe_enable_legacy_tx)
6270 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6271 /*
6272 	 * Message Signaled Interrupts - Extended (MSI-X)
6273 * Normal MSI is only enabled if MSI-X calls fail.
6274 */
6275 if (!ixgbe_enable_msix)
6276 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6277 /* Receive-Side Scaling (RSS) */
6278 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6279 adapter->feat_en |= IXGBE_FEATURE_RSS;
6280
6281 /* Disable features with unmet dependencies... */
6282 /* No MSI-X */
6283 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6284 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6285 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6286 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6287 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6288 }
6289 } /* ixgbe_init_device_features */
6290
6291 /************************************************************************
6292 * ixgbe_probe - Device identification routine
6293 *
6294  * Determines if the driver should be loaded on this
6295  * adapter based on its PCI vendor/device ID.
6296  *
6297  * return 1 on match, 0 otherwise
6298 ************************************************************************/
6299 static int
6300 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6301 {
6302 const struct pci_attach_args *pa = aux;
6303
6304 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6305 }
6306
6307 static const ixgbe_vendor_info_t *
6308 ixgbe_lookup(const struct pci_attach_args *pa)
6309 {
6310 const ixgbe_vendor_info_t *ent;
6311 pcireg_t subid;
6312
6313 INIT_DEBUGOUT("ixgbe_lookup: begin");
6314
6315 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6316 return NULL;
6317
6318 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6319
6320 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6321 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6322 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6323 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6324 (ent->subvendor_id == 0)) &&
6325 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6326 (ent->subdevice_id == 0))) {
6327 return ent;
6328 }
6329 }
6330 return NULL;
6331 }
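/*
 * A match table entry has the form below (field layout assumed from
 * the ixgbe_vendor_info_t definition elsewhere in this driver); zero
 * subvendor/subdevice IDs act as wildcards in the loop above:
 *
 *	{ IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0 },
 */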
6332
6333 static int
6334 ixgbe_ifflags_cb(struct ethercom *ec)
6335 {
6336 struct ifnet *ifp = &ec->ec_if;
6337 struct adapter *adapter = ifp->if_softc;
6338 u_short change;
6339 int rv = 0;
6340
6341 IXGBE_CORE_LOCK(adapter);
6342
6343 change = ifp->if_flags ^ adapter->if_flags;
6344 if (change != 0)
6345 adapter->if_flags = ifp->if_flags;
6346
6347 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6348 rv = ENETRESET;
6349 goto out;
6350 } else if ((change & IFF_PROMISC) != 0)
6351 ixgbe_set_rxfilter(adapter);
6352
6353 /* Check for ec_capenable. */
6354 change = ec->ec_capenable ^ adapter->ec_capenable;
6355 adapter->ec_capenable = ec->ec_capenable;
6356 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6357 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6358 rv = ENETRESET;
6359 goto out;
6360 }
6361
6362 /*
6363 * Special handling is not required for ETHERCAP_VLAN_MTU.
6364 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6365 */
6366
6367 /* Set up VLAN support and filter */
6368 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6369 ixgbe_setup_vlan_hw_support(adapter);
6370
6371 out:
6372 IXGBE_CORE_UNLOCK(adapter);
6373
6374 return rv;
6375 }
6376
6377 /************************************************************************
6378 * ixgbe_ioctl - Ioctl entry point
6379 *
6380 * Called when the user wants to configure the interface.
6381 *
6382 * return 0 on success, positive on failure
6383 ************************************************************************/
6384 static int
6385 ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6386 {
6387 struct adapter *adapter = ifp->if_softc;
6388 struct ixgbe_hw *hw = &adapter->hw;
6389 struct ifcapreq *ifcr = data;
6390 struct ifreq *ifr = data;
6391 int error = 0;
6392 int l4csum_en;
6393 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6394 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6395
6396 if (ixgbe_fw_recovery_mode_swflag(adapter))
6397 return (EPERM);
6398
6399 switch (command) {
6400 case SIOCSIFFLAGS:
6401 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6402 break;
6403 case SIOCADDMULTI:
6404 case SIOCDELMULTI:
6405 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6406 break;
6407 case SIOCSIFMEDIA:
6408 case SIOCGIFMEDIA:
6409 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6410 break;
6411 case SIOCSIFCAP:
6412 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6413 break;
6414 case SIOCSIFMTU:
6415 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6416 break;
6417 #ifdef __NetBSD__
6418 case SIOCINITIFADDR:
6419 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6420 break;
6421 case SIOCGIFFLAGS:
6422 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6423 break;
6424 case SIOCGIFAFLAG_IN:
6425 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6426 break;
6427 case SIOCGIFADDR:
6428 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6429 break;
6430 case SIOCGIFMTU:
6431 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6432 break;
6433 case SIOCGIFCAP:
6434 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6435 break;
6436 case SIOCGETHERCAP:
6437 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6438 break;
6439 case SIOCGLIFADDR:
6440 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6441 break;
6442 case SIOCZIFDATA:
6443 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6444 hw->mac.ops.clear_hw_cntrs(hw);
6445 ixgbe_clear_evcnt(adapter);
6446 break;
6447 case SIOCAIFADDR:
6448 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6449 break;
6450 #endif
6451 default:
6452 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6453 break;
6454 }
6455
6456 switch (command) {
6457 case SIOCGI2C:
6458 {
6459 struct ixgbe_i2c_req i2c;
6460
6461 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6462 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6463 if (error != 0)
6464 break;
6465 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6466 error = EINVAL;
6467 break;
6468 }
6469 if (i2c.len > sizeof(i2c.data)) {
6470 error = EINVAL;
6471 break;
6472 }
6473
6474 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6475 i2c.dev_addr, i2c.data);
6476 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6477 break;
6478 }
6479 case SIOCSIFCAP:
6480 /* Layer-4 Rx checksum offload has to be turned on and
6481 * off as a unit.
6482 */
6483 l4csum_en = ifcr->ifcr_capenable & l4csum;
6484 if (l4csum_en != l4csum && l4csum_en != 0)
6485 return EINVAL;
6486 /*FALLTHROUGH*/
6487 case SIOCADDMULTI:
6488 case SIOCDELMULTI:
6489 case SIOCSIFFLAGS:
6490 case SIOCSIFMTU:
6491 default:
6492 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6493 return error;
6494 if ((ifp->if_flags & IFF_RUNNING) == 0)
6495 ;
6496 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6497 IXGBE_CORE_LOCK(adapter);
6498 if ((ifp->if_flags & IFF_RUNNING) != 0)
6499 ixgbe_init_locked(adapter);
6500 ixgbe_recalculate_max_frame(adapter);
6501 IXGBE_CORE_UNLOCK(adapter);
6502 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6503 /*
6504 * Multicast list has changed; set the hardware filter
6505 * accordingly.
6506 */
6507 IXGBE_CORE_LOCK(adapter);
6508 ixgbe_disable_intr(adapter);
6509 ixgbe_set_rxfilter(adapter);
6510 ixgbe_enable_intr(adapter);
6511 IXGBE_CORE_UNLOCK(adapter);
6512 }
6513 return 0;
6514 }
6515
6516 return error;
6517 } /* ixgbe_ioctl */
6518
6519 /************************************************************************
6520 * ixgbe_check_fan_failure
6521 ************************************************************************/
6522 static void
6523 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6524 {
6525 u32 mask;
6526
6527 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6528 IXGBE_ESDP_SDP1;
6529
6530 if (reg & mask)
6531 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6532 } /* ixgbe_check_fan_failure */
6533
6534 /************************************************************************
6535 * ixgbe_handle_que
6536 ************************************************************************/
6537 static void
6538 ixgbe_handle_que(void *context)
6539 {
6540 struct ix_queue *que = context;
6541 struct adapter *adapter = que->adapter;
6542 struct tx_ring *txr = que->txr;
6543 struct ifnet *ifp = adapter->ifp;
6544 bool more = false;
6545
6546 que->handleq.ev_count++;
6547
6548 if (ifp->if_flags & IFF_RUNNING) {
6549 more = ixgbe_rxeof(que);
6550 IXGBE_TX_LOCK(txr);
6551 more |= ixgbe_txeof(txr);
6552 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6553 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6554 ixgbe_mq_start_locked(ifp, txr);
6555 /* Only for queue 0 */
6556 /* NetBSD still needs this for CBQ */
6557 if ((&adapter->queues[0] == que)
6558 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6559 ixgbe_legacy_start_locked(ifp, txr);
6560 IXGBE_TX_UNLOCK(txr);
6561 }
6562
6563 if (more) {
6564 que->req.ev_count++;
6565 ixgbe_sched_handle_que(adapter, que);
6566 } else if (que->res != NULL) {
6567 /* Re-enable this interrupt */
6568 ixgbe_enable_queue(adapter, que->msix);
6569 } else
6570 ixgbe_enable_intr(adapter);
6571
6572 return;
6573 } /* ixgbe_handle_que */
6574
6575 /************************************************************************
6576 * ixgbe_handle_que_work
6577 ************************************************************************/
6578 static void
6579 ixgbe_handle_que_work(struct work *wk, void *context)
6580 {
6581 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6582
6583 /*
6584 * "enqueued flag" is not required here.
6585 * See ixgbe_msix_que().
6586 */
6587 ixgbe_handle_que(que);
6588 }
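/*
 * Illustrative sketch (compiled out): the enqueue side that pairs with
 * ixgbe_handle_que_work().  wq_cookie is the struct work embedded in
 * struct ix_queue that container_of() recovers above.
 */
#if 0 /* example only */
static void
ixgbe_example_enqueue_que(struct adapter *adapter, struct ix_queue *que)
{
	workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
}
#endif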
6589
6590 /************************************************************************
6591 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6592 ************************************************************************/
6593 static int
6594 ixgbe_allocate_legacy(struct adapter *adapter,
6595 const struct pci_attach_args *pa)
6596 {
6597 device_t dev = adapter->dev;
6598 struct ix_queue *que = adapter->queues;
6599 struct tx_ring *txr = adapter->tx_rings;
6600 int counts[PCI_INTR_TYPE_SIZE];
6601 pci_intr_type_t intr_type, max_type;
6602 char intrbuf[PCI_INTRSTR_LEN];
6603 char wqname[MAXCOMLEN];
6604 const char *intrstr = NULL;
6605 int defertx_error = 0, error;
6606
6607 /* We allocate a single interrupt resource */
6608 max_type = PCI_INTR_TYPE_MSI;
6609 counts[PCI_INTR_TYPE_MSIX] = 0;
6610 counts[PCI_INTR_TYPE_MSI] =
6611 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6612 	/* Check feat_cap, not feat_en, so we can fall back to INTx */
6613 counts[PCI_INTR_TYPE_INTX] =
6614 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6615
6616 alloc_retry:
6617 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6618 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6619 return ENXIO;
6620 }
6621 adapter->osdep.nintrs = 1;
6622 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6623 intrbuf, sizeof(intrbuf));
6624 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6625 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6626 device_xname(dev));
6627 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6628 if (adapter->osdep.ihs[0] == NULL) {
6629 aprint_error_dev(dev,"unable to establish %s\n",
6630 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6631 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6632 adapter->osdep.intrs = NULL;
6633 switch (intr_type) {
6634 case PCI_INTR_TYPE_MSI:
6635 /* The next try is for INTx: Disable MSI */
6636 max_type = PCI_INTR_TYPE_INTX;
6637 counts[PCI_INTR_TYPE_INTX] = 1;
6638 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6639 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6640 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6641 goto alloc_retry;
6642 } else
6643 break;
6644 case PCI_INTR_TYPE_INTX:
6645 default:
6646 /* See below */
6647 break;
6648 }
6649 }
6650 if (intr_type == PCI_INTR_TYPE_INTX) {
6651 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6652 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6653 }
6654 if (adapter->osdep.ihs[0] == NULL) {
6655 aprint_error_dev(dev,
6656 "couldn't establish interrupt%s%s\n",
6657 intrstr ? " at " : "", intrstr ? intrstr : "");
6658 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6659 adapter->osdep.intrs = NULL;
6660 return ENXIO;
6661 }
6662 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6663 /*
6664 * Try allocating a fast interrupt and the associated deferred
6665 * processing contexts.
6666 */
6667 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6668 txr->txr_si =
6669 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6670 ixgbe_deferred_mq_start, txr);
6671
6672 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6673 defertx_error = workqueue_create(&adapter->txr_wq, wqname,
6674 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
6675 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6676 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6677 }
6678 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6679 ixgbe_handle_que, que);
6680 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6681 error = workqueue_create(&adapter->que_wq, wqname,
6682 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6683 IXGBE_WORKQUEUE_FLAGS);
6684
6685 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6686 && ((txr->txr_si == NULL) || defertx_error != 0))
6687 || (que->que_si == NULL) || error != 0) {
6688 aprint_error_dev(dev,
6689 "could not establish software interrupts\n");
6690
6691 return ENXIO;
6692 }
6693 /* For simplicity in the handlers */
6694 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6695
6696 return (0);
6697 } /* ixgbe_allocate_legacy */
6698
6699 /************************************************************************
6700 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6701 ************************************************************************/
6702 static int
6703 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6704 {
6705 device_t dev = adapter->dev;
6706 struct ix_queue *que = adapter->queues;
6707 struct tx_ring *txr = adapter->tx_rings;
6708 pci_chipset_tag_t pc;
6709 char intrbuf[PCI_INTRSTR_LEN];
6710 char intr_xname[32];
6711 char wqname[MAXCOMLEN];
6712 const char *intrstr = NULL;
6713 int error, vector = 0;
6714 int cpu_id = 0;
6715 kcpuset_t *affinity;
6716 #ifdef RSS
6717 unsigned int rss_buckets = 0;
6718 kcpuset_t cpu_mask;
6719 #endif
6720
6721 pc = adapter->osdep.pc;
6722 #ifdef RSS
6723 /*
6724 * If we're doing RSS, the number of queues needs to
6725 * match the number of RSS buckets that are configured.
6726 *
6727 * + If there's more queues than RSS buckets, we'll end
6728 * up with queues that get no traffic.
6729 *
6730 * + If there's more RSS buckets than queues, we'll end
6731 * up having multiple RSS buckets map to the same queue,
6732 * so there'll be some contention.
6733 */
6734 rss_buckets = rss_getnumbuckets();
6735 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6736 (adapter->num_queues != rss_buckets)) {
6737 device_printf(dev,
6738 "%s: number of queues (%d) != number of RSS buckets (%d)"
6739 "; performance will be impacted.\n",
6740 __func__, adapter->num_queues, rss_buckets);
6741 }
6742 #endif
6743
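	/* One vector per queue pair, plus one more for link/admin events. */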
6744 adapter->osdep.nintrs = adapter->num_queues + 1;
6745 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6746 adapter->osdep.nintrs) != 0) {
6747 aprint_error_dev(dev,
6748 "failed to allocate MSI-X interrupt\n");
6749 return (ENXIO);
6750 }
6751
6752 kcpuset_create(&affinity, false);
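	/* For each queue: establish its vector, then set its CPU affinity. */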
6753 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6754 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6755 device_xname(dev), i);
6756 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6757 sizeof(intrbuf));
6758 #ifdef IXGBE_MPSAFE
6759 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6760 true);
6761 #endif
6762 /* Set the handler function */
6763 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6764 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6765 intr_xname);
6766 if (que->res == NULL) {
6767 aprint_error_dev(dev,
6768 "Failed to register QUE handler\n");
6769 error = ENXIO;
6770 goto err_out;
6771 }
6772 que->msix = vector;
6773 adapter->active_queues |= 1ULL << que->msix;
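		/* This mask of active vectors is used when re-arming queues. */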
6774
6775 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6776 #ifdef RSS
6777 /*
6778 * The queue ID is used as the RSS layer bucket ID.
6779 * We look up the queue ID -> RSS CPU ID and select
6780 * that.
6781 */
6782 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6783 CPU_SETOF(cpu_id, &cpu_mask);
6784 #endif
6785 } else {
6786 /*
6787 * Bind the MSI-X vector, and thus the
6788 * rings to the corresponding CPU.
6789 *
6790 * This just happens to match the default RSS
6791 * round-robin bucket -> queue -> CPU allocation.
6792 */
6793 if (adapter->num_queues > 1)
6794 cpu_id = i;
6795 }
6796 /* Round-robin affinity */
6797 kcpuset_zero(affinity);
6798 kcpuset_set(affinity, cpu_id % ncpu);
6799 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6800 NULL);
6801 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6802 intrstr);
6803 if (error == 0) {
6804 #if 1 /* def IXGBE_DEBUG */
6805 #ifdef RSS
6806 				aprint_normal(", bound RSS bucket %d to CPU %d", i,
6807 cpu_id % ncpu);
6808 #else
6809 aprint_normal(", bound queue %d to cpu %d", i,
6810 cpu_id % ncpu);
6811 #endif
6812 #endif /* IXGBE_DEBUG */
6813 }
6814 aprint_normal("\n");
6815
6816 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6817 txr->txr_si = softint_establish(
6818 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6819 ixgbe_deferred_mq_start, txr);
6820 if (txr->txr_si == NULL) {
6821 aprint_error_dev(dev,
6822 "couldn't establish software interrupt\n");
6823 error = ENXIO;
6824 goto err_out;
6825 }
6826 }
6827 que->que_si
6828 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6829 ixgbe_handle_que, que);
6830 if (que->que_si == NULL) {
6831 aprint_error_dev(dev,
6832 "couldn't establish software interrupt\n");
6833 error = ENXIO;
6834 goto err_out;
6835 }
6836 }
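	/* The workqueues below are adapter-wide, shared by all queues. */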
6837 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6838 error = workqueue_create(&adapter->txr_wq, wqname,
6839 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6840 IXGBE_WORKQUEUE_FLAGS);
6841 if (error) {
6842 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6843 goto err_out;
6844 }
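	/* Per-CPU enqueue state for the deferred Tx workqueue. */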
6845 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6846
6847 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6848 error = workqueue_create(&adapter->que_wq, wqname,
6849 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6850 IXGBE_WORKQUEUE_FLAGS);
6851 if (error) {
6852 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6853 goto err_out;
6854 }
6855
6856 /* and Link */
6857 cpu_id++;
6858 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6859 adapter->vector = vector;
6860 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6861 sizeof(intrbuf));
6862 #ifdef IXGBE_MPSAFE
6863 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6864 true);
6865 #endif
6866 /* Set the link handler function */
6867 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6868 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6869 intr_xname);
6870 if (adapter->osdep.ihs[vector] == NULL) {
6871 aprint_error_dev(dev, "Failed to register LINK handler\n");
6872 error = ENXIO;
6873 goto err_out;
6874 }
6875 /* Round-robin affinity */
6876 kcpuset_zero(affinity);
6877 kcpuset_set(affinity, cpu_id % ncpu);
6878 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6879 NULL);
6880
6881 aprint_normal_dev(dev,
6882 "for link, interrupting at %s", intrstr);
6883 if (error == 0)
6884 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6885 else
6886 aprint_normal("\n");
6887
6888 kcpuset_destroy(affinity);
6889 aprint_normal_dev(dev,
6890 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6891
6892 return (0);
6893
6894 err_out:
6895 kcpuset_destroy(affinity);
6896 ixgbe_free_workqueue(adapter);
6897 ixgbe_free_pciintr_resources(adapter);
6898 return (error);
6899 } /* ixgbe_allocate_msix */
6900
6901 /************************************************************************
6902 * ixgbe_configure_interrupts
6903 *
6904 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6905 * This will also depend on user settings.
6906 ************************************************************************/
6907 static int
6908 ixgbe_configure_interrupts(struct adapter *adapter)
6909 {
6910 device_t dev = adapter->dev;
6911 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6912 int want, queues, msgs;
6913
6914 /* Default to 1 queue if MSI-X setup fails */
6915 adapter->num_queues = 1;
6916
6917 /* Override by tuneable */
6918 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6919 goto msi;
6920
6921 /*
6922 	 * NetBSD only: use a single-vector MSI when the number of CPUs is 1,
6923 	 * to save an interrupt slot.
6924 */
6925 if (ncpu == 1)
6926 goto msi;
6927
6928 /* First try MSI-X */
6929 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6930 msgs = MIN(msgs, IXG_MAX_NINTR);
6931 if (msgs < 2)
6932 goto msi;
6933
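	/*
	 * Non-NULL sentinel; presumably a stand-in for the mapped MSI-X
	 * table resource that the FreeBSD driver keeps here.
	 */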
6934 adapter->msix_mem = (void *)1; /* XXX */
6935
6936 /* Figure out a reasonable auto config value */
6937 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6938
6939 #ifdef RSS
6940 /* If we're doing RSS, clamp at the number of RSS buckets */
6941 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6942 queues = uimin(queues, rss_getnumbuckets());
6943 #endif
6944 if (ixgbe_num_queues > queues) {
6945 		aprint_error_dev(adapter->dev,
		    "ixgbe_num_queues (%d) is too large, "
		    "using reduced amount (%d).\n", ixgbe_num_queues, queues);
6946 ixgbe_num_queues = queues;
6947 }
6948
6949 if (ixgbe_num_queues != 0)
6950 queues = ixgbe_num_queues;
6951 else
6952 queues = uimin(queues,
6953 uimin(mac->max_tx_queues, mac->max_rx_queues));
6954
6955 /* reflect correct sysctl value */
6956 ixgbe_num_queues = queues;
6957
6958 /*
6959 * Want one vector (RX/TX pair) per queue
6960 * plus an additional for Link.
6961 */
6962 want = queues + 1;
6963 if (msgs >= want)
6964 msgs = want;
6965 else {
6966 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6967 "%d vectors but %d queues wanted!\n",
6968 msgs, want);
6969 goto msi;
6970 }
6971 adapter->num_queues = queues;
6972 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6973 return (0);
6974
6975 /*
6976 * MSI-X allocation failed or provided us with
6977 * less vectors than needed. Free MSI-X resources
6978 * and we'll try enabling MSI.
6979 */
6980 msi:
6981 /* Without MSI-X, some features are no longer supported */
6982 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6983 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6984 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6985 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6986
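	/* MSI-X is out; next preference is a single MSI vector. */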
6987 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6988 adapter->msix_mem = NULL; /* XXX */
6991 	if (msgs != 0) {
6992 		msgs = 1;
6993 adapter->feat_en |= IXGBE_FEATURE_MSI;
6994 return (0);
6995 }
6996
6997 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6998 aprint_error_dev(dev,
6999 "Device does not support legacy interrupts.\n");
7000 return 1;
7001 }
7002
7003 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7004
7005 return (0);
7006 } /* ixgbe_configure_interrupts */
7007
7008
7009 /************************************************************************
7010 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7011 *
7012 * Done outside of interrupt context since the driver might sleep
7013 ************************************************************************/
7014 static void
7015 ixgbe_handle_link(void *context)
7016 {
7017 struct adapter *adapter = context;
7018 struct ixgbe_hw *hw = &adapter->hw;
7019
7020 ++adapter->link_workev.ev_count;
7021 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7022 ixgbe_update_link_status(adapter);
7023
7024 /* Re-enable link interrupts */
7025 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7026 } /* ixgbe_handle_link */
7027
7028 #if 0
7029 /************************************************************************
7030 * ixgbe_rearm_queues
7031 ************************************************************************/
7032 static __inline void
7033 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
7034 {
7035 u32 mask;
7036
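	/*
	 * 82598 has a single 32-bit EICS register; later MACs split the
	 * 64 queue bits across EICS_EX(0) and EICS_EX(1).
	 */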
7037 switch (adapter->hw.mac.type) {
7038 case ixgbe_mac_82598EB:
7039 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7040 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
7041 break;
7042 case ixgbe_mac_82599EB:
7043 case ixgbe_mac_X540:
7044 case ixgbe_mac_X550:
7045 case ixgbe_mac_X550EM_x:
7046 case ixgbe_mac_X550EM_a:
7047 mask = (queues & 0xFFFFFFFF);
7048 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
7049 mask = (queues >> 32);
7050 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
7051 break;
7052 default:
7053 break;
7054 }
7055 } /* ixgbe_rearm_queues */
7056 #endif
7057