/* $NetBSD: ixgbe.c,v 1.88.2.46 2022/01/29 16:36:07 martin Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.88.2.46 2022/01/29 16:36:07 martin Exp $");
68
69 #ifdef _KERNEL_OPT
70 #include "opt_inet.h"
71 #include "opt_inet6.h"
72 #include "opt_net_mpsafe.h"
73 #endif
74
75 #include "ixgbe.h"
76 #include "ixgbe_sriov.h"
77 #include "vlan.h"
78
79 #include <sys/cprng.h>
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82
/************************************************************************
 * Driver version
 ************************************************************************/
/* Version string reported at attach time (FreeBSD driver base). */
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */
88
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   SubVendor/SubDevice of 0 match any subsystem ID; all entries share
 *   string index 0 (the single entry in ixgbe_strings below).
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
153
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixgbe_vendor_info_array entries.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
160
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* Autoconf(9) entry points */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
/* pmf(9) power management hooks */
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
/* ifnet entry points and init/stop helpers */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
/* Media handling */
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
/* PCI/interrupt resource management */
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_softint(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
/* Periodic timers */
static void	ixgbe_local_timer(void *);
static void	ixgbe_local_timer1(void *);
static void	ixgbe_recovery_mode_timer(void *);
/* Interface/link configuration */
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

/* TX/RX ring initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt, statistics and filter helpers */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN hardware offload */
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Sysctl/evcnt setup */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_link(void *);

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

/* Match a pci_attach_args against the device ID table above */
static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
281
/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
/* Register the "ixg" driver with autoconf(9). */
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus module glue; unused on NetBSD, kept for diff-ability. */
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
299
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * NOTE: SYSCTL_INT is defined empty here, so the SYSCTL_INT() uses below
 * are no-ops on NetBSD; they are retained to minimize the diff against
 * the FreeBSD driver.  The actual sysctls are created elsewhere.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* TUNABLE_INT is a FreeBSD-ism; defined empty on NetBSD (see SYSCTL_INT). */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * Flag sets for callouts, softints and workqueues depending on whether
 * the kernel is built with NET_MPSAFE (fine-grained network locking).
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
422
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the 40-byte RSS
 *   hash key (RSSRK) and the hash-type selection (MRQC) so received
 *   packets are spread across the configured RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/*
	 * Set multiplier for RETA setup and table size based on MAC.
	 * 82598 spreads its entries with a 0x11 stride; X550-class
	 * MACs extend the table from 128 to 512 entries (the extra
	 * 384 live in the ERETA registers).
	 */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table: j cycles over the RX queues. */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then flush the 32-bit
		 * word to RETA (first 128 entries) or ERETA (the rest).
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash-type bitmap into MRQC field enables. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
539
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round-up mask so rx_mbuf_sz maps to whole SRRCTL BSIZEPKT units (1 KB). */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in SRRCTL units of 1 KB, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue setup; j is the hardware ring index (rxr->me). */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register: buffer size + advanced
		 * one-buffer descriptor type. */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split replication types (non-82598 MACs only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
669
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware ring index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the DMA TX engine (non-82598 MACs) */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
763
764 /************************************************************************
765 * ixgbe_attach - Device initialization routine
766 *
767 * Called when the driver is being loaded.
768 * Identifies the type of hardware, allocates all resources
769 * and initializes the hardware.
770 *
771 * return 0 on success, positive on failure
772 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = -1;		/* default: failure until proven otherwise */
	u32 ctrl_ext;
	u16 high, low, nvmreg;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool unsupported_sfp = false;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/*
	 * Allocate, clear, and link in our adapter structure.
	 * device_private() returns the softc autoconf allocated for us.
	 */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the parent bus provides one */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixgbe_lookup(pa);

	/* The match routine already accepted this device, so it must exist */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/*
	 * Let hardware know driver is loaded.  The DRV_LOAD bit is cleared
	 * again on both error paths below (err_late/err_out).
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Human-readable MAC generation name for the attach banner */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	/* Initialize the PF<->VF mailbox parameters if the MAC has one */
	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	/* Set the right number of segments */
	KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
	adapter->num_segs = IXGBE_SCATTER_DEFAULT;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	/* Decide on MSI-X / MSI / INTx and the queue count */
	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring sizes must keep the
	 * descriptor area DBA_ALIGN-aligned and stay within MIN/MAX bounds;
	 * otherwise fall back to the defaults.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Set default high limit of copying mbuf in rxeof */
	adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allow reset to proceed even if the PHY reports over-temperature */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		adapter->sfp_probe = TRUE;
		error = IXGBE_SUCCESS;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev,
		    "Hardware initialization failed(error = %d)\n", error);
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/*
	 * NVM Image Version.  The register layout differs per MAC
	 * generation; 0xffff means the word is unprogrammed and is skipped.
	 */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		/* X540 prints the minor in plain hex, newer MACs zero-pad */
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	/* Remembered for the recovery-mode check below */
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/*
			 * Fallback to legacy interrupt: drop to a single
			 * queue and re-allocate the rings for that layout.
			 */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/*
	 * Recovery mode: on X550-family parts with NVM image version >= 2.00
	 * the firmware can put the device into recovery mode; enable the
	 * feature so the periodic timer below can watch for it.
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	/* error is 0 here if the MSI-X path above succeeded */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_link, adapter);
	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_mod, adapter);
	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_msf, adapter);
	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_phy, adapter);
	/* The FDIR softint is only needed when the feature is enabled */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		adapter->fdir_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_reinit_fdir, adapter);
	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
		&& (adapter->fdir_si == NULL))) {
		aprint_error_dev(dev,
		    "could not establish software interrupts ()\n");
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		/* Non-fatal: warn but continue the attach */
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop_locked;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Log the capability/enabled feature bitmasks (verbose boot only) */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

	/*
	 * Error unwinding: err_late additionally frees the TX/RX queue
	 * structures; err_out clears the DRV_LOAD bit and releases all
	 * remaining resources allocated above.
	 */
err_late:
	ixgbe_free_queues(adapter);
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_softint(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
1262
1263 /************************************************************************
1264 * ixgbe_check_wol_support
1265 *
1266 * Checks whether the adapter's ports are capable of
1267 * Wake On LAN by reading the adapter's NVM.
1268 *
1269 * Sets each port's hw->wol_enabled value depending
1270 * on the value read here.
1271 ************************************************************************/
1272 static void
1273 ixgbe_check_wol_support(struct adapter *adapter)
1274 {
1275 struct ixgbe_hw *hw = &adapter->hw;
1276 u16 dev_caps = 0;
1277
1278 /* Find out WoL support for port */
1279 adapter->wol_support = hw->wol_enabled = 0;
1280 ixgbe_get_device_caps(hw, &dev_caps);
1281 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1282 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1283 hw->bus.func == 0))
1284 adapter->wol_support = hw->wol_enabled = 1;
1285
1286 /* Save initial wake up filter configuration */
1287 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1288
1289 return;
1290 } /* ixgbe_check_wol_support */
1291
1292 /************************************************************************
1293 * ixgbe_setup_interface
1294 *
1295 * Setup networking device structure and register an interface.
1296 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in our ethercom; wire up the callbacks */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless the legacy-TX feature is forced on */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach order matters: if_initialize, then the percpu input
	 * queue and ether attach, then if_register at the very end.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (checksum offload and TSO; off by default) */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1401
1402 /************************************************************************
1403 * ixgbe_add_media_types
1404 ************************************************************************/
1405 static void
1406 ixgbe_add_media_types(struct adapter *adapter)
1407 {
1408 struct ixgbe_hw *hw = &adapter->hw;
1409 device_t dev = adapter->dev;
1410 u64 layer;
1411
1412 layer = adapter->phy_layer;
1413
1414 #define ADD(mm, dd) \
1415 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1416
1417 ADD(IFM_NONE, 0);
1418
1419 /* Media types with matching NetBSD media defines */
1420 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1421 ADD(IFM_10G_T | IFM_FDX, 0);
1422 }
1423 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1424 ADD(IFM_1000_T | IFM_FDX, 0);
1425 }
1426 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1427 ADD(IFM_100_TX | IFM_FDX, 0);
1428 }
1429 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1430 ADD(IFM_10_T | IFM_FDX, 0);
1431 }
1432
1433 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1434 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1435 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1436 }
1437
1438 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1439 ADD(IFM_10G_LR | IFM_FDX, 0);
1440 if (hw->phy.multispeed_fiber) {
1441 ADD(IFM_1000_LX | IFM_FDX, 0);
1442 }
1443 }
1444 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1445 ADD(IFM_10G_SR | IFM_FDX, 0);
1446 if (hw->phy.multispeed_fiber) {
1447 ADD(IFM_1000_SX | IFM_FDX, 0);
1448 }
1449 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1450 ADD(IFM_1000_SX | IFM_FDX, 0);
1451 }
1452 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1453 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1454 }
1455
1456 #ifdef IFM_ETH_XTYPE
1457 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1458 ADD(IFM_10G_KR | IFM_FDX, 0);
1459 }
1460 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1461 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1462 }
1463 #else
1464 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1465 device_printf(dev, "Media supported: 10GbaseKR\n");
1466 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1467 ADD(IFM_10G_SR | IFM_FDX, 0);
1468 }
1469 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1470 device_printf(dev, "Media supported: 10GbaseKX4\n");
1471 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1472 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1473 }
1474 #endif
1475 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1476 ADD(IFM_1000_KX | IFM_FDX, 0);
1477 }
1478 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1479 ADD(IFM_2500_KX | IFM_FDX, 0);
1480 }
1481 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1482 ADD(IFM_2500_T | IFM_FDX, 0);
1483 }
1484 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1485 ADD(IFM_5000_T | IFM_FDX, 0);
1486 }
1487 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1488 ADD(IFM_1000_LX | IFM_FDX, 0); /* IFM_1000_BX */
1489 /* XXX no ifmedia_set? */
1490
1491 ADD(IFM_AUTO, 0);
1492
1493 #undef ADD
1494 } /* ixgbe_add_media_types */
1495
1496 /************************************************************************
1497 * ixgbe_is_sfp
1498 ************************************************************************/
1499 static inline bool
1500 ixgbe_is_sfp(struct ixgbe_hw *hw)
1501 {
1502 switch (hw->mac.type) {
1503 case ixgbe_mac_82598EB:
1504 if (hw->phy.type == ixgbe_phy_nl)
1505 return (TRUE);
1506 return (FALSE);
1507 case ixgbe_mac_82599EB:
1508 case ixgbe_mac_X550EM_x:
1509 case ixgbe_mac_X550EM_a:
1510 switch (hw->mac.ops.get_media_type(hw)) {
1511 case ixgbe_media_type_fiber:
1512 case ixgbe_media_type_fiber_qsfp:
1513 return (TRUE);
1514 default:
1515 return (FALSE);
1516 }
1517 default:
1518 return (FALSE);
1519 }
1520 } /* ixgbe_is_sfp */
1521
1522 /************************************************************************
1523 * ixgbe_config_link
1524 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP path: defer module/multispeed handling to the
		 * dedicated softints.  kpreempt_disable() brackets each
		 * softint_schedule() call — presumably to keep the thread
		 * bound to the CPU whose softint is being scheduled
		 * (NOTE(review): confirm against softint(9) requirements).
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		struct ifmedia *ifm = &adapter->media;

		/* Non-SFP path: query link state, then program the MAC */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1570
1571 /************************************************************************
1572 * ixgbe_update_stats_counters - Update board statistics counters.
1573 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/* NOTE(review): missed_rx is never updated below, so it is
	 * always 0; the subtraction from GPRC is currently a no-op. */
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
	unsigned int queue_counters;
	int i;

	/*
	 * Read a (clear-on-read) stats register, accumulate it into the
	 * matching evcnt, and keep the raw value in a local of the same
	 * name for the ierrors aggregation at the bottom.
	 */
#define READ_COPY_SET(hw, stats, regname, evname)	\
	do {						\
		(evname) = IXGBE_READ_REG((hw), regname); \
		(stats)->evname.ev_count += (evname);	\
	} while (/*CONSTCOND*/0)

	READ_COPY_SET(hw, stats, IXGBE_CRCERRS, crcerrs);
	READ_COPY_SET(hw, stats, IXGBE_ILLERRC, illerrc);

	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC only exists on X550 and newer */
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = min(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		/* per-queue RX drop counters appeared with 82599 */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist (one per traffic class) */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* 82599+ moved the priority-pause RX counters */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	READ_COPY_SET(hw, stats, IXGBE_RLEC, rlec);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* total = pause frames sent; used to adjust TX byte/packet
	 * counters below (inherited from the FreeBSD driver) */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit octet counters are split low/high on 82599+ */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
		    - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count
		    += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH)
		    - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX size-bucket histogram counters */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Back out the pause frames counted in the TX packet counters */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	READ_COPY_SET(hw, stats, IXGBE_RUC, ruc);
	READ_COPY_SET(hw, stats, IXGBE_RFC, rfc);
	READ_COPY_SET(hw, stats, IXGBE_ROC, roc);
	READ_COPY_SET(hw, stats, IXGBE_RJC, rjc);

#undef READ_COPY_SET

	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	ifp->if_ierrors +=
	    crcerrs + illerrc + rlec + ruc + rfc + roc + rjc;
} /* ixgbe_update_stats_counters */
1758
1759 /************************************************************************
1760 * ixgbe_add_hw_stats
1761 *
1762 * Add sysctl variables, one per statistic, to the system.
1763 ************************************************************************/
1764 static void
1765 ixgbe_add_hw_stats(struct adapter *adapter)
1766 {
1767 device_t dev = adapter->dev;
1768 const struct sysctlnode *rnode, *cnode;
1769 struct sysctllog **log = &adapter->sysctllog;
1770 struct tx_ring *txr = adapter->tx_rings;
1771 struct rx_ring *rxr = adapter->rx_rings;
1772 struct ixgbe_hw *hw = &adapter->hw;
1773 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1774 const char *xname = device_xname(dev);
1775 int i;
1776
1777 /* Driver Statistics */
1778 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1779 NULL, xname, "Driver tx dma soft fail EFBIG");
1780 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1781 NULL, xname, "m_defrag() failed");
1782 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1783 NULL, xname, "Driver tx dma hard fail EFBIG");
1784 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1785 NULL, xname, "Driver tx dma hard fail EINVAL");
1786 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1787 NULL, xname, "Driver tx dma hard fail other");
1788 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1789 NULL, xname, "Driver tx dma soft fail EAGAIN");
1790 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1791 NULL, xname, "Driver tx dma soft fail ENOMEM");
1792 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1793 NULL, xname, "Watchdog timeouts");
1794 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1795 NULL, xname, "TSO errors");
1796 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1797 NULL, xname, "Link MSI-X IRQ Handled");
1798 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1799 NULL, xname, "Link softint");
1800 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1801 NULL, xname, "module softint");
1802 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1803 NULL, xname, "multimode softint");
1804 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1805 NULL, xname, "external PHY softint");
1806
1807 /* Max number of traffic class is 8 */
1808 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1809 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1810 snprintf(adapter->tcs[i].evnamebuf,
1811 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1812 xname, i);
1813 if (i < __arraycount(stats->mpc)) {
1814 evcnt_attach_dynamic(&stats->mpc[i],
1815 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1816 "RX Missed Packet Count");
1817 if (hw->mac.type == ixgbe_mac_82598EB)
1818 evcnt_attach_dynamic(&stats->rnbc[i],
1819 EVCNT_TYPE_MISC, NULL,
1820 adapter->tcs[i].evnamebuf,
1821 "Receive No Buffers");
1822 }
1823 if (i < __arraycount(stats->pxontxc)) {
1824 evcnt_attach_dynamic(&stats->pxontxc[i],
1825 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1826 "pxontxc");
1827 evcnt_attach_dynamic(&stats->pxonrxc[i],
1828 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1829 "pxonrxc");
1830 evcnt_attach_dynamic(&stats->pxofftxc[i],
1831 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1832 "pxofftxc");
1833 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1834 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1835 "pxoffrxc");
1836 if (hw->mac.type >= ixgbe_mac_82599EB)
1837 evcnt_attach_dynamic(&stats->pxon2offc[i],
1838 EVCNT_TYPE_MISC, NULL,
1839 adapter->tcs[i].evnamebuf,
1840 "pxon2offc");
1841 }
1842 }
1843
1844 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1845 #ifdef LRO
1846 struct lro_ctrl *lro = &rxr->lro;
1847 #endif /* LRO */
1848
1849 snprintf(adapter->queues[i].evnamebuf,
1850 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1851 xname, i);
1852 snprintf(adapter->queues[i].namebuf,
1853 sizeof(adapter->queues[i].namebuf), "q%d", i);
1854
1855 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1856 aprint_error_dev(dev, "could not create sysctl root\n");
1857 break;
1858 }
1859
1860 if (sysctl_createv(log, 0, &rnode, &rnode,
1861 0, CTLTYPE_NODE,
1862 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1863 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1864 break;
1865
1866 if (sysctl_createv(log, 0, &rnode, &cnode,
1867 CTLFLAG_READWRITE, CTLTYPE_INT,
1868 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1869 ixgbe_sysctl_interrupt_rate_handler, 0,
1870 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1871 break;
1872
1873 if (sysctl_createv(log, 0, &rnode, &cnode,
1874 CTLFLAG_READONLY, CTLTYPE_INT,
1875 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1876 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1877 0, CTL_CREATE, CTL_EOL) != 0)
1878 break;
1879
1880 if (sysctl_createv(log, 0, &rnode, &cnode,
1881 CTLFLAG_READONLY, CTLTYPE_INT,
1882 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1883 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1884 0, CTL_CREATE, CTL_EOL) != 0)
1885 break;
1886
1887 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1888 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1889 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1890 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1891 "Handled queue in softint");
1892 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1893 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1894 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1895 NULL, adapter->queues[i].evnamebuf, "TSO");
1896 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1897 NULL, adapter->queues[i].evnamebuf,
1898 "TX Queue No Descriptor Available");
1899 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1900 NULL, adapter->queues[i].evnamebuf,
1901 "Queue Packets Transmitted");
1902 #ifndef IXGBE_LEGACY_TX
1903 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1904 NULL, adapter->queues[i].evnamebuf,
1905 "Packets dropped in pcq");
1906 #endif
1907
1908 if (sysctl_createv(log, 0, &rnode, &cnode,
1909 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1910 SYSCTL_DESCR("Receive Descriptor next to check"),
1911 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1912 CTL_CREATE, CTL_EOL) != 0)
1913 break;
1914
1915 if (sysctl_createv(log, 0, &rnode, &cnode,
1916 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1917 SYSCTL_DESCR("Receive Descriptor next to refresh"),
1918 ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1919 CTL_CREATE, CTL_EOL) != 0)
1920 break;
1921
1922 if (sysctl_createv(log, 0, &rnode, &cnode,
1923 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1924 SYSCTL_DESCR("Receive Descriptor Head"),
1925 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1926 CTL_CREATE, CTL_EOL) != 0)
1927 break;
1928
1929 if (sysctl_createv(log, 0, &rnode, &cnode,
1930 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1931 SYSCTL_DESCR("Receive Descriptor Tail"),
1932 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1933 CTL_CREATE, CTL_EOL) != 0)
1934 break;
1935
1936 if (i < __arraycount(stats->qprc)) {
1937 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1938 NULL, adapter->queues[i].evnamebuf, "qprc");
1939 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1940 NULL, adapter->queues[i].evnamebuf, "qptc");
1941 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1942 NULL, adapter->queues[i].evnamebuf, "qbrc");
1943 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1944 NULL, adapter->queues[i].evnamebuf, "qbtc");
1945 if (hw->mac.type >= ixgbe_mac_82599EB)
1946 evcnt_attach_dynamic(&stats->qprdc[i],
1947 EVCNT_TYPE_MISC, NULL,
1948 adapter->queues[i].evnamebuf, "qprdc");
1949 }
1950
1951 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1952 NULL, adapter->queues[i].evnamebuf,
1953 "Queue Packets Received");
1954 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1955 NULL, adapter->queues[i].evnamebuf,
1956 "Queue Bytes Received");
1957 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1958 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1959 evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
1960 NULL, adapter->queues[i].evnamebuf, "Rx no mbuf");
1961 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1962 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1963 #ifdef LRO
1964 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1965 CTLFLAG_RD, &lro->lro_queued, 0,
1966 "LRO Queued");
1967 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1968 CTLFLAG_RD, &lro->lro_flushed, 0,
1969 "LRO Flushed");
1970 #endif /* LRO */
1971 }
1972
1973 /* MAC stats get their own sub node */
1974
1975 snprintf(stats->namebuf,
1976 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1977
1978 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "rx csum offload - IP");
1980 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "rx csum offload - L4");
1982 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "rx csum offload - IP bad");
1984 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "rx csum offload - L4 bad");
1986 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Interrupt conditions zero");
1988 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Legacy interrupts");
1990
1991 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "CRC Errors");
1993 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Illegal Byte Errors");
1995 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Byte Errors");
1997 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "MAC Short Packets Discarded");
1999 if (hw->mac.type >= ixgbe_mac_X550)
2000 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Bad SFD");
2002 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Total Packets Missed");
2004 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "MAC Local Faults");
2006 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "MAC Remote Faults");
2008 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Receive Length Errors");
2010 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Link XON Transmitted");
2012 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Link XON Received");
2014 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Link XOFF Transmitted");
2016 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Link XOFF Received");
2018
2019 /* Packet Reception Stats */
2020 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Total Octets Received");
2022 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Good Octets Received");
2024 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Total Packets Received");
2026 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Good Packets Received");
2028 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Multicast Packets Received");
2030 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Broadcast Packets Received");
2032 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "64 byte frames received ");
2034 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "65-127 byte frames received");
2036 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "128-255 byte frames received");
2038 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "256-511 byte frames received");
2040 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "512-1023 byte frames received");
2042 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "1023-1522 byte frames received");
2044 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "Receive Undersized");
2046 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "Fragmented Packets Received ");
2048 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "Oversized Packets Received");
2050 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Received Jabber");
2052 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Management Packets Received");
2054 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Management Packets Dropped");
2056 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Checksum Errors");
2058
2059 /* Packet Transmission Stats */
2060 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Good Octets Transmitted");
2062 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Total Packets Transmitted");
2064 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2065 stats->namebuf, "Good Packets Transmitted");
2066 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "Broadcast Packets Transmitted");
2068 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "Multicast Packets Transmitted");
2070 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "Management Packets Transmitted");
2072 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "64 byte frames transmitted ");
2074 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "65-127 byte frames transmitted");
2076 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "128-255 byte frames transmitted");
2078 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2079 stats->namebuf, "256-511 byte frames transmitted");
2080 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2081 stats->namebuf, "512-1023 byte frames transmitted");
2082 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2083 stats->namebuf, "1024-1522 byte frames transmitted");
2084 } /* ixgbe_add_hw_stats */
2085
2086 static void
2087 ixgbe_clear_evcnt(struct adapter *adapter)
2088 {
2089 struct tx_ring *txr = adapter->tx_rings;
2090 struct rx_ring *rxr = adapter->rx_rings;
2091 struct ixgbe_hw *hw = &adapter->hw;
2092 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2093 int i;
2094
2095 adapter->efbig_tx_dma_setup.ev_count = 0;
2096 adapter->mbuf_defrag_failed.ev_count = 0;
2097 adapter->efbig2_tx_dma_setup.ev_count = 0;
2098 adapter->einval_tx_dma_setup.ev_count = 0;
2099 adapter->other_tx_dma_setup.ev_count = 0;
2100 adapter->eagain_tx_dma_setup.ev_count = 0;
2101 adapter->enomem_tx_dma_setup.ev_count = 0;
2102 adapter->tso_err.ev_count = 0;
2103 adapter->watchdog_events.ev_count = 0;
2104 adapter->link_irq.ev_count = 0;
2105 adapter->link_sicount.ev_count = 0;
2106 adapter->mod_sicount.ev_count = 0;
2107 adapter->msf_sicount.ev_count = 0;
2108 adapter->phy_sicount.ev_count = 0;
2109
2110 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2111 if (i < __arraycount(stats->mpc)) {
2112 stats->mpc[i].ev_count = 0;
2113 if (hw->mac.type == ixgbe_mac_82598EB)
2114 stats->rnbc[i].ev_count = 0;
2115 }
2116 if (i < __arraycount(stats->pxontxc)) {
2117 stats->pxontxc[i].ev_count = 0;
2118 stats->pxonrxc[i].ev_count = 0;
2119 stats->pxofftxc[i].ev_count = 0;
2120 stats->pxoffrxc[i].ev_count = 0;
2121 if (hw->mac.type >= ixgbe_mac_82599EB)
2122 stats->pxon2offc[i].ev_count = 0;
2123 }
2124 }
2125
2126 txr = adapter->tx_rings;
2127 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2128 adapter->queues[i].irqs.ev_count = 0;
2129 adapter->queues[i].handleq.ev_count = 0;
2130 adapter->queues[i].req.ev_count = 0;
2131 txr->no_desc_avail.ev_count = 0;
2132 txr->total_packets.ev_count = 0;
2133 txr->tso_tx.ev_count = 0;
2134 #ifndef IXGBE_LEGACY_TX
2135 txr->pcq_drops.ev_count = 0;
2136 #endif
2137 txr->q_efbig_tx_dma_setup = 0;
2138 txr->q_mbuf_defrag_failed = 0;
2139 txr->q_efbig2_tx_dma_setup = 0;
2140 txr->q_einval_tx_dma_setup = 0;
2141 txr->q_other_tx_dma_setup = 0;
2142 txr->q_eagain_tx_dma_setup = 0;
2143 txr->q_enomem_tx_dma_setup = 0;
2144 txr->q_tso_err = 0;
2145
2146 if (i < __arraycount(stats->qprc)) {
2147 stats->qprc[i].ev_count = 0;
2148 stats->qptc[i].ev_count = 0;
2149 stats->qbrc[i].ev_count = 0;
2150 stats->qbtc[i].ev_count = 0;
2151 if (hw->mac.type >= ixgbe_mac_82599EB)
2152 stats->qprdc[i].ev_count = 0;
2153 }
2154
2155 rxr->rx_packets.ev_count = 0;
2156 rxr->rx_bytes.ev_count = 0;
2157 rxr->rx_copies.ev_count = 0;
2158 rxr->no_mbuf.ev_count = 0;
2159 rxr->rx_discarded.ev_count = 0;
2160 }
2161 stats->ipcs.ev_count = 0;
2162 stats->l4cs.ev_count = 0;
2163 stats->ipcs_bad.ev_count = 0;
2164 stats->l4cs_bad.ev_count = 0;
2165 stats->intzero.ev_count = 0;
2166 stats->legint.ev_count = 0;
2167 stats->crcerrs.ev_count = 0;
2168 stats->illerrc.ev_count = 0;
2169 stats->errbc.ev_count = 0;
2170 stats->mspdc.ev_count = 0;
2171 if (hw->mac.type >= ixgbe_mac_X550)
2172 stats->mbsdc.ev_count = 0;
2173 stats->mpctotal.ev_count = 0;
2174 stats->mlfc.ev_count = 0;
2175 stats->mrfc.ev_count = 0;
2176 stats->rlec.ev_count = 0;
2177 stats->lxontxc.ev_count = 0;
2178 stats->lxonrxc.ev_count = 0;
2179 stats->lxofftxc.ev_count = 0;
2180 stats->lxoffrxc.ev_count = 0;
2181
2182 /* Packet Reception Stats */
2183 stats->tor.ev_count = 0;
2184 stats->gorc.ev_count = 0;
2185 stats->tpr.ev_count = 0;
2186 stats->gprc.ev_count = 0;
2187 stats->mprc.ev_count = 0;
2188 stats->bprc.ev_count = 0;
2189 stats->prc64.ev_count = 0;
2190 stats->prc127.ev_count = 0;
2191 stats->prc255.ev_count = 0;
2192 stats->prc511.ev_count = 0;
2193 stats->prc1023.ev_count = 0;
2194 stats->prc1522.ev_count = 0;
2195 stats->ruc.ev_count = 0;
2196 stats->rfc.ev_count = 0;
2197 stats->roc.ev_count = 0;
2198 stats->rjc.ev_count = 0;
2199 stats->mngprc.ev_count = 0;
2200 stats->mngpdc.ev_count = 0;
2201 stats->xec.ev_count = 0;
2202
2203 /* Packet Transmission Stats */
2204 stats->gotc.ev_count = 0;
2205 stats->tpt.ev_count = 0;
2206 stats->gptc.ev_count = 0;
2207 stats->bptc.ev_count = 0;
2208 stats->mptc.ev_count = 0;
2209 stats->mngptc.ev_count = 0;
2210 stats->ptc64.ev_count = 0;
2211 stats->ptc127.ev_count = 0;
2212 stats->ptc255.ev_count = 0;
2213 stats->ptc511.ev_count = 0;
2214 stats->ptc1023.ev_count = 0;
2215 stats->ptc1522.ev_count = 0;
2216 }
2217
2218 /************************************************************************
2219 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2220 *
2221 * Retrieves the TDH value from the hardware
2222 ************************************************************************/
2223 static int
2224 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2225 {
2226 struct sysctlnode node = *rnode;
2227 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2228 struct adapter *adapter;
2229 uint32_t val;
2230
2231 if (!txr)
2232 return (0);
2233
2234 adapter = txr->adapter;
2235 if (ixgbe_fw_recovery_mode_swflag(adapter))
2236 return (EPERM);
2237
2238 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2239 node.sysctl_data = &val;
2240 return sysctl_lookup(SYSCTLFN_CALL(&node));
2241 } /* ixgbe_sysctl_tdh_handler */
2242
2243 /************************************************************************
2244 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2245 *
2246 * Retrieves the TDT value from the hardware
2247 ************************************************************************/
2248 static int
2249 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2250 {
2251 struct sysctlnode node = *rnode;
2252 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2253 struct adapter *adapter;
2254 uint32_t val;
2255
2256 if (!txr)
2257 return (0);
2258
2259 adapter = txr->adapter;
2260 if (ixgbe_fw_recovery_mode_swflag(adapter))
2261 return (EPERM);
2262
2263 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2264 node.sysctl_data = &val;
2265 return sysctl_lookup(SYSCTLFN_CALL(&node));
2266 } /* ixgbe_sysctl_tdt_handler */
2267
2268 /************************************************************************
2269 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2270 * handler function
2271 *
2272 * Retrieves the next_to_check value
2273 ************************************************************************/
2274 static int
2275 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2276 {
2277 struct sysctlnode node = *rnode;
2278 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2279 struct adapter *adapter;
2280 uint32_t val;
2281
2282 if (!rxr)
2283 return (0);
2284
2285 adapter = rxr->adapter;
2286 if (ixgbe_fw_recovery_mode_swflag(adapter))
2287 return (EPERM);
2288
2289 val = rxr->next_to_check;
2290 node.sysctl_data = &val;
2291 return sysctl_lookup(SYSCTLFN_CALL(&node));
2292 } /* ixgbe_sysctl_next_to_check_handler */
2293
2294 /************************************************************************
2295 * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to check
2296 * handler function
2297 *
2298 * Retrieves the next_to_refresh value
2299 ************************************************************************/
2300 static int
2301 ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2302 {
2303 struct sysctlnode node = *rnode;
2304 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2305 struct adapter *adapter;
2306 uint32_t val;
2307
2308 if (!rxr)
2309 return (0);
2310
2311 adapter = rxr->adapter;
2312 if (ixgbe_fw_recovery_mode_swflag(adapter))
2313 return (EPERM);
2314
2315 val = rxr->next_to_refresh;
2316 node.sysctl_data = &val;
2317 return sysctl_lookup(SYSCTLFN_CALL(&node));
2318 } /* ixgbe_sysctl_next_to_refresh_handler */
2319
2320 /************************************************************************
2321 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2322 *
2323 * Retrieves the RDH value from the hardware
2324 ************************************************************************/
2325 static int
2326 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2327 {
2328 struct sysctlnode node = *rnode;
2329 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2330 struct adapter *adapter;
2331 uint32_t val;
2332
2333 if (!rxr)
2334 return (0);
2335
2336 adapter = rxr->adapter;
2337 if (ixgbe_fw_recovery_mode_swflag(adapter))
2338 return (EPERM);
2339
2340 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2341 node.sysctl_data = &val;
2342 return sysctl_lookup(SYSCTLFN_CALL(&node));
2343 } /* ixgbe_sysctl_rdh_handler */
2344
2345 /************************************************************************
2346 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2347 *
2348 * Retrieves the RDT value from the hardware
2349 ************************************************************************/
2350 static int
2351 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2352 {
2353 struct sysctlnode node = *rnode;
2354 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2355 struct adapter *adapter;
2356 uint32_t val;
2357
2358 if (!rxr)
2359 return (0);
2360
2361 adapter = rxr->adapter;
2362 if (ixgbe_fw_recovery_mode_swflag(adapter))
2363 return (EPERM);
2364
2365 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2366 node.sysctl_data = &val;
2367 return sysctl_lookup(SYSCTLFN_CALL(&node));
2368 } /* ixgbe_sysctl_rdt_handler */
2369
#if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixgbe_register_vlan
 *
 * Run via vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This
 * just creates the entry in the soft version of the
 * VFTA, init will repopulate the real table.
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * shadow_vfta is an array of 32-bit words: word index is
	 * vtag[11:5], bit position within the word is vtag[4:0].
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= ((u32)1 << bit);
	/* Push the updated shadow table to the hardware VFTA. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */

/************************************************************************
 * ixgbe_unregister_vlan
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit decomposition as ixgbe_register_vlan(). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
#endif
2425
2426 static void
2427 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2428 {
2429 struct ethercom *ec = &adapter->osdep.ec;
2430 struct ixgbe_hw *hw = &adapter->hw;
2431 struct rx_ring *rxr;
2432 int i;
2433 u32 ctrl;
2434 bool hwtagging;
2435
2436 /*
2437 * This function is called from both if_init and ifflags_cb()
2438 * on NetBSD.
2439 */
2440
2441 /* Enable HW tagging only if any vlan is attached */
2442 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2443 && VLAN_ATTACHED(ec);
2444
2445 /* Setup the queues for vlans */
2446 for (i = 0; i < adapter->num_queues; i++) {
2447 rxr = &adapter->rx_rings[i];
2448 /*
2449 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2450 */
2451 if (hw->mac.type != ixgbe_mac_82598EB) {
2452 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2453 if (hwtagging)
2454 ctrl |= IXGBE_RXDCTL_VME;
2455 else
2456 ctrl &= ~IXGBE_RXDCTL_VME;
2457 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2458 }
2459 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2460 }
2461
2462 /*
2463 * A soft reset zero's out the VFTA, so
2464 * we need to repopulate it now.
2465 */
2466 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2467 if (adapter->shadow_vfta[i] != 0)
2468 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2469 adapter->shadow_vfta[i]);
2470
2471 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2472 /* Enable the Filter Table if enabled */
2473 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2474 ctrl |= IXGBE_VLNCTRL_VFE;
2475 else
2476 ctrl &= ~IXGBE_VLNCTRL_VFE;
2477 /* VLAN hw tagging for 82598 */
2478 if (hw->mac.type == ixgbe_mac_82598EB) {
2479 if (hwtagging)
2480 ctrl |= IXGBE_VLNCTRL_VME;
2481 else
2482 ctrl &= ~IXGBE_VLNCTRL_VME;
2483 }
2484 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2485 } /* ixgbe_setup_vlan_hw_support */
2486
/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	/* Normal case: shared code reads our own PCIe capability. */
	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/*
	 * NOTE(review): two device_parent() hops assumes the autoconf
	 * layout ixgN -> pciM -> ppbK; confirm against the device tree.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	/* LCSR: the Link Status register is the upper 16 bits. */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	/* Warn when the slot bandwidth cannot feed the adapter. */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2594
2595 /************************************************************************
2596 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2597 ************************************************************************/
2598 static inline void
2599 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2600 {
2601 struct ixgbe_hw *hw = &adapter->hw;
2602 struct ix_queue *que = &adapter->queues[vector];
2603 u64 queue = 1ULL << vector;
2604 u32 mask;
2605
2606 mutex_enter(&que->dc_mtx);
2607 if (que->disabled_count > 0 && --que->disabled_count > 0)
2608 goto out;
2609
2610 if (hw->mac.type == ixgbe_mac_82598EB) {
2611 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2612 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2613 } else {
2614 mask = (queue & 0xFFFFFFFF);
2615 if (mask)
2616 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2617 mask = (queue >> 32);
2618 if (mask)
2619 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2620 }
2621 out:
2622 mutex_exit(&que->dc_mtx);
2623 } /* ixgbe_enable_queue */
2624
2625 /************************************************************************
2626 * ixgbe_disable_queue_internal
2627 ************************************************************************/
2628 static inline void
2629 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2630 {
2631 struct ixgbe_hw *hw = &adapter->hw;
2632 struct ix_queue *que = &adapter->queues[vector];
2633 u64 queue = 1ULL << vector;
2634 u32 mask;
2635
2636 mutex_enter(&que->dc_mtx);
2637
2638 if (que->disabled_count > 0) {
2639 if (nestok)
2640 que->disabled_count++;
2641 goto out;
2642 }
2643 que->disabled_count++;
2644
2645 if (hw->mac.type == ixgbe_mac_82598EB) {
2646 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2647 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2648 } else {
2649 mask = (queue & 0xFFFFFFFF);
2650 if (mask)
2651 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2652 mask = (queue >> 32);
2653 if (mask)
2654 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2655 }
2656 out:
2657 mutex_exit(&que->dc_mtx);
2658 } /* ixgbe_disable_queue_internal */
2659
/************************************************************************
 * ixgbe_disable_queue
 *
 *   Nestable queue interrupt disable: each call increments the
 *   disable count and must be balanced by one ixgbe_enable_queue().
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2669
2670 /************************************************************************
2671 * ixgbe_sched_handle_que - schedule deferred packet processing
2672 ************************************************************************/
2673 static inline void
2674 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2675 {
2676
2677 if (que->txrx_use_workqueue) {
2678 /*
2679 * adapter->que_wq is bound to each CPU instead of
2680 * each NIC queue to reduce workqueue kthread. As we
2681 * should consider about interrupt affinity in this
2682 * function, the workqueue kthread must be WQ_PERCPU.
2683 * If create WQ_PERCPU workqueue kthread for each NIC
2684 * queue, that number of created workqueue kthread is
2685 * (number of used NIC queue) * (number of CPUs) =
2686 * (number of CPUs) ^ 2 most often.
2687 *
2688 * The same NIC queue's interrupts are avoided by
2689 * masking the queue's interrupt. And different
2690 * NIC queue's interrupts use different struct work
2691 * (que->wq_cookie). So, "enqueued flag" to avoid
2692 * twice workqueue_enqueue() is not required .
2693 */
2694 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2695 } else {
2696 softint_schedule(que->que_si);
2697 }
2698 }
2699
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler.  Masks the queue's interrupt, reaps
 *   TX completions, optionally recomputes the adaptive interrupt
 *   moderation (AIM) value from the average frame size seen since the
 *   last interrupt, then defers RX processing (softint/workqueue) or
 *   unmasks the queue.
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Keep this queue masked until deferred processing re-enables it. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr := max average frame size (bytes) over TX and RX. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Defer remaining RX work if any; otherwise unmask right away. */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2801
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 *
 *   Refreshes the link state, then maps the adapter's physical-layer
 *   capability bits (adapter->phy_layer) and current link speed to the
 *   corresponding ifmedia subtype, and reports the active flow-control
 *   pause settings.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T/TX) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Optical layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	/* Backplane (KR/KX4/KX); fall back to SR/CX4 without IFM_ETH_XTYPE */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2957
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.
 *
 *   Maps the requested ifmedia subtype to an ixgbe_link_speed mask,
 *   programs the link via hw->mac.ops.setup_link(), and records the
 *   selection in adapter->advertise.
 *
 *   Returns 0 on success; EINVAL for a non-Ethernet or unknown media
 *   type, EPERM for backplane media, ENODEV if the link capabilities
 *   cannot be determined for IFM_AUTO.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is not user-selectable. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware reports it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
#ifdef IFM_ETH_XTYPE
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds as a bitmask:
	 * bit0=100M, bit1=1G, bit2=10G, bit3=10M, bit4=2.5G, bit5=5G.
	 * 0 means autonegotiate everything (IFM_AUTO).
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
3065
3066 /************************************************************************
3067 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3068 ************************************************************************/
3069 static int
3070 ixgbe_msix_link(void *arg)
3071 {
3072 struct adapter *adapter = arg;
3073 struct ixgbe_hw *hw = &adapter->hw;
3074 u32 eicr, eicr_mask;
3075 s32 retval;
3076
3077 ++adapter->link_irq.ev_count;
3078
3079 /* Pause other interrupts */
3080 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3081
3082 /* First get the cause */
3083 /*
3084 * The specifications of 82598, 82599, X540 and X550 say EICS register
3085 * is write only. However, Linux says it is a workaround for silicon
3086 * errata to read EICS instead of EICR to get interrupt cause. It seems
3087 * there is a problem about read clear mechanism for EICR register.
3088 */
3089 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3090 /* Be sure the queue bits are not cleared */
3091 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3092 /* Clear interrupt with write */
3093 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3094
3095 if (ixgbe_is_sfp(hw)) {
3096 /* Pluggable optics-related interrupt */
3097 if (hw->mac.type >= ixgbe_mac_X540)
3098 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3099 else
3100 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3101
3102 /*
3103 * An interrupt might not arrive when a module is inserted.
3104 * When an link status change interrupt occurred and the driver
3105 * still regard SFP as unplugged, issue the module softint
3106 * and then issue LSC interrupt.
3107 */
3108 if ((eicr & eicr_mask)
3109 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3110 && (eicr & IXGBE_EICR_LSC))) {
3111 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3112 softint_schedule(adapter->mod_si);
3113 }
3114
3115 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3116 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3117 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3118 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3119 softint_schedule(adapter->msf_si);
3120 }
3121 }
3122
3123 /* Link status change */
3124 if (eicr & IXGBE_EICR_LSC) {
3125 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3126 softint_schedule(adapter->link_si);
3127 }
3128
3129 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3130 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3131 (eicr & IXGBE_EICR_FLOW_DIR)) {
3132 /* This is probably overkill :) */
3133 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3134 return 1;
3135 /* Disable the interrupt */
3136 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3137 softint_schedule(adapter->fdir_si);
3138 }
3139
3140 if (eicr & IXGBE_EICR_ECC) {
3141 device_printf(adapter->dev,
3142 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3143 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3144 }
3145
3146 /* Check for over temp condition */
3147 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3148 switch (adapter->hw.mac.type) {
3149 case ixgbe_mac_X550EM_a:
3150 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3151 break;
3152 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3153 IXGBE_EICR_GPI_SDP0_X550EM_a);
3154 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3155 IXGBE_EICR_GPI_SDP0_X550EM_a);
3156 retval = hw->phy.ops.check_overtemp(hw);
3157 if (retval != IXGBE_ERR_OVERTEMP)
3158 break;
3159 device_printf(adapter->dev,
3160 "CRITICAL: OVER TEMP!! "
3161 "PHY IS SHUT DOWN!!\n");
3162 device_printf(adapter->dev,
3163 "System shutdown required!\n");
3164 break;
3165 default:
3166 if (!(eicr & IXGBE_EICR_TS))
3167 break;
3168 retval = hw->phy.ops.check_overtemp(hw);
3169 if (retval != IXGBE_ERR_OVERTEMP)
3170 break;
3171 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3172 device_printf(adapter->dev, "System shutdown required!\n");
3173 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3174 break;
3175 }
3176 }
3177
3178 /* Check for VF message */
3179 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3180 (eicr & IXGBE_EICR_MAILBOX))
3181 softint_schedule(adapter->mbx_si);
3182 }
3183
3184 /* Check for fan failure */
3185 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3186 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3187 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3188 }
3189
3190 /* External PHY interrupt */
3191 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3192 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3193 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3194 softint_schedule(adapter->phy_si);
3195 }
3196
3197 /* Re-enable other interrupts */
3198 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3199 return 1;
3200 } /* ixgbe_msix_link */
3201
3202 static void
3203 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3204 {
3205
3206 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3207 itr |= itr << 16;
3208 else
3209 itr |= IXGBE_EITR_CNT_WDIS;
3210
3211 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3212 }
3213
3214
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler exposing a queue's interrupt rate (interrupts per
 *   second).  Reads the current EITR interval field (bits 3..11,
 *   apparently in 2us units given the 500000 = 1e6/2 conversion),
 *   reports it as a rate, and on write converts the requested rate
 *   back into an EITR interval.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No tuning allowed while firmware recovery is in progress. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Convert the EITR interval field to an interrupt rate. */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		/* Clamp to the slowest representable rate. */
		if (rate < 1000)
			rate = 1000;
		/* 4000000/rate == (500000/rate) << 3, i.e. usec field. */
		reg |= ((4000000 / rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3268
3269 const struct sysctlnode *
3270 ixgbe_sysctl_instance(struct adapter *adapter)
3271 {
3272 const char *dvname;
3273 struct sysctllog **log;
3274 int rc;
3275 const struct sysctlnode *rnode;
3276
3277 if (adapter->sysctltop != NULL)
3278 return adapter->sysctltop;
3279
3280 log = &adapter->sysctllog;
3281 dvname = device_xname(adapter->dev);
3282
3283 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3284 0, CTLTYPE_NODE, dvname,
3285 SYSCTL_DESCR("ixgbe information and settings"),
3286 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3287 goto err;
3288
3289 return rnode;
3290 err:
3291 device_printf(adapter->dev,
3292 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3293 return NULL;
3294 }
3295
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Create the per-device sysctl tree: common tunables/statistics for
 *   all MACs, plus conditional nodes for X550+ (DMA coalescing),
 *   WoL-capable devices, X552/X557-AT external PHY, X550EM_a firmware
 *   PHY, and EEE-capable devices.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL)
	    != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
	    ixgbe_sysctl_rx_copy_len, 0,
	    (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM knob from the loader/global default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR(
			    "External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3463
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR0 (the device's register space) into kernel virtual
 *   address space and make sure PCI memory-space decoding is enabled.
 *   Returns 0 on success, ENXIO if BAR0 is missing or cannot be
 *   mapped.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3513
/*
 * Tear down all deferred-processing resources: per-queue TX/RX
 * softints, the TX and RX workqueues, and the softints for link, SFP
 * module, multispeed-fiber, PHY, flow director and SR-IOV mailbox
 * events.  NULL checks make this safe to call on a partially
 * initialized adapter.
 */
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	/* Per-queue softints (TX softint only when not in legacy TX mode). */
	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	/* NOTE(review): checks feat_cap here but feat_en elsewhere for
	 * SRIOV — presumably mbx_si is established under feat_cap;
	 * confirm against the attach path. */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
3566
3567 /************************************************************************
3568 * ixgbe_detach - Device removal routine
3569 *
3570 * Called when the driver is being removed.
3571 * Stops the adapter and deallocates all the resources
3572 * that were allocated for driver operation.
3573 *
3574 * return 0 on success, positive on failure
3575 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to tear down if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while SR-IOV virtual functions are active. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls
	 * ixgbe_stop_locked(), so it's not required to call ixgbe_stop_locked()
	 * directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Tear down the softints established at attach time. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any in-flight callouts to finish before freeing. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);

	/*
	 * Detach every event counter registered in attach, in the same
	 * groups: per-adapter TX setup errors first.
	 */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-traffic-class counters; some exist only on certain MACs. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters (rxr was initialized above; reset txr). */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_mbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release ring/queue memory, multicast table and the lock. */
	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3763
3764 /************************************************************************
3765 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3766 *
3767 * Prepare the adapter/port for LPLU and/or WoL
3768 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Caller holds the core lock; we stop the interface below. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY from being reset across the stop. */
		hw->phy.reset_disable = true;
		ixgbe_stop_locked(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop_locked(adapter);
	}

	if (!hw->wol_enabled) {
		/* WoL disabled: power the PHY down, clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success, or the LPLU error code from the PHY ops. */
	return error;
} /* ixgbe_setup_low_power_mode */
3823
3824 /************************************************************************
3825 * ixgbe_shutdown - Shutdown entry point
3826 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* Compiled out: FreeBSD-style shutdown hook, unused on NetBSD. */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Enter the low-power/WoL state under the core lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3843
3844 /************************************************************************
3845 * ixgbe_suspend
3846 *
3847 * From D0 to D3
3848 ************************************************************************/
3849 static bool
3850 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3851 {
3852 struct adapter *adapter = device_private(dev);
3853 int error = 0;
3854
3855 INIT_DEBUGOUT("ixgbe_suspend: begin");
3856
3857 IXGBE_CORE_LOCK(adapter);
3858
3859 error = ixgbe_setup_low_power_mode(adapter);
3860
3861 IXGBE_CORE_UNLOCK(adapter);
3862
3863 return (error);
3864 } /* ixgbe_suspend */
3865
3866 /************************************************************************
3867 * ixgbe_resume
3868 *
3869 * From D3 to D0
3870 ************************************************************************/
3871 static bool
3872 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3873 {
3874 struct adapter *adapter = device_private(dev);
3875 struct ifnet *ifp = adapter->ifp;
3876 struct ixgbe_hw *hw = &adapter->hw;
3877 u32 wus;
3878
3879 INIT_DEBUGOUT("ixgbe_resume: begin");
3880
3881 IXGBE_CORE_LOCK(adapter);
3882
3883 /* Read & clear WUS register */
3884 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3885 if (wus)
3886 device_printf(dev, "Woken up by (WUS): %#010x\n",
3887 IXGBE_READ_REG(hw, IXGBE_WUS));
3888 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3889 /* And clear WUFC until next low-power transition */
3890 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3891
3892 /*
3893 * Required after D3->D0 transition;
3894 * will re-advertise all previous advertised speeds
3895 */
3896 if (ifp->if_flags & IFF_UP)
3897 ixgbe_init_locked(adapter);
3898
3899 IXGBE_CORE_UNLOCK(adapter);
3900
3901 return true;
3902 } /* ixgbe_resume */
3903
3904 /*
3905 * Set the various hardware offload abilities.
3906 *
3907 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3908 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3909 * mbuf offload flags the driver will understand.
3910 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * NOTE(review): deliberately empty in this port.  The FreeBSD
	 * original translates if_capenable into if_hwassist; NetBSD has
	 * no direct equivalent here -- confirm before implementing.
	 */
}
3916
3917 /************************************************************************
3918 * ixgbe_init_locked - Init entry point
3919 *
3920 * Used in two ways: It is used by the stack as an init
3921 * entry point in network interface structure. It is also
3922 * used by the driver as a hw/sw initialization routine to
3923 * get to a consistent state.
3924 *
 *   No return value: on a setup failure the adapter is stopped via
 *   ixgbe_stop_locked() and the function returns early.
3926 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, error;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the local timer before reprogramming. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Use fixed buffer size, even for jumbo frames */
	adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	error = ixgbe_setup_receive_structures(adapter);
	if (error) {
		device_printf(dev,
		    "Could not setup receive structures (err = %d)\n", error);
		ixgbe_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the queue-enable to take effect. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		/* Ensure descriptor writes complete before the tail bump. */
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the watchdog/statistics timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		error = hw->phy.ops.identify(hw);
		if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4175
4176 /************************************************************************
4177 * ixgbe_init
4178 ************************************************************************/
4179 static int
4180 ixgbe_init(struct ifnet *ifp)
4181 {
4182 struct adapter *adapter = ifp->if_softc;
4183
4184 IXGBE_CORE_LOCK(adapter);
4185 ixgbe_init_locked(adapter);
4186 IXGBE_CORE_UNLOCK(adapter);
4187
4188 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4189 } /* ixgbe_init */
4190
4191 /************************************************************************
4192 * ixgbe_set_ivar
4193 *
4194 * Setup the correct IVAR register for a particular MSI-X interrupt
4195 * (yes this is all very magic and confusing :)
4196 * - entry is the register array entry
4197 * - vector is the MSI-X vector for this queue
4198 * - type is RX/TX/MISC
4199 ************************************************************************/
4200 static void
4201 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4202 {
4203 struct ixgbe_hw *hw = &adapter->hw;
4204 u32 ivar, index;
4205
4206 vector |= IXGBE_IVAR_ALLOC_VAL;
4207
4208 switch (hw->mac.type) {
4209 case ixgbe_mac_82598EB:
4210 if (type == -1)
4211 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4212 else
4213 entry += (type * 64);
4214 index = (entry >> 2) & 0x1F;
4215 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4216 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4217 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4219 break;
4220 case ixgbe_mac_82599EB:
4221 case ixgbe_mac_X540:
4222 case ixgbe_mac_X550:
4223 case ixgbe_mac_X550EM_x:
4224 case ixgbe_mac_X550EM_a:
4225 if (type == -1) { /* MISC IVAR */
4226 index = (entry & 1) * 8;
4227 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4228 ivar &= ~(0xffUL << index);
4229 ivar |= ((u32)vector << index);
4230 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4231 } else { /* RX/TX IVARS */
4232 index = (16 * (entry & 1)) + (8 * type);
4233 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4234 ivar &= ~(0xffUL << index);
4235 ivar |= ((u32)vector << index);
4236 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4237 }
4238 break;
4239 default:
4240 break;
4241 }
4242 } /* ixgbe_set_ivar */
4243
4244 /************************************************************************
4245 * ixgbe_configure_ivars
4246 ************************************************************************/
4247 static void
4248 ixgbe_configure_ivars(struct adapter *adapter)
4249 {
4250 struct ix_queue *que = adapter->queues;
4251 u32 newitr;
4252
4253 if (ixgbe_max_interrupt_rate > 0)
4254 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4255 else {
4256 /*
4257 * Disable DMA coalescing if interrupt moderation is
4258 * disabled.
4259 */
4260 adapter->dmac = 0;
4261 newitr = 0;
4262 }
4263
4264 for (int i = 0; i < adapter->num_queues; i++, que++) {
4265 struct rx_ring *rxr = &adapter->rx_rings[i];
4266 struct tx_ring *txr = &adapter->tx_rings[i];
4267 /* First the RX queue entry */
4268 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4269 /* ... and the TX */
4270 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4271 /* Set an Initial EITR value */
4272 ixgbe_eitr_write(adapter, que->msix, newitr);
4273 /*
4274 * To eliminate influence of the previous state.
4275 * At this point, Tx/Rx interrupt handler
4276 * (ixgbe_msix_que()) cannot be called, so both
4277 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4278 */
4279 que->eitr_setting = 0;
4280 }
4281
4282 /* For the Link interrupt */
4283 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4284 } /* ixgbe_configure_ivars */
4285
4286 /************************************************************************
4287 * ixgbe_config_gpie
4288 ************************************************************************/
4289 static void
4290 ixgbe_config_gpie(struct adapter *adapter)
4291 {
4292 struct ixgbe_hw *hw = &adapter->hw;
4293 u32 gpie;
4294
4295 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4296
4297 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4298 /* Enable Enhanced MSI-X mode */
4299 gpie |= IXGBE_GPIE_MSIX_MODE
4300 | IXGBE_GPIE_EIAME
4301 | IXGBE_GPIE_PBA_SUPPORT
4302 | IXGBE_GPIE_OCD;
4303 }
4304
4305 /* Fan Failure Interrupt */
4306 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4307 gpie |= IXGBE_SDP1_GPIEN;
4308
4309 /* Thermal Sensor Interrupt */
4310 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4311 gpie |= IXGBE_SDP0_GPIEN_X540;
4312
4313 /* Link detection */
4314 switch (hw->mac.type) {
4315 case ixgbe_mac_82599EB:
4316 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4317 break;
4318 case ixgbe_mac_X550EM_x:
4319 case ixgbe_mac_X550EM_a:
4320 gpie |= IXGBE_SDP0_GPIEN_X540;
4321 break;
4322 default:
4323 break;
4324 }
4325
4326 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4327
4328 } /* ixgbe_config_gpie */
4329
4330 /************************************************************************
4331 * ixgbe_config_delay_values
4332 *
4333 * Requires adapter->max_frame_size to be set.
4334 ************************************************************************/
4335 static void
4336 ixgbe_config_delay_values(struct adapter *adapter)
4337 {
4338 struct ixgbe_hw *hw = &adapter->hw;
4339 u32 rxpb, frame, size, tmp;
4340
4341 frame = adapter->max_frame_size;
4342
4343 /* Calculate High Water */
4344 switch (hw->mac.type) {
4345 case ixgbe_mac_X540:
4346 case ixgbe_mac_X550:
4347 case ixgbe_mac_X550EM_x:
4348 case ixgbe_mac_X550EM_a:
4349 tmp = IXGBE_DV_X540(frame, frame);
4350 break;
4351 default:
4352 tmp = IXGBE_DV(frame, frame);
4353 break;
4354 }
4355 size = IXGBE_BT2KB(tmp);
4356 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4357 hw->fc.high_water[0] = rxpb - size;
4358
4359 /* Now calculate Low Water */
4360 switch (hw->mac.type) {
4361 case ixgbe_mac_X540:
4362 case ixgbe_mac_X550:
4363 case ixgbe_mac_X550EM_x:
4364 case ixgbe_mac_X550EM_a:
4365 tmp = IXGBE_LOW_DV_X540(frame);
4366 break;
4367 default:
4368 tmp = IXGBE_LOW_DV(frame);
4369 break;
4370 }
4371 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4372
4373 hw->fc.pause_time = IXGBE_FC_PAUSE;
4374 hw->fc.send_xon = TRUE;
4375 } /* ixgbe_config_delay_values */
4376
4377 /************************************************************************
4378 * ixgbe_set_rxfilter - Multicast Update
4379 *
4380 * Called whenever multicast address list is updated.
4381 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the multicast shadow table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI if the table overflows or a range
		 * (addrlo != addrhi) is requested, which exact-match
		 * filtering cannot express.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Translate PROMISC/ALLMULTI into the FCTRL filter bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Program the exact-match table via the shared-code iterator. */
	if (mcnt <= MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}
} /* ixgbe_set_filter */
4435
4436 /************************************************************************
4437 * ixgbe_mc_array_itr
4438 *
4439 * An iterator function needed by the multicast shared code.
4440 * It feeds the shared code routine the addresses in the
4441 * array of ixgbe_set_rxfilter() one by one.
4442 ************************************************************************/
4443 static u8 *
4444 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4445 {
4446 struct ixgbe_mc_addr *mta;
4447
4448 mta = (struct ixgbe_mc_addr *)*update_ptr;
4449 *vmdq = mta->vmdq;
4450
4451 *update_ptr = (u8*)(mta + 1);
4452
4453 return (mta->addr);
4454 } /* ixgbe_mc_array_itr */
4455
4456 /************************************************************************
4457 * ixgbe_local_timer - Timer routine
4458 *
4459 * Checks for link status, updates statistics,
4460 * and runs the watchdog check.
4461 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Run the real timer body with the core lock held. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4471
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Aggregate per-ring TX setup error counts into adapter evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Reschedule ourselves one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log it, count it, and reinitialize the NIC. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4576
4577 /************************************************************************
4578 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4579 ************************************************************************/
4580 static void
4581 ixgbe_recovery_mode_timer(void *arg)
4582 {
4583 struct adapter *adapter = arg;
4584 struct ixgbe_hw *hw = &adapter->hw;
4585
4586 IXGBE_CORE_LOCK(adapter);
4587 if (ixgbe_fw_recovery_mode(hw)) {
4588 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4589 /* Firmware error detected, entering recovery mode */
4590 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4591
4592 if (hw->adapter_stopped == FALSE)
4593 ixgbe_stop_locked(adapter);
4594 }
4595 } else
4596 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4597
4598 callout_reset(&adapter->recovery_mode_timer, hz,
4599 ixgbe_recovery_mode_timer, adapter);
4600 IXGBE_CORE_UNLOCK(adapter);
4601 } /* ixgbe_recovery_mode_timer */
4602
4603 /************************************************************************
4604 * ixgbe_sfp_probe
4605 *
4606 * Determine if a port had optics inserted.
4607 ************************************************************************/
4608 static bool
4609 ixgbe_sfp_probe(struct adapter *adapter)
4610 {
4611 struct ixgbe_hw *hw = &adapter->hw;
4612 device_t dev = adapter->dev;
4613 bool result = FALSE;
4614
4615 if ((hw->phy.type == ixgbe_phy_nl) &&
4616 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4617 s32 ret = hw->phy.ops.identify_sfp(hw);
4618 if (ret)
4619 goto out;
4620 ret = hw->phy.ops.reset(hw);
4621 adapter->sfp_probe = FALSE;
4622 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4623 device_printf(dev,"Unsupported SFP+ module detected!");
4624 device_printf(dev,
4625 "Reload driver with supported module.\n");
4626 goto out;
4627 } else
4628 device_printf(dev, "SFP+ module detected!\n");
4629 /* We now have supported optics */
4630 result = TRUE;
4631 }
4632 out:
4633
4634 return (result);
4635 } /* ixgbe_sfp_probe */
4636
4637 /************************************************************************
4638 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4639 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	IXGBE_CORE_LOCK(adapter);
	++adapter->mod_sicount.ev_count;
	/*
	 * On parts that need the crosstalk fix, read the SDP pin that
	 * reflects cage occupancy and bail out if no module is present,
	 * so we don't act on a spurious insertion interrupt.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	/* Identify the newly inserted module; unsupported types are refused. */
	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		/* 82598 only needs a PHY reset; newer MACs program the SFP. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}
	/* Defer multispeed-fiber link setup to the MSF softint. */
	softint_schedule(adapter->msf_si);
out:
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mod */
4703
4704
4705 /************************************************************************
4706 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4707 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * Use the configured advertise mask if set; otherwise fall back
	 * to whatever the hardware reports it is capable of.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */
4735
4736 /************************************************************************
4737 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4738 ************************************************************************/
4739 static void
4740 ixgbe_handle_phy(void *context)
4741 {
4742 struct adapter *adapter = context;
4743 struct ixgbe_hw *hw = &adapter->hw;
4744 int error;
4745
4746 ++adapter->phy_sicount.ev_count;
4747 error = hw->phy.ops.handle_lasi(hw);
4748 if (error == IXGBE_ERR_OVERTEMP)
4749 device_printf(adapter->dev,
4750 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4751 " PHY will downshift to lower power state!\n");
4752 else if (error)
4753 device_printf(adapter->dev,
4754 "Error handling LASI interrupt: %d\n", error);
4755 } /* ixgbe_handle_phy */
4756
4757 static void
4758 ixgbe_ifstop(struct ifnet *ifp, int disable)
4759 {
4760 struct adapter *adapter = ifp->if_softc;
4761
4762 IXGBE_CORE_LOCK(adapter);
4763 ixgbe_stop_locked(adapter);
4764 IXGBE_CORE_UNLOCK(adapter);
4765 }
4766
4767 /************************************************************************
4768 * ixgbe_stop_locked - Stop the hardware
4769 *
4770 * Disables all traffic on the adapter by issuing a
4771 * global reset on the MAC and deallocates TX/RX buffers.
4772 ************************************************************************/
static void
ixgbe_stop_locked(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so that the following ixgbe_stop_adapter()
	 * actually performs the stop rather than returning early.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop_locked */
4808
4809 /************************************************************************
4810 * ixgbe_update_link_status - Update OS on link state
4811 *
4812 * Note: Only updates the OS on the cached link state.
4813 * The real check of the hardware only happens with
4814 * a link interrupt.
4815 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on the transition to LINK_STATE_UP. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Optionally log the negotiated speed. */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush pending work now that the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4902
4903 /************************************************************************
4904 * ixgbe_config_dmac - Configure DMA Coalescing
4905 ************************************************************************/
4906 static void
4907 ixgbe_config_dmac(struct adapter *adapter)
4908 {
4909 struct ixgbe_hw *hw = &adapter->hw;
4910 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4911
4912 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4913 return;
4914
4915 if (dcfg->watchdog_timer ^ adapter->dmac ||
4916 dcfg->link_speed ^ adapter->link_speed) {
4917 dcfg->watchdog_timer = adapter->dmac;
4918 dcfg->fcoe_en = false;
4919 dcfg->link_speed = adapter->link_speed;
4920 dcfg->num_tcs = 1;
4921
4922 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4923 dcfg->watchdog_timer, dcfg->link_speed);
4924
4925 hw->mac.ops.dmac_config(hw);
4926 }
4927 } /* ixgbe_config_dmac */
4928
4929 /************************************************************************
4930 * ixgbe_enable_intr
4931 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from all non-queue causes; queues are enabled per-vector. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific causes (ECC, thermal sensor, SFP GPI pins). */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5012
5013 /************************************************************************
5014 * ixgbe_disable_intr_internal
5015 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* With MSI-X, also clear the auto-clear mask. */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	/*
	 * Disable each queue vector; 'nestok' tells the per-queue code
	 * whether nested disables are acceptable.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_do_disable_intr_internal */
5033
5034 /************************************************************************
5035 * ixgbe_disable_intr
5036 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/* Nested disable is allowed on this path. */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5043
5044 /************************************************************************
5045 * ixgbe_ensure_disabled_intr
5046 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/* Non-nesting variant: make sure interrupts end up disabled. */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5053
5054 /************************************************************************
5055 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5056 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* Interrupt with no cause: count it, re-enable, and claim nothing. */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack and defer to mod softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber: ack and defer to msf softint. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/*
	 * If RX/TX work remains, defer it (interrupts stay masked);
	 * otherwise re-enable interrupts now.
	 */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
5147
5148 /************************************************************************
5149 * ixgbe_free_pciintr_resources
5150 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally release the interrupt vectors themselves. */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
5186
5187 /************************************************************************
5188 * ixgbe_free_pci_resources
5189 ************************************************************************/
5190 static void
5191 ixgbe_free_pci_resources(struct adapter *adapter)
5192 {
5193
5194 ixgbe_free_pciintr_resources(adapter);
5195
5196 if (adapter->osdep.mem_size != 0) {
5197 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5198 adapter->osdep.mem_bus_space_handle,
5199 adapter->osdep.mem_size);
5200 }
5201
5202 } /* ixgbe_free_pci_resources */
5203
5204 /************************************************************************
5205 * ixgbe_set_sysctl_value
5206 ************************************************************************/
5207 static void
5208 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5209 const char *description, int *limit, int value)
5210 {
5211 device_t dev = adapter->dev;
5212 struct sysctllog **log;
5213 const struct sysctlnode *rnode, *cnode;
5214
5215 /*
5216 * It's not required to check recovery mode because this function never
5217 * touches hardware.
5218 */
5219
5220 log = &adapter->sysctllog;
5221 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5222 aprint_error_dev(dev, "could not create sysctl root\n");
5223 return;
5224 }
5225 if (sysctl_createv(log, 0, &rnode, &cnode,
5226 CTLFLAG_READWRITE, CTLTYPE_INT,
5227 name, SYSCTL_DESCR(description),
5228 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5229 aprint_error_dev(dev, "could not create sysctl\n");
5230 *limit = value;
5231 } /* ixgbe_set_sysctl_value */
5232
5233 /************************************************************************
5234 * ixgbe_sysctl_flowcntl
5235 *
5236 * SYSCTL wrapper around setting Flow Control
5237 ************************************************************************/
5238 static int
5239 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5240 {
5241 struct sysctlnode node = *rnode;
5242 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5243 int error, fc;
5244
5245 if (ixgbe_fw_recovery_mode_swflag(adapter))
5246 return (EPERM);
5247
5248 fc = adapter->hw.fc.current_mode;
5249 node.sysctl_data = &fc;
5250 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5251 if (error != 0 || newp == NULL)
5252 return error;
5253
5254 /* Don't bother if it's not changed */
5255 if (fc == adapter->hw.fc.current_mode)
5256 return (0);
5257
5258 return ixgbe_set_flowcntl(adapter, fc);
5259 } /* ixgbe_sysctl_flowcntl */
5260
5261 /************************************************************************
5262 * ixgbe_set_flowcntl - Set flow control
5263 *
5264 * Flow control values:
5265 * 0 - off
5266 * 1 - rx pause
5267 * 2 - tx pause
5268 * 3 - full
5269 ************************************************************************/
5270 static int
5271 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5272 {
5273 switch (fc) {
5274 case ixgbe_fc_rx_pause:
5275 case ixgbe_fc_tx_pause:
5276 case ixgbe_fc_full:
5277 adapter->hw.fc.requested_mode = fc;
5278 if (adapter->num_queues > 1)
5279 ixgbe_disable_rx_drop(adapter);
5280 break;
5281 case ixgbe_fc_none:
5282 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5283 if (adapter->num_queues > 1)
5284 ixgbe_enable_rx_drop(adapter);
5285 break;
5286 default:
5287 return (EINVAL);
5288 }
5289
5290 #if 0 /* XXX NetBSD */
5291 /* Don't autoneg if forcing a value */
5292 adapter->hw.fc.disable_fc_autoneg = TRUE;
5293 #endif
5294 ixgbe_fc_enable(&adapter->hw);
5295
5296 return (0);
5297 } /* ixgbe_set_flowcntl */
5298
5299 /************************************************************************
5300 * ixgbe_enable_rx_drop
5301 *
5302 * Enable the hardware to drop packets when the buffer is
5303 * full. This is useful with multiqueue, so that no single
5304 * queue being full stalls the entire RX engine. We only
5305 * enable this when Multiqueue is enabled AND Flow Control
5306 * is disabled.
5307 ************************************************************************/
5308 static void
5309 ixgbe_enable_rx_drop(struct adapter *adapter)
5310 {
5311 struct ixgbe_hw *hw = &adapter->hw;
5312 struct rx_ring *rxr;
5313 u32 srrctl;
5314
5315 for (int i = 0; i < adapter->num_queues; i++) {
5316 rxr = &adapter->rx_rings[i];
5317 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5318 srrctl |= IXGBE_SRRCTL_DROP_EN;
5319 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5320 }
5321
5322 /* enable drop for each vf */
5323 for (int i = 0; i < adapter->num_vfs; i++) {
5324 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5325 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5326 IXGBE_QDE_ENABLE));
5327 }
5328 } /* ixgbe_enable_rx_drop */
5329
5330 /************************************************************************
5331 * ixgbe_disable_rx_drop
5332 ************************************************************************/
5333 static void
5334 ixgbe_disable_rx_drop(struct adapter *adapter)
5335 {
5336 struct ixgbe_hw *hw = &adapter->hw;
5337 struct rx_ring *rxr;
5338 u32 srrctl;
5339
5340 for (int i = 0; i < adapter->num_queues; i++) {
5341 rxr = &adapter->rx_rings[i];
5342 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5343 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5344 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5345 }
5346
5347 /* disable drop for each vf */
5348 for (int i = 0; i < adapter->num_vfs; i++) {
5349 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5350 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5351 }
5352 } /* ixgbe_disable_rx_drop */
5353
5354 /************************************************************************
5355 * ixgbe_sysctl_advertise
5356 *
5357 * SYSCTL wrapper around setting advertised speed
5358 ************************************************************************/
5359 static int
5360 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5361 {
5362 struct sysctlnode node = *rnode;
5363 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5364 int error = 0, advertise;
5365
5366 if (ixgbe_fw_recovery_mode_swflag(adapter))
5367 return (EPERM);
5368
5369 advertise = adapter->advertise;
5370 node.sysctl_data = &advertise;
5371 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5372 if (error != 0 || newp == NULL)
5373 return error;
5374
5375 return ixgbe_set_advertise(adapter, advertise);
5376 } /* ixgbe_sysctl_advertise */
5377
5378 /************************************************************************
5379 * ixgbe_set_advertise - Control advertised link speed
5380 *
5381 * Flags:
5382 * 0x00 - Default (all capable link speed)
5383 * 0x01 - advertise 100 Mb
5384 * 0x02 - advertise 1G
5385 * 0x04 - advertise 10G
5386 * 0x08 - advertise 10 Mb
5387 * 0x10 - advertise 2.5G
5388 * 0x20 - advertise 5G
5389 ************************************************************************/
5390 static int
5391 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5392 {
5393 device_t dev;
5394 struct ixgbe_hw *hw;
5395 ixgbe_link_speed speed = 0;
5396 ixgbe_link_speed link_caps = 0;
5397 s32 err = IXGBE_NOT_IMPLEMENTED;
5398 bool negotiate = FALSE;
5399
5400 /* Checks to validate new value */
5401 if (adapter->advertise == advertise) /* no change */
5402 return (0);
5403
5404 dev = adapter->dev;
5405 hw = &adapter->hw;
5406
5407 /* No speed changes for backplane media */
5408 if (hw->phy.media_type == ixgbe_media_type_backplane)
5409 return (ENODEV);
5410
5411 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5412 (hw->phy.multispeed_fiber))) {
5413 device_printf(dev,
5414 "Advertised speed can only be set on copper or "
5415 "multispeed fiber media types.\n");
5416 return (EINVAL);
5417 }
5418
5419 if (advertise < 0x0 || advertise > 0x3f) {
5420 device_printf(dev, "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5421 return (EINVAL);
5422 }
5423
5424 if (hw->mac.ops.get_link_capabilities) {
5425 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5426 &negotiate);
5427 if (err != IXGBE_SUCCESS) {
5428 device_printf(dev, "Unable to determine supported advertise speeds\n");
5429 return (ENODEV);
5430 }
5431 }
5432
5433 /* Set new value and report new advertised mode */
5434 if (advertise & 0x1) {
5435 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5436 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5437 return (EINVAL);
5438 }
5439 speed |= IXGBE_LINK_SPEED_100_FULL;
5440 }
5441 if (advertise & 0x2) {
5442 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5443 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5444 return (EINVAL);
5445 }
5446 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5447 }
5448 if (advertise & 0x4) {
5449 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5450 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5451 return (EINVAL);
5452 }
5453 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5454 }
5455 if (advertise & 0x8) {
5456 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5457 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5458 return (EINVAL);
5459 }
5460 speed |= IXGBE_LINK_SPEED_10_FULL;
5461 }
5462 if (advertise & 0x10) {
5463 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5464 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5465 return (EINVAL);
5466 }
5467 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5468 }
5469 if (advertise & 0x20) {
5470 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5471 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5472 return (EINVAL);
5473 }
5474 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5475 }
5476 if (advertise == 0)
5477 speed = link_caps; /* All capable link speed */
5478
5479 hw->mac.autotry_restart = TRUE;
5480 hw->mac.ops.setup_link(hw, speed, TRUE);
5481 adapter->advertise = advertise;
5482
5483 return (0);
5484 } /* ixgbe_set_advertise */
5485
5486 /************************************************************************
5487 * ixgbe_get_advertise - Get current advertised speed settings
5488 *
5489 * Formatted for sysctl usage.
5490 * Flags:
5491 * 0x01 - advertise 100 Mb
5492 * 0x02 - advertise 1G
5493 * 0x04 - advertise 10G
5494 * 0x08 - advertise 10 Mb (yes, Mb)
5495 * 0x10 - advertise 2.5G
5496 * 0x20 - advertise 5G
5497 ************************************************************************/
5498 static int
5499 ixgbe_get_advertise(struct adapter *adapter)
5500 {
5501 struct ixgbe_hw *hw = &adapter->hw;
5502 int speed;
5503 ixgbe_link_speed link_caps = 0;
5504 s32 err;
5505 bool negotiate = FALSE;
5506
5507 /*
5508 * Advertised speed means nothing unless it's copper or
5509 * multi-speed fiber
5510 */
5511 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5512 !(hw->phy.multispeed_fiber))
5513 return (0);
5514
5515 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5516 if (err != IXGBE_SUCCESS)
5517 return (0);
5518
5519 speed =
5520 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5521 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5522 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5523 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5524 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5525 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5526
5527 return speed;
5528 } /* ixgbe_get_advertise */
5529
5530 /************************************************************************
5531 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5532 *
5533 * Control values:
5534 * 0/1 - off / on (use default value of 1000)
5535 *
5536 * Legal timer values are:
5537 * 50,100,250,500,1000,2000,5000,10000
5538 *
5539 * Turning off interrupt moderation will also turn this off.
5540 ************************************************************************/
5541 static int
5542 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5543 {
5544 struct sysctlnode node = *rnode;
5545 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5546 struct ifnet *ifp = adapter->ifp;
5547 int error;
5548 int newval;
5549
5550 if (ixgbe_fw_recovery_mode_swflag(adapter))
5551 return (EPERM);
5552
5553 newval = adapter->dmac;
5554 node.sysctl_data = &newval;
5555 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5556 if ((error) || (newp == NULL))
5557 return (error);
5558
5559 switch (newval) {
5560 case 0:
5561 /* Disabled */
5562 adapter->dmac = 0;
5563 break;
5564 case 1:
5565 /* Enable and use default */
5566 adapter->dmac = 1000;
5567 break;
5568 case 50:
5569 case 100:
5570 case 250:
5571 case 500:
5572 case 1000:
5573 case 2000:
5574 case 5000:
5575 case 10000:
5576 /* Legal values - allow */
5577 adapter->dmac = newval;
5578 break;
5579 default:
5580 /* Do nothing, illegal value */
5581 return (EINVAL);
5582 }
5583
5584 /* Re-initialize hardware if it's already running */
5585 if (ifp->if_flags & IFF_RUNNING)
5586 ifp->if_init(ifp);
5587
5588 return (0);
5589 }
5590
5591 #ifdef IXGBE_DEBUG
5592 /************************************************************************
5593 * ixgbe_sysctl_power_state
5594 *
5595 * Sysctl to test power states
5596 * Values:
5597 * 0 - set device to D0
5598 * 3 - set device to D3
5599 * (none) - get current device power state
5600 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this branch is never compiled (notyet) and still
	 * contains FreeBSD-isms — "req->newp" should be plain "newp" under
	 * NetBSD's SYSCTLFN calling convention, and pci_get_powerstate()/
	 * DEVICE_SUSPEND()/DEVICE_RESUME() are FreeBSD APIs — it would need
	 * porting before being enabled.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power state control is not implemented on NetBSD yet. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5636 #endif
5637
5638 /************************************************************************
5639 * ixgbe_sysctl_wol_enable
5640 *
5641 * Sysctl to enable/disable the WoL capability,
5642 * if supported by the adapter.
5643 *
5644 * Values:
5645 * 0 - disabled
5646 * 1 - enabled
5647 ************************************************************************/
5648 static int
5649 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5650 {
5651 struct sysctlnode node = *rnode;
5652 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5653 struct ixgbe_hw *hw = &adapter->hw;
5654 bool new_wol_enabled;
5655 int error = 0;
5656
5657 /*
5658 * It's not required to check recovery mode because this function never
5659 * touches hardware.
5660 */
5661 new_wol_enabled = hw->wol_enabled;
5662 node.sysctl_data = &new_wol_enabled;
5663 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5664 if ((error) || (newp == NULL))
5665 return (error);
5666 if (new_wol_enabled == hw->wol_enabled)
5667 return (0);
5668
5669 if (new_wol_enabled && !adapter->wol_support)
5670 return (ENODEV);
5671 else
5672 hw->wol_enabled = new_wol_enabled;
5673
5674 return (0);
5675 } /* ixgbe_sysctl_wol_enable */
5676
5677 /************************************************************************
5678 * ixgbe_sysctl_wufc - Wake Up Filter Control
5679 *
5680 * Sysctl to enable/disable the types of packets that the
5681 * adapter will wake up on upon receipt.
5682 * Flags:
5683 * 0x1 - Link Status Change
5684 * 0x2 - Magic Packet
5685 * 0x4 - Direct Exact
5686 * 0x8 - Directed Multicast
5687 * 0x10 - Broadcast
5688 * 0x20 - ARP/IPv4 Request Packet
5689 * 0x40 - Direct IPv4 Packet
5690 * 0x80 - Direct IPv6 Packet
5691 *
5692 * Settings not listed above will cause the sysctl to return an error.
5693 ************************************************************************/
5694 static int
5695 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5696 {
5697 struct sysctlnode node = *rnode;
5698 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5699 int error = 0;
5700 u32 new_wufc;
5701
5702 /*
5703 * It's not required to check recovery mode because this function never
5704 * touches hardware.
5705 */
5706 new_wufc = adapter->wufc;
5707 node.sysctl_data = &new_wufc;
5708 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5709 if ((error) || (newp == NULL))
5710 return (error);
5711 if (new_wufc == adapter->wufc)
5712 return (0);
5713
5714 if (new_wufc & 0xffffff00)
5715 return (EINVAL);
5716
5717 new_wufc &= 0xff;
5718 new_wufc |= (0xffffff & adapter->wufc);
5719 adapter->wufc = new_wufc;
5720
5721 return (0);
5722 } /* ixgbe_sysctl_wufc */
5723
5724 #ifdef IXGBE_DEBUG
5725 /************************************************************************
5726 * ixgbe_sysctl_print_rss_config
5727 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): compiled out ("notyet").  This is the FreeBSD
	 * implementation; it relies on sbuf(9) and on the FreeBSD sysctl
	 * "req" argument, neither of which is hooked up on NetBSD.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Register reads are not allowed in firmware recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family: 128-entry redirection table. */
		reta_size = 128;
		break;
	default:
		/* Older MACs: 32-entry table. */
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the extended ERETA regs. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5784 #endif /* IXGBE_DEBUG */
5785
5786 /************************************************************************
5787 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5788 *
5789 * For X552/X557-AT devices using an external PHY
5790 ************************************************************************/
5791 static int
5792 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5793 {
5794 struct sysctlnode node = *rnode;
5795 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5796 struct ixgbe_hw *hw = &adapter->hw;
5797 int val;
5798 u16 reg;
5799 int error;
5800
5801 if (ixgbe_fw_recovery_mode_swflag(adapter))
5802 return (EPERM);
5803
5804 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5805 device_printf(adapter->dev,
5806 "Device has no supported external thermal sensor.\n");
5807 return (ENODEV);
5808 }
5809
5810 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5811 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5812 device_printf(adapter->dev,
5813 "Error reading from PHY's current temperature register\n");
5814 return (EAGAIN);
5815 }
5816
5817 node.sysctl_data = &val;
5818
5819 /* Shift temp for output */
5820 val = reg >> 8;
5821
5822 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5823 if ((error) || (newp == NULL))
5824 return (error);
5825
5826 return (0);
5827 } /* ixgbe_sysctl_phy_temp */
5828
5829 /************************************************************************
5830 * ixgbe_sysctl_phy_overtemp_occurred
5831 *
5832 * Reports (directly from the PHY) whether the current PHY
5833 * temperature is over the overtemp threshold.
5834 ************************************************************************/
5835 static int
5836 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5837 {
5838 struct sysctlnode node = *rnode;
5839 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5840 struct ixgbe_hw *hw = &adapter->hw;
5841 int val, error;
5842 u16 reg;
5843
5844 if (ixgbe_fw_recovery_mode_swflag(adapter))
5845 return (EPERM);
5846
5847 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5848 device_printf(adapter->dev,
5849 "Device has no supported external thermal sensor.\n");
5850 return (ENODEV);
5851 }
5852
5853 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5854 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5855 device_printf(adapter->dev,
5856 "Error reading from PHY's temperature status register\n");
5857 return (EAGAIN);
5858 }
5859
5860 node.sysctl_data = &val;
5861
5862 /* Get occurrence bit */
5863 val = !!(reg & 0x4000);
5864
5865 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5866 if ((error) || (newp == NULL))
5867 return (error);
5868
5869 return (0);
5870 } /* ixgbe_sysctl_phy_overtemp_occurred */
5871
5872 /************************************************************************
5873 * ixgbe_sysctl_eee_state
5874 *
5875 * Sysctl to set EEE power saving feature
5876 * Values:
5877 * 0 - disable EEE
5878 * 1 - enable EEE
5879 * (none) - get current device EEE state
5880 ************************************************************************/
5881 static int
5882 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5883 {
5884 struct sysctlnode node = *rnode;
5885 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5886 struct ifnet *ifp = adapter->ifp;
5887 device_t dev = adapter->dev;
5888 int curr_eee, new_eee, error = 0;
5889 s32 retval;
5890
5891 if (ixgbe_fw_recovery_mode_swflag(adapter))
5892 return (EPERM);
5893
5894 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5895 node.sysctl_data = &new_eee;
5896 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5897 if ((error) || (newp == NULL))
5898 return (error);
5899
5900 /* Nothing to do */
5901 if (new_eee == curr_eee)
5902 return (0);
5903
5904 /* Not supported */
5905 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5906 return (EINVAL);
5907
5908 /* Bounds checking */
5909 if ((new_eee < 0) || (new_eee > 1))
5910 return (EINVAL);
5911
5912 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
5913 if (retval) {
5914 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5915 return (EINVAL);
5916 }
5917
5918 /* Restart auto-neg */
5919 ifp->if_init(ifp);
5920
5921 device_printf(dev, "New EEE state: %d\n", new_eee);
5922
5923 /* Cache new value */
5924 if (new_eee)
5925 adapter->feat_en |= IXGBE_FEATURE_EEE;
5926 else
5927 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5928
5929 return (error);
5930 } /* ixgbe_sysctl_eee_state */
5931
/*
 * Dump one per-queue register for every queue on a single console line:
 * "<devname>: <regname>\t<val0> <val1> ...".  Used only by
 * ixgbe_print_debug_info() below.  Wrapped in do { } while (0) so the
 * expansion behaves as a single statement.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5945
5946 /************************************************************************
5947 * ixgbe_print_debug_info
5948 *
5949 * Called only when em_display_debug_stats is enabled.
5950 * Provides a way to take a look at important statistics
5951 * maintained by the driver and hardware.
5952 ************************************************************************/
5953 static void
5954 ixgbe_print_debug_info(struct adapter *adapter)
5955 {
5956 device_t dev = adapter->dev;
5957 struct ixgbe_hw *hw = &adapter->hw;
5958 int table_size;
5959 int i;
5960
5961 switch (adapter->hw.mac.type) {
5962 case ixgbe_mac_X550:
5963 case ixgbe_mac_X550EM_x:
5964 case ixgbe_mac_X550EM_a:
5965 table_size = 128;
5966 break;
5967 default:
5968 table_size = 32;
5969 break;
5970 }
5971
5972 device_printf(dev, "[E]RETA:\n");
5973 for (i = 0; i < table_size; i++) {
5974 if (i < 32)
5975 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5976 IXGBE_RETA(i)));
5977 else
5978 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5979 IXGBE_ERETA(i - 32)));
5980 }
5981
5982 device_printf(dev, "queue:");
5983 for (i = 0; i < adapter->num_queues; i++) {
5984 printf((i == 0) ? "\t" : " ");
5985 printf("%8d", i);
5986 }
5987 printf("\n");
5988 PRINTQS(adapter, RDBAL);
5989 PRINTQS(adapter, RDBAH);
5990 PRINTQS(adapter, RDLEN);
5991 PRINTQS(adapter, SRRCTL);
5992 PRINTQS(adapter, RDH);
5993 PRINTQS(adapter, RDT);
5994 PRINTQS(adapter, RXDCTL);
5995
5996 device_printf(dev, "RQSMR:");
5997 for (i = 0; i < adapter->num_queues / 4; i++) {
5998 printf((i == 0) ? "\t" : " ");
5999 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6000 }
6001 printf("\n");
6002
6003 device_printf(dev, "disabled_count:");
6004 for (i = 0; i < adapter->num_queues; i++) {
6005 printf((i == 0) ? "\t" : " ");
6006 printf("%8d", adapter->queues[i].disabled_count);
6007 }
6008 printf("\n");
6009
6010 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6011 if (hw->mac.type != ixgbe_mac_82598EB) {
6012 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6013 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6014 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6015 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6016 }
6017 } /* ixgbe_print_debug_info */
6018
6019 /************************************************************************
6020 * ixgbe_sysctl_debug
6021 ************************************************************************/
6022 static int
6023 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6024 {
6025 struct sysctlnode node = *rnode;
6026 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6027 int error, result = 0;
6028
6029 if (ixgbe_fw_recovery_mode_swflag(adapter))
6030 return (EPERM);
6031
6032 node.sysctl_data = &result;
6033 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6034
6035 if (error || newp == NULL)
6036 return error;
6037
6038 if (result == 1)
6039 ixgbe_print_debug_info(adapter);
6040
6041 return 0;
6042 } /* ixgbe_sysctl_debug */
6043
6044 /************************************************************************
6045 * ixgbe_sysctl_rx_copy_len
6046 ************************************************************************/
6047 static int
6048 ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6049 {
6050 struct sysctlnode node = *rnode;
6051 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6052 int error;
6053 int result = adapter->rx_copy_len;
6054
6055 node.sysctl_data = &result;
6056 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6057
6058 if (error || newp == NULL)
6059 return error;
6060
6061 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6062 return EINVAL;
6063
6064 adapter->rx_copy_len = result;
6065
6066 return 0;
6067 } /* ixgbe_sysctl_rx_copy_len */
6068
6069 /************************************************************************
6070 * ixgbe_init_device_features
6071 ************************************************************************/
/*
 * Populate adapter->feat_cap (what the MAC can do) and adapter->feat_en
 * (what is actually enabled) from the MAC type, device ID, and the
 * ixgbe_enable_* global tunables.  Must run before interrupt allocation,
 * since MSI-X availability gates RSS/SRIOV below.
 */
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Baseline capabilities common to all supported MACs. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Only the 82598AT variant has the fan-failure pin. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only on the bypass SKU, function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot use INTx. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		/* The 1G_T SKUs additionally have a temp sensor and EEE. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only on the bypass SKU, function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* The QSFP quad-port part cannot use INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6189
6190 /************************************************************************
6191 * ixgbe_probe - Device identification routine
6192 *
6193 * Determines if the driver should be loaded on
6194 * adapter based on its PCI vendor/device ID.
6195 *
6196 * return BUS_PROBE_DEFAULT on success, positive on failure
6197 ************************************************************************/
6198 static int
6199 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6200 {
6201 const struct pci_attach_args *pa = aux;
6202
6203 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6204 }
6205
6206 static const ixgbe_vendor_info_t *
6207 ixgbe_lookup(const struct pci_attach_args *pa)
6208 {
6209 const ixgbe_vendor_info_t *ent;
6210 pcireg_t subid;
6211
6212 INIT_DEBUGOUT("ixgbe_lookup: begin");
6213
6214 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6215 return NULL;
6216
6217 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6218
6219 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6220 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6221 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6222 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6223 (ent->subvendor_id == 0)) &&
6224 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6225 (ent->subdevice_id == 0))) {
6226 return ent;
6227 }
6228 }
6229 return NULL;
6230 }
6231
6232 static int
6233 ixgbe_ifflags_cb(struct ethercom *ec)
6234 {
6235 struct ifnet *ifp = &ec->ec_if;
6236 struct adapter *adapter = ifp->if_softc;
6237 u_short change;
6238 int rv = 0;
6239
6240 IXGBE_CORE_LOCK(adapter);
6241
6242 change = ifp->if_flags ^ adapter->if_flags;
6243 if (change != 0)
6244 adapter->if_flags = ifp->if_flags;
6245
6246 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6247 rv = ENETRESET;
6248 goto out;
6249 } else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
6250 ixgbe_set_rxfilter(adapter);
6251
6252 /* Set up VLAN support and filter */
6253 ixgbe_setup_vlan_hw_support(adapter);
6254
6255 out:
6256 IXGBE_CORE_UNLOCK(adapter);
6257
6258 return rv;
6259 }
6260
6261 /************************************************************************
6262 * ixgbe_ioctl - Ioctl entry point
6263 *
6264 * Called when the user wants to configure the interface.
6265 *
6266 * return 0 on success, positive on failure
6267 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* The four L4 Rx checksum capabilities toggled as one unit below. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* Reject all configuration while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug logging only -- except SIOCZIFDATA, which
	 * also clears the hardware and software counters as a side effect.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Zero both the MAC counters and the driver's evcnts. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command dispatch. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		/* Read SFP+ module EEPROM/diagnostics over I2C. */
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP addresses (0xA0/0xA2) are valid. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/*
		 * Let the generic layer do the work first; ENETRESET
		 * means the hardware state must be resynchronized below.
		 */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* interface down: nothing to resync */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6405
6406 /************************************************************************
6407 * ixgbe_check_fan_failure
6408 ************************************************************************/
6409 static void
6410 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6411 {
6412 u32 mask;
6413
6414 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6415 IXGBE_ESDP_SDP1;
6416
6417 if (reg & mask)
6418 device_printf(adapter->dev,
6419 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6420 } /* ixgbe_check_fan_failure */
6421
6422 /************************************************************************
6423 * ixgbe_handle_que
6424 ************************************************************************/
6425 static void
6426 ixgbe_handle_que(void *context)
6427 {
6428 struct ix_queue *que = context;
6429 struct adapter *adapter = que->adapter;
6430 struct tx_ring *txr = que->txr;
6431 struct ifnet *ifp = adapter->ifp;
6432 bool more = false;
6433
6434 que->handleq.ev_count++;
6435
6436 if (ifp->if_flags & IFF_RUNNING) {
6437 more = ixgbe_rxeof(que);
6438 IXGBE_TX_LOCK(txr);
6439 more |= ixgbe_txeof(txr);
6440 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6441 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6442 ixgbe_mq_start_locked(ifp, txr);
6443 /* Only for queue 0 */
6444 /* NetBSD still needs this for CBQ */
6445 if ((&adapter->queues[0] == que)
6446 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6447 ixgbe_legacy_start_locked(ifp, txr);
6448 IXGBE_TX_UNLOCK(txr);
6449 }
6450
6451 if (more) {
6452 que->req.ev_count++;
6453 ixgbe_sched_handle_que(adapter, que);
6454 } else if (que->res != NULL) {
6455 /* Re-enable this interrupt */
6456 ixgbe_enable_queue(adapter, que->msix);
6457 } else
6458 ixgbe_enable_intr(adapter);
6459
6460 return;
6461 } /* ixgbe_handle_que */
6462
6463 /************************************************************************
6464 * ixgbe_handle_que_work
6465 ************************************************************************/
6466 static void
6467 ixgbe_handle_que_work(struct work *wk, void *context)
6468 {
6469 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6470
6471 /*
6472 * "enqueued flag" is not required here.
6473 * See ixgbe_msix_que().
6474 */
6475 ixgbe_handle_que(que);
6476 }
6477
6478 /************************************************************************
6479 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6480 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char intrbuf[PCI_INTRSTR_LEN];
	char wqname[MAXCOMLEN];
	const char *intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	/* Second pass through here (after MSI failure) requests INTx. */
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/* Establish failed: release and maybe retry with INTx. */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record which interrupt type we actually ended up with. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		/* Both MSI and INTx failed: give up. */
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		/* Multiqueue TX path: softint + workqueue for deferred start */
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx",
		    device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any of the deferred-processing contexts didn't come up. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6587
6588 /************************************************************************
6589 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6590 ************************************************************************/
6591 static int
6592 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6593 {
6594 device_t dev = adapter->dev;
6595 struct ix_queue *que = adapter->queues;
6596 struct tx_ring *txr = adapter->tx_rings;
6597 pci_chipset_tag_t pc;
6598 char intrbuf[PCI_INTRSTR_LEN];
6599 char intr_xname[32];
6600 char wqname[MAXCOMLEN];
6601 const char *intrstr = NULL;
6602 int error, vector = 0;
6603 int cpu_id = 0;
6604 kcpuset_t *affinity;
6605 #ifdef RSS
6606 unsigned int rss_buckets = 0;
6607 kcpuset_t cpu_mask;
6608 #endif
6609
6610 pc = adapter->osdep.pc;
6611 #ifdef RSS
6612 /*
6613 * If we're doing RSS, the number of queues needs to
6614 * match the number of RSS buckets that are configured.
6615 *
6616 * + If there's more queues than RSS buckets, we'll end
6617 * up with queues that get no traffic.
6618 *
6619 * + If there's more RSS buckets than queues, we'll end
6620 * up having multiple RSS buckets map to the same queue,
6621 * so there'll be some contention.
6622 */
6623 rss_buckets = rss_getnumbuckets();
6624 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6625 (adapter->num_queues != rss_buckets)) {
6626 device_printf(dev,
6627 "%s: number of queues (%d) != number of RSS buckets (%d)"
6628 "; performance will be impacted.\n",
6629 __func__, adapter->num_queues, rss_buckets);
6630 }
6631 #endif
6632
6633 adapter->osdep.nintrs = adapter->num_queues + 1;
6634 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6635 adapter->osdep.nintrs) != 0) {
6636 aprint_error_dev(dev,
6637 "failed to allocate MSI-X interrupt\n");
6638 return (ENXIO);
6639 }
6640
6641 kcpuset_create(&affinity, false);
6642 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6643 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6644 device_xname(dev), i);
6645 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6646 sizeof(intrbuf));
6647 #ifdef IXGBE_MPSAFE
6648 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6649 true);
6650 #endif
6651 /* Set the handler function */
6652 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6653 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6654 intr_xname);
6655 if (que->res == NULL) {
6656 aprint_error_dev(dev,
6657 "Failed to register QUE handler\n");
6658 error = ENXIO;
6659 goto err_out;
6660 }
6661 que->msix = vector;
6662 adapter->active_queues |= 1ULL << que->msix;
6663
6664 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6665 #ifdef RSS
6666 /*
6667 * The queue ID is used as the RSS layer bucket ID.
6668 * We look up the queue ID -> RSS CPU ID and select
6669 * that.
6670 */
6671 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6672 CPU_SETOF(cpu_id, &cpu_mask);
6673 #endif
6674 } else {
6675 /*
6676 * Bind the MSI-X vector, and thus the
6677 * rings to the corresponding CPU.
6678 *
6679 * This just happens to match the default RSS
6680 * round-robin bucket -> queue -> CPU allocation.
6681 */
6682 if (adapter->num_queues > 1)
6683 cpu_id = i;
6684 }
6685 /* Round-robin affinity */
6686 kcpuset_zero(affinity);
6687 kcpuset_set(affinity, cpu_id % ncpu);
6688 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6689 NULL);
6690 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6691 intrstr);
6692 if (error == 0) {
6693 #if 1 /* def IXGBE_DEBUG */
6694 #ifdef RSS
6695 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6696 cpu_id % ncpu);
6697 #else
6698 aprint_normal(", bound queue %d to cpu %d", i,
6699 cpu_id % ncpu);
6700 #endif
6701 #endif /* IXGBE_DEBUG */
6702 }
6703 aprint_normal("\n");
6704
6705 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6706 txr->txr_si = softint_establish(
6707 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6708 ixgbe_deferred_mq_start, txr);
6709 if (txr->txr_si == NULL) {
6710 aprint_error_dev(dev,
6711 "couldn't establish software interrupt\n");
6712 error = ENXIO;
6713 goto err_out;
6714 }
6715 }
6716 que->que_si
6717 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6718 ixgbe_handle_que, que);
6719 if (que->que_si == NULL) {
6720 aprint_error_dev(dev,
6721 "couldn't establish software interrupt\n");
6722 error = ENXIO;
6723 goto err_out;
6724 }
6725 }
6726 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6727 error = workqueue_create(&adapter->txr_wq, wqname,
6728 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6729 IXGBE_WORKQUEUE_FLAGS);
6730 if (error) {
6731 aprint_error_dev(dev,
6732 "couldn't create workqueue for deferred Tx\n");
6733 goto err_out;
6734 }
6735 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6736
6737 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6738 error = workqueue_create(&adapter->que_wq, wqname,
6739 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6740 IXGBE_WORKQUEUE_FLAGS);
6741 if (error) {
6742 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6743 goto err_out;
6744 }
6745
6746 /* and Link */
6747 cpu_id++;
6748 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6749 adapter->vector = vector;
6750 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6751 sizeof(intrbuf));
6752 #ifdef IXGBE_MPSAFE
6753 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6754 true);
6755 #endif
6756 /* Set the link handler function */
6757 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6758 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6759 intr_xname);
6760 if (adapter->osdep.ihs[vector] == NULL) {
6761 aprint_error_dev(dev, "Failed to register LINK handler\n");
6762 error = ENXIO;
6763 goto err_out;
6764 }
6765 /* Round-robin affinity */
6766 kcpuset_zero(affinity);
6767 kcpuset_set(affinity, cpu_id % ncpu);
6768 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6769 NULL);
6770
6771 aprint_normal_dev(dev,
6772 "for link, interrupting at %s", intrstr);
6773 if (error == 0)
6774 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6775 else
6776 aprint_normal("\n");
6777
6778 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6779 adapter->mbx_si =
6780 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6781 ixgbe_handle_mbx, adapter);
6782 if (adapter->mbx_si == NULL) {
6783 aprint_error_dev(dev,
6784 "could not establish software interrupts\n");
6785
6786 error = ENXIO;
6787 goto err_out;
6788 }
6789 }
6790
6791 kcpuset_destroy(affinity);
6792 aprint_normal_dev(dev,
6793 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6794
6795 return (0);
6796
6797 err_out:
6798 kcpuset_destroy(affinity);
6799 ixgbe_free_softint(adapter);
6800 ixgbe_free_pciintr_resources(adapter);
6801 return (error);
6802 } /* ixgbe_allocate_msix */
6803
6804 /************************************************************************
6805 * ixgbe_configure_interrupts
6806 *
6807 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6808 * This will also depend on user settings.
6809 ************************************************************************/
6810 static int
6811 ixgbe_configure_interrupts(struct adapter *adapter)
6812 {
6813 device_t dev = adapter->dev;
6814 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6815 int want, queues, msgs;
6816
6817 /* Default to 1 queue if MSI-X setup fails */
6818 adapter->num_queues = 1;
6819
6820 /* Override by tuneable */
6821 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6822 goto msi;
6823
6824 /*
6825 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6826 * interrupt slot.
6827 */
6828 if (ncpu == 1)
6829 goto msi;
6830
6831 /* First try MSI-X */
6832 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6833 msgs = MIN(msgs, IXG_MAX_NINTR);
6834 if (msgs < 2)
6835 goto msi;
6836
6837 adapter->msix_mem = (void *)1; /* XXX */
6838
6839 /* Figure out a reasonable auto config value */
6840 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6841
6842 #ifdef RSS
6843 /* If we're doing RSS, clamp at the number of RSS buckets */
6844 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6845 queues = min(queues, rss_getnumbuckets());
6846 #endif
6847 if (ixgbe_num_queues > queues) {
6848 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6849 ixgbe_num_queues = queues;
6850 }
6851
6852 if (ixgbe_num_queues != 0)
6853 queues = ixgbe_num_queues;
6854 else
6855 queues = min(queues,
6856 min(mac->max_tx_queues, mac->max_rx_queues));
6857
6858 /* reflect correct sysctl value */
6859 ixgbe_num_queues = queues;
6860
6861 /*
6862 * Want one vector (RX/TX pair) per queue
6863 * plus an additional for Link.
6864 */
6865 want = queues + 1;
6866 if (msgs >= want)
6867 msgs = want;
6868 else {
6869 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6870 "%d vectors but %d queues wanted!\n",
6871 msgs, want);
6872 goto msi;
6873 }
6874 adapter->num_queues = queues;
6875 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6876 return (0);
6877
6878 /*
6879 * MSI-X allocation failed or provided us with
6880 * less vectors than needed. Free MSI-X resources
6881 * and we'll try enabling MSI.
6882 */
6883 msi:
6884 /* Without MSI-X, some features are no longer supported */
6885 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6886 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6887 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6888 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6889
6890 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6891 adapter->msix_mem = NULL; /* XXX */
6892 if (msgs > 1)
6893 msgs = 1;
6894 if (msgs != 0) {
6895 msgs = 1;
6896 adapter->feat_en |= IXGBE_FEATURE_MSI;
6897 return (0);
6898 }
6899
6900 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6901 aprint_error_dev(dev,
6902 "Device does not support legacy interrupts.\n");
6903 return 1;
6904 }
6905
6906 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6907
6908 return (0);
6909 } /* ixgbe_configure_interrupts */
6910
6911
6912 /************************************************************************
6913 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6914 *
6915 * Done outside of interrupt context since the driver might sleep
6916 ************************************************************************/
6917 static void
6918 ixgbe_handle_link(void *context)
6919 {
6920 struct adapter *adapter = context;
6921 struct ixgbe_hw *hw = &adapter->hw;
6922
6923 IXGBE_CORE_LOCK(adapter);
6924 ++adapter->link_sicount.ev_count;
6925 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6926 ixgbe_update_link_status(adapter);
6927
6928 /* Re-enable link interrupts */
6929 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6930
6931 IXGBE_CORE_UNLOCK(adapter);
6932 } /* ixgbe_handle_link */
6933
6934 /************************************************************************
6935 * ixgbe_rearm_queues
6936 ************************************************************************/
6937 static __inline void
6938 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6939 {
6940 u32 mask;
6941
6942 switch (adapter->hw.mac.type) {
6943 case ixgbe_mac_82598EB:
6944 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6945 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6946 break;
6947 case ixgbe_mac_82599EB:
6948 case ixgbe_mac_X540:
6949 case ixgbe_mac_X550:
6950 case ixgbe_mac_X550EM_x:
6951 case ixgbe_mac_X550EM_a:
6952 mask = (queues & 0xFFFFFFFF);
6953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6954 mask = (queues >> 32);
6955 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6956 break;
6957 default:
6958 break;
6959 }
6960 } /* ixgbe_rearm_queues */
6961