ixgbe.c revision 1.220 1 /* $NetBSD: ixgbe.c,v 1.220 2020/01/03 12:59:46 pgoyette Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84 /* XXX NetBSD: + 3.3.10 */
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
/*
 * PCI match table.  Each entry's last field is an index into
 * ixgbe_strings[]; all entries here use index 0.  The all-zero
 * entry terminates the table for ixgbe_lookup().
 */
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
150
151 /************************************************************************
152 * Table of branding strings
153 ************************************************************************/
/*
 * Branding strings, selected by the index field of
 * ixgbe_vendor_info_array entries (all currently 0).
 */
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
157
158 /************************************************************************
159 * Function prototypes
160 ************************************************************************/
161 static int ixgbe_probe(device_t, cfdata_t, void *);
162 static void ixgbe_attach(device_t, device_t, void *);
163 static int ixgbe_detach(device_t, int);
164 #if 0
165 static int ixgbe_shutdown(device_t);
166 #endif
167 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
168 static bool ixgbe_resume(device_t, const pmf_qual_t *);
169 static int ixgbe_ifflags_cb(struct ethercom *);
170 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
171 static void ixgbe_ifstop(struct ifnet *, int);
172 static int ixgbe_init(struct ifnet *);
173 static void ixgbe_init_locked(struct adapter *);
174 static void ixgbe_stop(void *);
175 static void ixgbe_init_device_features(struct adapter *);
176 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
177 static void ixgbe_add_media_types(struct adapter *);
178 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
179 static int ixgbe_media_change(struct ifnet *);
180 static int ixgbe_allocate_pci_resources(struct adapter *,
181 const struct pci_attach_args *);
182 static void ixgbe_free_softint(struct adapter *);
183 static void ixgbe_get_slot_info(struct adapter *);
184 static int ixgbe_allocate_msix(struct adapter *,
185 const struct pci_attach_args *);
186 static int ixgbe_allocate_legacy(struct adapter *,
187 const struct pci_attach_args *);
188 static int ixgbe_configure_interrupts(struct adapter *);
189 static void ixgbe_free_pciintr_resources(struct adapter *);
190 static void ixgbe_free_pci_resources(struct adapter *);
191 static void ixgbe_local_timer(void *);
192 static void ixgbe_local_timer1(void *);
193 static void ixgbe_recovery_mode_timer(void *);
194 static int ixgbe_setup_interface(device_t, struct adapter *);
195 static void ixgbe_config_gpie(struct adapter *);
196 static void ixgbe_config_dmac(struct adapter *);
197 static void ixgbe_config_delay_values(struct adapter *);
198 static void ixgbe_config_link(struct adapter *);
199 static void ixgbe_check_wol_support(struct adapter *);
200 static int ixgbe_setup_low_power_mode(struct adapter *);
201 #if 0
202 static void ixgbe_rearm_queues(struct adapter *, u64);
203 #endif
204
205 static void ixgbe_initialize_transmit_units(struct adapter *);
206 static void ixgbe_initialize_receive_units(struct adapter *);
207 static void ixgbe_enable_rx_drop(struct adapter *);
208 static void ixgbe_disable_rx_drop(struct adapter *);
209 static void ixgbe_initialize_rss_mapping(struct adapter *);
210
211 static void ixgbe_enable_intr(struct adapter *);
212 static void ixgbe_disable_intr(struct adapter *);
213 static void ixgbe_update_stats_counters(struct adapter *);
214 static void ixgbe_set_rxfilter(struct adapter *);
215 static void ixgbe_update_link_status(struct adapter *);
216 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
217 static void ixgbe_configure_ivars(struct adapter *);
218 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
219 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
220
221 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
222 static void ixgbe_setup_vlan_hw_support(struct adapter *);
223 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
224 static int ixgbe_register_vlan(struct adapter *, u16);
225 static int ixgbe_unregister_vlan(struct adapter *, u16);
226
227 static void ixgbe_add_device_sysctls(struct adapter *);
228 static void ixgbe_add_hw_stats(struct adapter *);
229 static void ixgbe_clear_evcnt(struct adapter *);
230 static int ixgbe_set_flowcntl(struct adapter *, int);
231 static int ixgbe_set_advertise(struct adapter *, int);
232 static int ixgbe_get_advertise(struct adapter *);
233
234 /* Sysctl handlers */
235 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
236 const char *, int *, int);
237 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
238 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
239 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
243 #ifdef IXGBE_DEBUG
244 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
246 #endif
247 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
251 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
252 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
254 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
255 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
256
257 /* Support for pluggable optic modules */
258 static bool ixgbe_sfp_probe(struct adapter *);
259
260 /* Legacy (single vector) interrupt handler */
261 static int ixgbe_legacy_irq(void *);
262
263 /* The MSI/MSI-X Interrupt handlers */
264 static int ixgbe_msix_que(void *);
265 static int ixgbe_msix_link(void *);
266
267 /* Software interrupts for deferred work */
268 static void ixgbe_handle_que(void *);
269 static void ixgbe_handle_link(void *);
270 static void ixgbe_handle_msf(void *);
271 static void ixgbe_handle_mod(void *);
272 static void ixgbe_handle_phy(void *);
273
274 /* Workqueue handler for deferred work */
275 static void ixgbe_handle_que_work(struct work *, void *);
276
277 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
278
279 /************************************************************************
280 * NetBSD Device Interface Entry Points
281 ************************************************************************/
/*
 * Autoconfiguration glue: registers the ixg driver with its
 * probe/attach/detach entry points and per-device softc size.
 * DVF_DETACH_SHUTDOWN allows detach at system shutdown.
 */
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
285
286 #if 0
287 devclass_t ix_devclass;
288 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
289
290 MODULE_DEPEND(ix, pci, 1, 1, 1);
291 MODULE_DEPEND(ix, ether, 1, 1, 1);
292 #ifdef DEV_NETMAP
293 MODULE_DEPEND(ix, netmap, 1, 1, 1);
294 #endif
295 #endif
296
/*
 * TUNEABLE PARAMETERS:
 *
 * NetBSD note: the FreeBSD SYSCTL_INT()/TUNABLE_INT() registrations
 * below are stubbed out to empty macros, so only the static
 * variables' compile-time defaults take effect here.
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/* Empty stub: FreeBSD sysctl registration is a no-op on NetBSD. */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* Empty stub: FreeBSD loader-tunable registration is a no-op on NetBSD. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * MP-safety flags for callouts, softints and workqueues.
 * (Note: "SOFTINFT" is a long-standing typo kept for compatibility
 * with existing users of the macro elsewhere in this file.)
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
419
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA, and ERETA on X550
 *   which extends it to 512 entries), the RSS hash key registers
 *   (RSSRK) and the hash field selection (MRQC) so that received
 *   packets are spread across the configured RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always uses the stack-wide RSS key. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 wants queue ids spaced by 0x11 in RETA entries. */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family: 512-entry indirection table (RETA+ERETA). */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j walks the queues round-robin. */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then write one register.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 go to RETA, 128+ to ERETA. */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash config into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in VF/SR-IOV queue-pool mode bits. */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
536
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs per-queue descriptor ring base/length, buffer sizing
 *   (SRRCTL), statistics mapping (RQSMR), head/tail pointers, and
 *   global RX features (broadcasts, jumbo frames, CRC stripping,
 *   RSS, checksum offload).  Receives are left disabled; the caller
 *   re-enables them after ring setup completes.
 ************************************************************************/
/* Round-up mask so rx_mbuf_sz maps to whole SRRCTL BSIZEPKT units. */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: pass pause frames and MAC control frames. */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in SRRCTL units (1KB granularity). */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		/* One-buffer advanced descriptors (no header split). */
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split replication types (82599 and newer). */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
666
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Programs per-queue TX descriptor ring base/length, statistics
 *   mapping (TQSMR/TQSM), head/tail pointers, and disables the
 *   descriptor write-back relax-ordering.  On 82599 and newer it
 *   also enables TX DMA and programs MTQC with the arbiter
 *   temporarily disabled.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware queue index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable TX DMA (82599 and newer). */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
758
759 /************************************************************************
760 * ixgbe_attach - Device initialization routine
761 *
762 * Called when the driver is being loaded.
763 * Identifies the type of hardware, allocates all resources
764 * and initializes the hardware.
765 *
766 * return 0 on success, positive on failure
767 ************************************************************************/
768 static void
769 ixgbe_attach(device_t parent, device_t dev, void *aux)
770 {
771 struct adapter *adapter;
772 struct ixgbe_hw *hw;
773 int error = -1;
774 u32 ctrl_ext;
775 u16 high, low, nvmreg;
776 pcireg_t id, subid;
777 const ixgbe_vendor_info_t *ent;
778 struct pci_attach_args *pa = aux;
779 bool unsupported_sfp = false;
780 const char *str;
781 char buf[256];
782
783 INIT_DEBUGOUT("ixgbe_attach: begin");
784
785 /* Allocate, clear, and link in our adapter structure */
786 adapter = device_private(dev);
787 adapter->hw.back = adapter;
788 adapter->dev = dev;
789 hw = &adapter->hw;
790 adapter->osdep.pc = pa->pa_pc;
791 adapter->osdep.tag = pa->pa_tag;
792 if (pci_dma64_available(pa))
793 adapter->osdep.dmat = pa->pa_dmat64;
794 else
795 adapter->osdep.dmat = pa->pa_dmat;
796 adapter->osdep.attached = false;
797
798 ent = ixgbe_lookup(pa);
799
800 KASSERT(ent != NULL);
801
802 aprint_normal(": %s, Version - %s\n",
803 ixgbe_strings[ent->index], ixgbe_driver_version);
804
805 /* Core Lock Init*/
806 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
807
808 /* Set up the timer callout */
809 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
810
811 /* Determine hardware revision */
812 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
813 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
814
815 hw->vendor_id = PCI_VENDOR(id);
816 hw->device_id = PCI_PRODUCT(id);
817 hw->revision_id =
818 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
819 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
820 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
821
822 /*
823 * Make sure BUSMASTER is set
824 */
825 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
826
827 /* Do base PCI setup - map BAR0 */
828 if (ixgbe_allocate_pci_resources(adapter, pa)) {
829 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
830 error = ENXIO;
831 goto err_out;
832 }
833
834 /* let hardware know driver is loaded */
835 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
836 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
837 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
838
839 /*
840 * Initialize the shared code
841 */
842 if (ixgbe_init_shared_code(hw) != 0) {
843 aprint_error_dev(dev, "Unable to initialize the shared code\n");
844 error = ENXIO;
845 goto err_out;
846 }
847
848 switch (hw->mac.type) {
849 case ixgbe_mac_82598EB:
850 str = "82598EB";
851 break;
852 case ixgbe_mac_82599EB:
853 str = "82599EB";
854 break;
855 case ixgbe_mac_X540:
856 str = "X540";
857 break;
858 case ixgbe_mac_X550:
859 str = "X550";
860 break;
861 case ixgbe_mac_X550EM_x:
862 str = "X550EM";
863 break;
864 case ixgbe_mac_X550EM_a:
865 str = "X550EM A";
866 break;
867 default:
868 str = "Unknown";
869 break;
870 }
871 aprint_normal_dev(dev, "device %s\n", str);
872
873 if (hw->mbx.ops.init_params)
874 hw->mbx.ops.init_params(hw);
875
876 hw->allow_unsupported_sfp = allow_unsupported_sfp;
877
878 /* Pick up the 82599 settings */
879 if (hw->mac.type != ixgbe_mac_82598EB) {
880 hw->phy.smart_speed = ixgbe_smart_speed;
881 adapter->num_segs = IXGBE_82599_SCATTER;
882 } else
883 adapter->num_segs = IXGBE_82598_SCATTER;
884
885 /* Ensure SW/FW semaphore is free */
886 ixgbe_init_swfw_semaphore(hw);
887
888 hw->mac.ops.set_lan_id(hw);
889 ixgbe_init_device_features(adapter);
890
891 if (ixgbe_configure_interrupts(adapter)) {
892 error = ENXIO;
893 goto err_out;
894 }
895
896 /* Allocate multicast array memory. */
897 adapter->mta = malloc(sizeof(*adapter->mta) *
898 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
899
900 /* Enable WoL (if supported) */
901 ixgbe_check_wol_support(adapter);
902
903 /* Register for VLAN events */
904 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
905
906 /* Verify adapter fan is still functional (if applicable) */
907 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
908 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
909 ixgbe_check_fan_failure(adapter, esdp, FALSE);
910 }
911
912 /* Set an initial default flow control value */
913 hw->fc.requested_mode = ixgbe_flow_control;
914
915 /* Sysctls for limiting the amount of work done in the taskqueues */
916 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
917 "max number of rx packets to process",
918 &adapter->rx_process_limit, ixgbe_rx_process_limit);
919
920 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
921 "max number of tx packets to process",
922 &adapter->tx_process_limit, ixgbe_tx_process_limit);
923
924 /* Do descriptor calc and sanity checks */
925 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
926 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
927 aprint_error_dev(dev, "TXD config issue, using default!\n");
928 adapter->num_tx_desc = DEFAULT_TXD;
929 } else
930 adapter->num_tx_desc = ixgbe_txd;
931
932 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
933 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
934 aprint_error_dev(dev, "RXD config issue, using default!\n");
935 adapter->num_rx_desc = DEFAULT_RXD;
936 } else
937 adapter->num_rx_desc = ixgbe_rxd;
938
939 /* Allocate our TX/RX Queues */
940 if (ixgbe_allocate_queues(adapter)) {
941 error = ENOMEM;
942 goto err_out;
943 }
944
945 hw->phy.reset_if_overtemp = TRUE;
946 error = ixgbe_reset_hw(hw);
947 hw->phy.reset_if_overtemp = FALSE;
948 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
949 /*
950 * No optics in this port, set up
951 * so the timer routine will probe
952 * for later insertion.
953 */
954 adapter->sfp_probe = TRUE;
955 error = IXGBE_SUCCESS;
956 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
957 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
958 unsupported_sfp = true;
959 error = IXGBE_SUCCESS;
960 } else if (error) {
961 aprint_error_dev(dev, "Hardware initialization failed\n");
962 error = EIO;
963 goto err_late;
964 }
965
966 /* Make sure we have a good EEPROM before we read from it */
967 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
968 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
969 error = EIO;
970 goto err_late;
971 }
972
973 aprint_normal("%s:", device_xname(dev));
974 /* NVM Image Version */
975 high = low = 0;
976 switch (hw->mac.type) {
977 case ixgbe_mac_X540:
978 case ixgbe_mac_X550EM_a:
979 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
980 if (nvmreg == 0xffff)
981 break;
982 high = (nvmreg >> 12) & 0x0f;
983 low = (nvmreg >> 4) & 0xff;
984 id = nvmreg & 0x0f;
985 aprint_normal(" NVM Image Version %u.", high);
986 if (hw->mac.type == ixgbe_mac_X540)
987 str = "%x";
988 else
989 str = "%02x";
990 aprint_normal(str, low);
991 aprint_normal(" ID 0x%x,", id);
992 break;
993 case ixgbe_mac_X550EM_x:
994 case ixgbe_mac_X550:
995 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
996 if (nvmreg == 0xffff)
997 break;
998 high = (nvmreg >> 12) & 0x0f;
999 low = nvmreg & 0xff;
1000 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1001 break;
1002 default:
1003 break;
1004 }
1005 hw->eeprom.nvm_image_ver_high = high;
1006 hw->eeprom.nvm_image_ver_low = low;
1007
1008 /* PHY firmware revision */
1009 switch (hw->mac.type) {
1010 case ixgbe_mac_X540:
1011 case ixgbe_mac_X550:
1012 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1013 if (nvmreg == 0xffff)
1014 break;
1015 high = (nvmreg >> 12) & 0x0f;
1016 low = (nvmreg >> 4) & 0xff;
1017 id = nvmreg & 0x000f;
1018 aprint_normal(" PHY FW Revision %u.", high);
1019 if (hw->mac.type == ixgbe_mac_X540)
1020 str = "%x";
1021 else
1022 str = "%02x";
1023 aprint_normal(str, low);
1024 aprint_normal(" ID 0x%x,", id);
1025 break;
1026 default:
1027 break;
1028 }
1029
1030 /* NVM Map version & OEM NVM Image version */
1031 switch (hw->mac.type) {
1032 case ixgbe_mac_X550:
1033 case ixgbe_mac_X550EM_x:
1034 case ixgbe_mac_X550EM_a:
1035 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1036 if (nvmreg != 0xffff) {
1037 high = (nvmreg >> 12) & 0x0f;
1038 low = nvmreg & 0x00ff;
1039 aprint_normal(" NVM Map version %u.%02x,", high, low);
1040 }
1041 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1042 if (nvmreg != 0xffff) {
1043 high = (nvmreg >> 12) & 0x0f;
1044 low = nvmreg & 0x00ff;
1045 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1046 low);
1047 }
1048 break;
1049 default:
1050 break;
1051 }
1052
1053 /* Print the ETrackID */
1054 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1055 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1056 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1057
1058 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1059 error = ixgbe_allocate_msix(adapter, pa);
1060 if (error) {
1061 /* Free allocated queue structures first */
1062 ixgbe_free_transmit_structures(adapter);
1063 ixgbe_free_receive_structures(adapter);
1064 free(adapter->queues, M_DEVBUF);
1065
1066 /* Fallback to legacy interrupt */
1067 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1068 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1069 adapter->feat_en |= IXGBE_FEATURE_MSI;
1070 adapter->num_queues = 1;
1071
1072 /* Allocate our TX/RX Queues again */
1073 if (ixgbe_allocate_queues(adapter)) {
1074 error = ENOMEM;
1075 goto err_out;
1076 }
1077 }
1078 }
1079 /* Recovery mode */
1080 switch (adapter->hw.mac.type) {
1081 case ixgbe_mac_X550:
1082 case ixgbe_mac_X550EM_x:
1083 case ixgbe_mac_X550EM_a:
1084 /* >= 2.00 */
1085 if (hw->eeprom.nvm_image_ver_high >= 2) {
1086 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1087 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1088 }
1089 break;
1090 default:
1091 break;
1092 }
1093
1094 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1095 error = ixgbe_allocate_legacy(adapter, pa);
1096 if (error)
1097 goto err_late;
1098
1099 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1100 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_handle_link, adapter);
1102 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1103 ixgbe_handle_mod, adapter);
1104 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1105 ixgbe_handle_msf, adapter);
1106 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_handle_phy, adapter);
1108 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1109 adapter->fdir_si =
1110 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1111 ixgbe_reinit_fdir, adapter);
1112 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1113 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1114 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1115 && (adapter->fdir_si == NULL))) {
1116 aprint_error_dev(dev,
1117 "could not establish software interrupts ()\n");
1118 goto err_out;
1119 }
1120
1121 error = ixgbe_start_hw(hw);
1122 switch (error) {
1123 case IXGBE_ERR_EEPROM_VERSION:
1124 aprint_error_dev(dev, "This device is a pre-production adapter/"
1125 "LOM. Please be aware there may be issues associated "
1126 "with your hardware.\nIf you are experiencing problems "
1127 "please contact your Intel or hardware representative "
1128 "who provided you with this hardware.\n");
1129 break;
1130 default:
1131 break;
1132 }
1133
1134 /* Setup OS specific network interface */
1135 if (ixgbe_setup_interface(dev, adapter) != 0)
1136 goto err_late;
1137
1138 /*
1139 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1140 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1141 */
1142 if (hw->phy.media_type == ixgbe_media_type_copper) {
1143 uint16_t id1, id2;
1144 int oui, model, rev;
1145 const char *descr;
1146
1147 id1 = hw->phy.id >> 16;
1148 id2 = hw->phy.id & 0xffff;
1149 oui = MII_OUI(id1, id2);
1150 model = MII_MODEL(id2);
1151 rev = MII_REV(id2);
1152 if ((descr = mii_get_descr(oui, model)) != NULL)
1153 aprint_normal_dev(dev,
1154 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1155 descr, oui, model, rev);
1156 else
1157 aprint_normal_dev(dev,
1158 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1159 oui, model, rev);
1160 }
1161
1162 /* Enable EEE power saving */
1163 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1164 hw->mac.ops.setup_eee(hw,
1165 adapter->feat_en & IXGBE_FEATURE_EEE);
1166
1167 /* Enable power to the phy. */
1168 if (!unsupported_sfp) {
1169 /* Enable the optics for 82599 SFP+ fiber */
1170 ixgbe_enable_tx_laser(hw);
1171
1172 /*
1173 * XXX Currently, ixgbe_set_phy_power() supports only copper
1174 * PHY, so it's not required to test with !unsupported_sfp.
1175 */
1176 ixgbe_set_phy_power(hw, TRUE);
1177 }
1178
1179 /* Initialize statistics */
1180 ixgbe_update_stats_counters(adapter);
1181
1182 /* Check PCIE slot type/speed/width */
1183 ixgbe_get_slot_info(adapter);
1184
1185 /*
1186 * Do time init and sysctl init here, but
1187 * only on the first port of a bypass adapter.
1188 */
1189 ixgbe_bypass_init(adapter);
1190
1191 /* Set an initial dmac value */
1192 adapter->dmac = 0;
1193 /* Set initial advertised speeds (if applicable) */
1194 adapter->advertise = ixgbe_get_advertise(adapter);
1195
1196 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1197 ixgbe_define_iov_schemas(dev, &error);
1198
1199 /* Add sysctls */
1200 ixgbe_add_device_sysctls(adapter);
1201 ixgbe_add_hw_stats(adapter);
1202
1203 /* For Netmap */
1204 adapter->init_locked = ixgbe_init_locked;
1205 adapter->stop_locked = ixgbe_stop;
1206
1207 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1208 ixgbe_netmap_attach(adapter);
1209
1210 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1211 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1212 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1213 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1214
1215 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1216 pmf_class_network_register(dev, adapter->ifp);
1217 else
1218 aprint_error_dev(dev, "couldn't establish power handler\n");
1219
1220 /* Init recovery mode timer and state variable */
1221 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1222 adapter->recovery_mode = 0;
1223
1224 /* Set up the timer callout */
1225 callout_init(&adapter->recovery_mode_timer,
1226 IXGBE_CALLOUT_FLAGS);
1227
1228 /* Start the task */
1229 callout_reset(&adapter->recovery_mode_timer, hz,
1230 ixgbe_recovery_mode_timer, adapter);
1231 }
1232
1233 INIT_DEBUGOUT("ixgbe_attach: end");
1234 adapter->osdep.attached = true;
1235
1236 return;
1237
1238 err_late:
1239 ixgbe_free_transmit_structures(adapter);
1240 ixgbe_free_receive_structures(adapter);
1241 free(adapter->queues, M_DEVBUF);
1242 err_out:
1243 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1244 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1245 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1246 ixgbe_free_softint(adapter);
1247 ixgbe_free_pci_resources(adapter);
1248 if (adapter->mta != NULL)
1249 free(adapter->mta, M_DEVBUF);
1250 IXGBE_CORE_LOCK_DESTROY(adapter);
1251
1252 return;
1253 } /* ixgbe_attach */
1254
1255 /************************************************************************
1256 * ixgbe_check_wol_support
1257 *
1258 * Checks whether the adapter's ports are capable of
1259 * Wake On LAN by reading the adapter's NVM.
1260 *
1261 * Sets each port's hw->wol_enabled value depending
1262 * on the value read here.
1263 ************************************************************************/
1264 static void
1265 ixgbe_check_wol_support(struct adapter *adapter)
1266 {
1267 struct ixgbe_hw *hw = &adapter->hw;
1268 u16 dev_caps = 0;
1269
1270 /* Find out WoL support for port */
1271 adapter->wol_support = hw->wol_enabled = 0;
1272 ixgbe_get_device_caps(hw, &dev_caps);
1273 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1274 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1275 hw->bus.func == 0))
1276 adapter->wol_support = hw->wol_enabled = 1;
1277
1278 /* Save initial wake up filter configuration */
1279 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1280
1281 return;
1282 } /* ixgbe_check_wol_support */
1283
1284 /************************************************************************
1285 * ixgbe_setup_interface
1286 *
1287 * Setup networking device structure and register an interface.
1288 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire up our callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Multiqueue transmit via if_transmit unless the legacy single-queue
	 * TX path was selected; if_start is installed either way as the
	 * legacy entry point.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Order matters: if_initialize before ether_ifattach/if_register */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1394
1395 /************************************************************************
1396 * ixgbe_add_media_types
1397 ************************************************************************/
1398 static void
1399 ixgbe_add_media_types(struct adapter *adapter)
1400 {
1401 struct ixgbe_hw *hw = &adapter->hw;
1402 u64 layer;
1403
1404 layer = adapter->phy_layer;
1405
1406 #define ADD(mm, dd) \
1407 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1408
1409 ADD(IFM_NONE, 0);
1410
1411 /* Media types with matching NetBSD media defines */
1412 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1413 ADD(IFM_10G_T | IFM_FDX, 0);
1414 }
1415 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1416 ADD(IFM_1000_T | IFM_FDX, 0);
1417 }
1418 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1419 ADD(IFM_100_TX | IFM_FDX, 0);
1420 }
1421 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1422 ADD(IFM_10_T | IFM_FDX, 0);
1423 }
1424
1425 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1426 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1427 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1428 }
1429
1430 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1431 ADD(IFM_10G_LR | IFM_FDX, 0);
1432 if (hw->phy.multispeed_fiber) {
1433 ADD(IFM_1000_LX | IFM_FDX, 0);
1434 }
1435 }
1436 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1437 ADD(IFM_10G_SR | IFM_FDX, 0);
1438 if (hw->phy.multispeed_fiber) {
1439 ADD(IFM_1000_SX | IFM_FDX, 0);
1440 }
1441 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1442 ADD(IFM_1000_SX | IFM_FDX, 0);
1443 }
1444 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1445 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1446 }
1447
1448 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1449 ADD(IFM_10G_KR | IFM_FDX, 0);
1450 }
1451 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1452 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1453 }
1454 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1455 ADD(IFM_1000_KX | IFM_FDX, 0);
1456 }
1457 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1458 ADD(IFM_2500_KX | IFM_FDX, 0);
1459 }
1460 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1461 ADD(IFM_2500_T | IFM_FDX, 0);
1462 }
1463 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1464 ADD(IFM_5000_T | IFM_FDX, 0);
1465 }
1466 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1467 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1468 /* XXX no ifmedia_set? */
1469
1470 ADD(IFM_AUTO, 0);
1471
1472 #undef ADD
1473 } /* ixgbe_add_media_types */
1474
1475 /************************************************************************
1476 * ixgbe_is_sfp
1477 ************************************************************************/
1478 static inline bool
1479 ixgbe_is_sfp(struct ixgbe_hw *hw)
1480 {
1481 switch (hw->mac.type) {
1482 case ixgbe_mac_82598EB:
1483 if (hw->phy.type == ixgbe_phy_nl)
1484 return (TRUE);
1485 return (FALSE);
1486 case ixgbe_mac_82599EB:
1487 case ixgbe_mac_X550EM_x:
1488 case ixgbe_mac_X550EM_a:
1489 switch (hw->mac.ops.get_media_type(hw)) {
1490 case ixgbe_media_type_fiber:
1491 case ixgbe_media_type_fiber_qsfp:
1492 return (TRUE);
1493 default:
1494 return (FALSE);
1495 }
1496 default:
1497 return (FALSE);
1498 }
1499 } /* ixgbe_is_sfp */
1500
1501 /************************************************************************
1502 * ixgbe_config_link
1503 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * Pluggable optics: defer link setup to the module (and,
		 * for multispeed fiber, the multispeed) softints, which
		 * run the SFP probe outside this context.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		struct ifmedia *ifm = &adapter->media;

		/* Fixed media: query current link state, then set up link */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		/* NOTE(review): setup_link errors are silently dropped */
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1549
1550 /************************************************************************
1551 * ixgbe_update_stats_counters - Update board statistics counters.
1552 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/*
	 * NOTE(review): missed_rx is never incremented here (the per-TC
	 * misses accumulate only into total_missed_rx), so the gprc
	 * "workaround" subtraction below is currently a no-op — confirm
	 * against the FreeBSD original whether this is intentional.
	 */
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	unsigned int queue_counters;
	int i;

	/* Harvest the clear-on-read error counters first */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		/* Per-TC priority flow control counters */
		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	/*
	 * 82599 and later have 64-bit byte counters split across a
	 * low/high register pair; transmitted PAUSE frames are deducted
	 * from the good-octets TX count.
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet-size histogram */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, with PAUSE frames (total) deducted */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1714
1715 /************************************************************************
1716 * ixgbe_add_hw_stats
1717 *
1718 * Add sysctl variables, one per statistic, to the system.
1719 ************************************************************************/
1720 static void
1721 ixgbe_add_hw_stats(struct adapter *adapter)
1722 {
1723 device_t dev = adapter->dev;
1724 const struct sysctlnode *rnode, *cnode;
1725 struct sysctllog **log = &adapter->sysctllog;
1726 struct tx_ring *txr = adapter->tx_rings;
1727 struct rx_ring *rxr = adapter->rx_rings;
1728 struct ixgbe_hw *hw = &adapter->hw;
1729 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1730 const char *xname = device_xname(dev);
1731 int i;
1732
1733 /* Driver Statistics */
1734 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1735 NULL, xname, "Driver tx dma soft fail EFBIG");
1736 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1737 NULL, xname, "m_defrag() failed");
1738 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1739 NULL, xname, "Driver tx dma hard fail EFBIG");
1740 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1741 NULL, xname, "Driver tx dma hard fail EINVAL");
1742 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1743 NULL, xname, "Driver tx dma hard fail other");
1744 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1745 NULL, xname, "Driver tx dma soft fail EAGAIN");
1746 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1747 NULL, xname, "Driver tx dma soft fail ENOMEM");
1748 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1749 NULL, xname, "Watchdog timeouts");
1750 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1751 NULL, xname, "TSO errors");
1752 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1753 NULL, xname, "Link MSI-X IRQ Handled");
1754 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1755 NULL, xname, "Link softint");
1756 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1757 NULL, xname, "module softint");
1758 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1759 NULL, xname, "multimode softint");
1760 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1761 NULL, xname, "external PHY softint");
1762
1763 /* Max number of traffic class is 8 */
1764 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1765 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1766 snprintf(adapter->tcs[i].evnamebuf,
1767 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1768 xname, i);
1769 if (i < __arraycount(stats->mpc)) {
1770 evcnt_attach_dynamic(&stats->mpc[i],
1771 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1772 "RX Missed Packet Count");
1773 if (hw->mac.type == ixgbe_mac_82598EB)
1774 evcnt_attach_dynamic(&stats->rnbc[i],
1775 EVCNT_TYPE_MISC, NULL,
1776 adapter->tcs[i].evnamebuf,
1777 "Receive No Buffers");
1778 }
1779 if (i < __arraycount(stats->pxontxc)) {
1780 evcnt_attach_dynamic(&stats->pxontxc[i],
1781 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1782 "pxontxc");
1783 evcnt_attach_dynamic(&stats->pxonrxc[i],
1784 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1785 "pxonrxc");
1786 evcnt_attach_dynamic(&stats->pxofftxc[i],
1787 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1788 "pxofftxc");
1789 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1790 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1791 "pxoffrxc");
1792 if (hw->mac.type >= ixgbe_mac_82599EB)
1793 evcnt_attach_dynamic(&stats->pxon2offc[i],
1794 EVCNT_TYPE_MISC, NULL,
1795 adapter->tcs[i].evnamebuf,
1796 "pxon2offc");
1797 }
1798 }
1799
1800 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1801 #ifdef LRO
1802 struct lro_ctrl *lro = &rxr->lro;
1803 #endif /* LRO */
1804
1805 snprintf(adapter->queues[i].evnamebuf,
1806 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1807 xname, i);
1808 snprintf(adapter->queues[i].namebuf,
1809 sizeof(adapter->queues[i].namebuf), "q%d", i);
1810
1811 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1812 aprint_error_dev(dev, "could not create sysctl root\n");
1813 break;
1814 }
1815
1816 if (sysctl_createv(log, 0, &rnode, &rnode,
1817 0, CTLTYPE_NODE,
1818 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1819 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1820 break;
1821
1822 if (sysctl_createv(log, 0, &rnode, &cnode,
1823 CTLFLAG_READWRITE, CTLTYPE_INT,
1824 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1825 ixgbe_sysctl_interrupt_rate_handler, 0,
1826 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1827 break;
1828
1829 if (sysctl_createv(log, 0, &rnode, &cnode,
1830 CTLFLAG_READONLY, CTLTYPE_INT,
1831 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1832 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1833 0, CTL_CREATE, CTL_EOL) != 0)
1834 break;
1835
1836 if (sysctl_createv(log, 0, &rnode, &cnode,
1837 CTLFLAG_READONLY, CTLTYPE_INT,
1838 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1839 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1840 0, CTL_CREATE, CTL_EOL) != 0)
1841 break;
1842
1843 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1844 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1845 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1846 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1847 "Handled queue in softint");
1848 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1849 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1850 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1851 NULL, adapter->queues[i].evnamebuf, "TSO");
1852 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1853 NULL, adapter->queues[i].evnamebuf,
1854 "Queue No Descriptor Available");
1855 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1856 NULL, adapter->queues[i].evnamebuf,
1857 "Queue Packets Transmitted");
1858 #ifndef IXGBE_LEGACY_TX
1859 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1860 NULL, adapter->queues[i].evnamebuf,
1861 "Packets dropped in pcq");
1862 #endif
1863
1864 if (sysctl_createv(log, 0, &rnode, &cnode,
1865 CTLFLAG_READONLY,
1866 CTLTYPE_INT,
1867 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1868 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1869 CTL_CREATE, CTL_EOL) != 0)
1870 break;
1871
1872 if (sysctl_createv(log, 0, &rnode, &cnode,
1873 CTLFLAG_READONLY,
1874 CTLTYPE_INT,
1875 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1876 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1877 CTL_CREATE, CTL_EOL) != 0)
1878 break;
1879
1880 if (sysctl_createv(log, 0, &rnode, &cnode,
1881 CTLFLAG_READONLY,
1882 CTLTYPE_INT,
1883 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1884 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1885 CTL_CREATE, CTL_EOL) != 0)
1886 break;
1887
1888 if (i < __arraycount(stats->qprc)) {
1889 evcnt_attach_dynamic(&stats->qprc[i],
1890 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1891 "qprc");
1892 evcnt_attach_dynamic(&stats->qptc[i],
1893 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1894 "qptc");
1895 evcnt_attach_dynamic(&stats->qbrc[i],
1896 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1897 "qbrc");
1898 evcnt_attach_dynamic(&stats->qbtc[i],
1899 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1900 "qbtc");
1901 if (hw->mac.type >= ixgbe_mac_82599EB)
1902 evcnt_attach_dynamic(&stats->qprdc[i],
1903 EVCNT_TYPE_MISC, NULL,
1904 adapter->queues[i].evnamebuf, "qprdc");
1905 }
1906
1907 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1908 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1909 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1910 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1911 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1912 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1913 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1914 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1915 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1916 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1917 #ifdef LRO
1918 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1919 CTLFLAG_RD, &lro->lro_queued, 0,
1920 "LRO Queued");
1921 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1922 CTLFLAG_RD, &lro->lro_flushed, 0,
1923 "LRO Flushed");
1924 #endif /* LRO */
1925 }
1926
1927 /* MAC stats get their own sub node */
1928
1929 snprintf(stats->namebuf,
1930 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1931
1932 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1933 stats->namebuf, "rx csum offload - IP");
1934 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1935 stats->namebuf, "rx csum offload - L4");
1936 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1937 stats->namebuf, "rx csum offload - IP bad");
1938 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1939 stats->namebuf, "rx csum offload - L4 bad");
1940 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "Interrupt conditions zero");
1942 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "Legacy interrupts");
1944
1945 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "CRC Errors");
1947 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Illegal Byte Errors");
1949 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "Byte Errors");
1951 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "MAC Short Packets Discarded");
1953 if (hw->mac.type >= ixgbe_mac_X550)
1954 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Bad SFD");
1956 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "Total Packets Missed");
1958 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "MAC Local Faults");
1960 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "MAC Remote Faults");
1962 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "Receive Length Errors");
1964 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "Link XON Transmitted");
1966 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "Link XON Received");
1968 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "Link XOFF Transmitted");
1970 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "Link XOFF Received");
1972
1973 /* Packet Reception Stats */
1974 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Total Octets Received");
1976 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Good Octets Received");
1978 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Total Packets Received");
1980 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "Good Packets Received");
1982 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "Multicast Packets Received");
1984 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Broadcast Packets Received");
1986 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "64 byte frames received ");
1988 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "65-127 byte frames received");
1990 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "128-255 byte frames received");
1992 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "256-511 byte frames received");
1994 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "512-1023 byte frames received");
1996 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "1023-1522 byte frames received");
1998 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "Receive Undersized");
2000 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Fragmented Packets Received ");
2002 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Oversized Packets Received");
2004 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Received Jabber");
2006 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Management Packets Received");
2008 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Management Packets Dropped");
2010 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Checksum Errors");
2012
2013 /* Packet Transmission Stats */
2014 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Good Octets Transmitted");
2016 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Total Packets Transmitted");
2018 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Good Packets Transmitted");
2020 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Broadcast Packets Transmitted");
2022 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Multicast Packets Transmitted");
2024 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Management Packets Transmitted");
2026 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "64 byte frames transmitted ");
2028 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "65-127 byte frames transmitted");
2030 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "128-255 byte frames transmitted");
2032 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "256-511 byte frames transmitted");
2034 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "512-1023 byte frames transmitted");
2036 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "1024-1522 byte frames transmitted");
2038 } /* ixgbe_add_hw_stats */
2039
2040 static void
2041 ixgbe_clear_evcnt(struct adapter *adapter)
2042 {
2043 struct tx_ring *txr = adapter->tx_rings;
2044 struct rx_ring *rxr = adapter->rx_rings;
2045 struct ixgbe_hw *hw = &adapter->hw;
2046 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2047 int i;
2048
2049 adapter->efbig_tx_dma_setup.ev_count = 0;
2050 adapter->mbuf_defrag_failed.ev_count = 0;
2051 adapter->efbig2_tx_dma_setup.ev_count = 0;
2052 adapter->einval_tx_dma_setup.ev_count = 0;
2053 adapter->other_tx_dma_setup.ev_count = 0;
2054 adapter->eagain_tx_dma_setup.ev_count = 0;
2055 adapter->enomem_tx_dma_setup.ev_count = 0;
2056 adapter->tso_err.ev_count = 0;
2057 adapter->watchdog_events.ev_count = 0;
2058 adapter->link_irq.ev_count = 0;
2059 adapter->link_sicount.ev_count = 0;
2060 adapter->mod_sicount.ev_count = 0;
2061 adapter->msf_sicount.ev_count = 0;
2062 adapter->phy_sicount.ev_count = 0;
2063
2064 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2065 if (i < __arraycount(stats->mpc)) {
2066 stats->mpc[i].ev_count = 0;
2067 if (hw->mac.type == ixgbe_mac_82598EB)
2068 stats->rnbc[i].ev_count = 0;
2069 }
2070 if (i < __arraycount(stats->pxontxc)) {
2071 stats->pxontxc[i].ev_count = 0;
2072 stats->pxonrxc[i].ev_count = 0;
2073 stats->pxofftxc[i].ev_count = 0;
2074 stats->pxoffrxc[i].ev_count = 0;
2075 if (hw->mac.type >= ixgbe_mac_82599EB)
2076 stats->pxon2offc[i].ev_count = 0;
2077 }
2078 }
2079
2080 txr = adapter->tx_rings;
2081 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2082 adapter->queues[i].irqs.ev_count = 0;
2083 adapter->queues[i].handleq.ev_count = 0;
2084 adapter->queues[i].req.ev_count = 0;
2085 txr->no_desc_avail.ev_count = 0;
2086 txr->total_packets.ev_count = 0;
2087 txr->tso_tx.ev_count = 0;
2088 #ifndef IXGBE_LEGACY_TX
2089 txr->pcq_drops.ev_count = 0;
2090 #endif
2091 txr->q_efbig_tx_dma_setup = 0;
2092 txr->q_mbuf_defrag_failed = 0;
2093 txr->q_efbig2_tx_dma_setup = 0;
2094 txr->q_einval_tx_dma_setup = 0;
2095 txr->q_other_tx_dma_setup = 0;
2096 txr->q_eagain_tx_dma_setup = 0;
2097 txr->q_enomem_tx_dma_setup = 0;
2098 txr->q_tso_err = 0;
2099
2100 if (i < __arraycount(stats->qprc)) {
2101 stats->qprc[i].ev_count = 0;
2102 stats->qptc[i].ev_count = 0;
2103 stats->qbrc[i].ev_count = 0;
2104 stats->qbtc[i].ev_count = 0;
2105 if (hw->mac.type >= ixgbe_mac_82599EB)
2106 stats->qprdc[i].ev_count = 0;
2107 }
2108
2109 rxr->rx_packets.ev_count = 0;
2110 rxr->rx_bytes.ev_count = 0;
2111 rxr->rx_copies.ev_count = 0;
2112 rxr->no_jmbuf.ev_count = 0;
2113 rxr->rx_discarded.ev_count = 0;
2114 }
2115 stats->ipcs.ev_count = 0;
2116 stats->l4cs.ev_count = 0;
2117 stats->ipcs_bad.ev_count = 0;
2118 stats->l4cs_bad.ev_count = 0;
2119 stats->intzero.ev_count = 0;
2120 stats->legint.ev_count = 0;
2121 stats->crcerrs.ev_count = 0;
2122 stats->illerrc.ev_count = 0;
2123 stats->errbc.ev_count = 0;
2124 stats->mspdc.ev_count = 0;
2125 if (hw->mac.type >= ixgbe_mac_X550)
2126 stats->mbsdc.ev_count = 0;
2127 stats->mpctotal.ev_count = 0;
2128 stats->mlfc.ev_count = 0;
2129 stats->mrfc.ev_count = 0;
2130 stats->rlec.ev_count = 0;
2131 stats->lxontxc.ev_count = 0;
2132 stats->lxonrxc.ev_count = 0;
2133 stats->lxofftxc.ev_count = 0;
2134 stats->lxoffrxc.ev_count = 0;
2135
2136 /* Packet Reception Stats */
2137 stats->tor.ev_count = 0;
2138 stats->gorc.ev_count = 0;
2139 stats->tpr.ev_count = 0;
2140 stats->gprc.ev_count = 0;
2141 stats->mprc.ev_count = 0;
2142 stats->bprc.ev_count = 0;
2143 stats->prc64.ev_count = 0;
2144 stats->prc127.ev_count = 0;
2145 stats->prc255.ev_count = 0;
2146 stats->prc511.ev_count = 0;
2147 stats->prc1023.ev_count = 0;
2148 stats->prc1522.ev_count = 0;
2149 stats->ruc.ev_count = 0;
2150 stats->rfc.ev_count = 0;
2151 stats->roc.ev_count = 0;
2152 stats->rjc.ev_count = 0;
2153 stats->mngprc.ev_count = 0;
2154 stats->mngpdc.ev_count = 0;
2155 stats->xec.ev_count = 0;
2156
2157 /* Packet Transmission Stats */
2158 stats->gotc.ev_count = 0;
2159 stats->tpt.ev_count = 0;
2160 stats->gptc.ev_count = 0;
2161 stats->bptc.ev_count = 0;
2162 stats->mptc.ev_count = 0;
2163 stats->mngptc.ev_count = 0;
2164 stats->ptc64.ev_count = 0;
2165 stats->ptc127.ev_count = 0;
2166 stats->ptc255.ev_count = 0;
2167 stats->ptc511.ev_count = 0;
2168 stats->ptc1023.ev_count = 0;
2169 stats->ptc1522.ev_count = 0;
2170 }
2171
2172 /************************************************************************
2173 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2174 *
2175 * Retrieves the TDH value from the hardware
2176 ************************************************************************/
2177 static int
2178 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2179 {
2180 struct sysctlnode node = *rnode;
2181 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2182 struct adapter *adapter;
2183 uint32_t val;
2184
2185 if (!txr)
2186 return (0);
2187
2188 adapter = txr->adapter;
2189 if (ixgbe_fw_recovery_mode_swflag(adapter))
2190 return (EPERM);
2191
2192 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2193 node.sysctl_data = &val;
2194 return sysctl_lookup(SYSCTLFN_CALL(&node));
2195 } /* ixgbe_sysctl_tdh_handler */
2196
2197 /************************************************************************
2198 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2199 *
2200 * Retrieves the TDT value from the hardware
2201 ************************************************************************/
2202 static int
2203 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2204 {
2205 struct sysctlnode node = *rnode;
2206 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2207 struct adapter *adapter;
2208 uint32_t val;
2209
2210 if (!txr)
2211 return (0);
2212
2213 adapter = txr->adapter;
2214 if (ixgbe_fw_recovery_mode_swflag(adapter))
2215 return (EPERM);
2216
2217 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2218 node.sysctl_data = &val;
2219 return sysctl_lookup(SYSCTLFN_CALL(&node));
2220 } /* ixgbe_sysctl_tdt_handler */
2221
2222 /************************************************************************
2223 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2224 * handler function
2225 *
2226 * Retrieves the next_to_check value
2227 ************************************************************************/
2228 static int
2229 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2230 {
2231 struct sysctlnode node = *rnode;
2232 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2233 struct adapter *adapter;
2234 uint32_t val;
2235
2236 if (!rxr)
2237 return (0);
2238
2239 adapter = rxr->adapter;
2240 if (ixgbe_fw_recovery_mode_swflag(adapter))
2241 return (EPERM);
2242
2243 val = rxr->next_to_check;
2244 node.sysctl_data = &val;
2245 return sysctl_lookup(SYSCTLFN_CALL(&node));
2246 } /* ixgbe_sysctl_next_to_check_handler */
2247
2248 /************************************************************************
2249 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2250 *
2251 * Retrieves the RDH value from the hardware
2252 ************************************************************************/
2253 static int
2254 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2255 {
2256 struct sysctlnode node = *rnode;
2257 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2258 struct adapter *adapter;
2259 uint32_t val;
2260
2261 if (!rxr)
2262 return (0);
2263
2264 adapter = rxr->adapter;
2265 if (ixgbe_fw_recovery_mode_swflag(adapter))
2266 return (EPERM);
2267
2268 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2269 node.sysctl_data = &val;
2270 return sysctl_lookup(SYSCTLFN_CALL(&node));
2271 } /* ixgbe_sysctl_rdh_handler */
2272
2273 /************************************************************************
2274 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2275 *
2276 * Retrieves the RDT value from the hardware
2277 ************************************************************************/
2278 static int
2279 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2280 {
2281 struct sysctlnode node = *rnode;
2282 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2283 struct adapter *adapter;
2284 uint32_t val;
2285
2286 if (!rxr)
2287 return (0);
2288
2289 adapter = rxr->adapter;
2290 if (ixgbe_fw_recovery_mode_swflag(adapter))
2291 return (EPERM);
2292
2293 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2294 node.sysctl_data = &val;
2295 return sysctl_lookup(SYSCTLFN_CALL(&node));
2296 } /* ixgbe_sysctl_rdt_handler */
2297
2298 static int
2299 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2300 {
2301 struct ifnet *ifp = &ec->ec_if;
2302 struct adapter *adapter = ifp->if_softc;
2303 int rv;
2304
2305 if (set)
2306 rv = ixgbe_register_vlan(adapter, vid);
2307 else
2308 rv = ixgbe_unregister_vlan(adapter, vid);
2309
2310 if (rv != 0)
2311 return rv;
2312
2313 /*
2314 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2315 * or 0 to 1.
2316 */
2317 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2318 ixgbe_setup_vlan_hw_tagging(adapter);
2319
2320 return rv;
2321 }
2322
2323 /************************************************************************
2324 * ixgbe_register_vlan
2325 *
2326 * Run via vlan config EVENT, it enables us to use the
2327 * HW Filter table since we can get the vlan id. This
2328 * just creates the entry in the soft version of the
2329 * VFTA, init will repopulate the real table.
2330 ************************************************************************/
2331 static int
2332 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2333 {
2334 u16 index, bit;
2335 int error;
2336
2337 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2338 return EINVAL;
2339
2340 IXGBE_CORE_LOCK(adapter);
2341 index = (vtag >> 5) & 0x7F;
2342 bit = vtag & 0x1F;
2343 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2344 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2345 true);
2346 IXGBE_CORE_UNLOCK(adapter);
2347 if (error != 0)
2348 error = EACCES;
2349
2350 return error;
2351 } /* ixgbe_register_vlan */
2352
2353 /************************************************************************
2354 * ixgbe_unregister_vlan
2355 *
2356 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2357 ************************************************************************/
2358 static int
2359 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2360 {
2361 u16 index, bit;
2362 int error;
2363
2364 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2365 return EINVAL;
2366
2367 IXGBE_CORE_LOCK(adapter);
2368 index = (vtag >> 5) & 0x7F;
2369 bit = vtag & 0x1F;
2370 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2371 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2372 true);
2373 IXGBE_CORE_UNLOCK(adapter);
2374 if (error != 0)
2375 error = EACCES;
2376
2377 return error;
2378 } /* ixgbe_unregister_vlan */
2379
2380 static void
2381 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2382 {
2383 struct ethercom *ec = &adapter->osdep.ec;
2384 struct ixgbe_hw *hw = &adapter->hw;
2385 struct rx_ring *rxr;
2386 u32 ctrl;
2387 int i;
2388 bool hwtagging;
2389
2390 /* Enable HW tagging only if any vlan is attached */
2391 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2392 && VLAN_ATTACHED(ec);
2393
2394 /* Setup the queues for vlans */
2395 for (i = 0; i < adapter->num_queues; i++) {
2396 rxr = &adapter->rx_rings[i];
2397 /*
2398 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2399 */
2400 if (hw->mac.type != ixgbe_mac_82598EB) {
2401 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2402 if (hwtagging)
2403 ctrl |= IXGBE_RXDCTL_VME;
2404 else
2405 ctrl &= ~IXGBE_RXDCTL_VME;
2406 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2407 }
2408 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2409 }
2410
2411 /* VLAN hw tagging for 82598 */
2412 if (hw->mac.type == ixgbe_mac_82598EB) {
2413 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2414 if (hwtagging)
2415 ctrl |= IXGBE_VLNCTRL_VME;
2416 else
2417 ctrl &= ~IXGBE_VLNCTRL_VME;
2418 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2419 }
2420 } /* ixgbe_setup_vlan_hw_tagging */
2421
2422 static void
2423 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2424 {
2425 struct ethercom *ec = &adapter->osdep.ec;
2426 struct ixgbe_hw *hw = &adapter->hw;
2427 int i;
2428 u32 ctrl;
2429 struct vlanid_list *vlanidp;
2430
2431 /*
2432 * This function is called from both if_init and ifflags_cb()
2433 * on NetBSD.
2434 */
2435
2436 /*
2437 * Part 1:
2438 * Setup VLAN HW tagging
2439 */
2440 ixgbe_setup_vlan_hw_tagging(adapter);
2441
2442 /*
2443 * Part 2:
2444 * Setup VLAN HW filter
2445 */
2446 /* Cleanup shadow_vfta */
2447 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2448 adapter->shadow_vfta[i] = 0;
2449 /* Generate shadow_vfta from ec_vids */
2450 ETHER_LOCK(ec);
2451 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2452 uint32_t idx;
2453
2454 idx = vlanidp->vid / 32;
2455 KASSERT(idx < IXGBE_VFTA_SIZE);
2456 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2457 }
2458 ETHER_UNLOCK(ec);
2459 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2460 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2461
2462 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2463 /* Enable the Filter Table if enabled */
2464 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2465 ctrl |= IXGBE_VLNCTRL_VFE;
2466 else
2467 ctrl &= ~IXGBE_VLNCTRL_VFE;
2468 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2469 } /* ixgbe_setup_vlan_hw_support */
2470
2471 /************************************************************************
2472 * ixgbe_get_slot_info
2473 *
2474 * Get the width and transaction speed of
2475 * the slot this adapter is plugged into.
2476 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/*
	 * Some devices are behind an internal bridge, so the common
	 * bus-info query would report the internal link, not the slot.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/*
	 * ...and read the Link Status Register; the Link Status field
	 * occupies the upper 16 bits of the LCSR word, hence the shift.
	 */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn if the slot cannot supply enough PCIe bandwidth. */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		     (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		     (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2578
2579 /************************************************************************
2580 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2581 ************************************************************************/
2582 static inline void
2583 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2584 {
2585 struct ixgbe_hw *hw = &adapter->hw;
2586 struct ix_queue *que = &adapter->queues[vector];
2587 u64 queue = 1ULL << vector;
2588 u32 mask;
2589
2590 mutex_enter(&que->dc_mtx);
2591 if (que->disabled_count > 0 && --que->disabled_count > 0)
2592 goto out;
2593
2594 if (hw->mac.type == ixgbe_mac_82598EB) {
2595 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2596 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2597 } else {
2598 mask = (queue & 0xFFFFFFFF);
2599 if (mask)
2600 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2601 mask = (queue >> 32);
2602 if (mask)
2603 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2604 }
2605 out:
2606 mutex_exit(&que->dc_mtx);
2607 } /* ixgbe_enable_queue */
2608
2609 /************************************************************************
2610 * ixgbe_disable_queue_internal
2611 ************************************************************************/
2612 static inline void
2613 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2614 {
2615 struct ixgbe_hw *hw = &adapter->hw;
2616 struct ix_queue *que = &adapter->queues[vector];
2617 u64 queue = 1ULL << vector;
2618 u32 mask;
2619
2620 mutex_enter(&que->dc_mtx);
2621
2622 if (que->disabled_count > 0) {
2623 if (nestok)
2624 que->disabled_count++;
2625 goto out;
2626 }
2627 que->disabled_count++;
2628
2629 if (hw->mac.type == ixgbe_mac_82598EB) {
2630 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2631 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2632 } else {
2633 mask = (queue & 0xFFFFFFFF);
2634 if (mask)
2635 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2636 mask = (queue >> 32);
2637 if (mask)
2638 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2639 }
2640 out:
2641 mutex_exit(&que->dc_mtx);
2642 } /* ixgbe_disable_queue_internal */
2643
2644 /************************************************************************
2645 * ixgbe_disable_queue
2646 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/* Counted (nestable) disable; paired with ixgbe_enable_queue(). */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2653
2654 /************************************************************************
2655 * ixgbe_sched_handle_que - schedule deferred packet processing
2656 ************************************************************************/
2657 static inline void
2658 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2659 {
2660
2661 if (que->txrx_use_workqueue) {
2662 /*
2663 * adapter->que_wq is bound to each CPU instead of
2664 * each NIC queue to reduce workqueue kthread. As we
2665 * should consider about interrupt affinity in this
2666 * function, the workqueue kthread must be WQ_PERCPU.
2667 * If create WQ_PERCPU workqueue kthread for each NIC
2668 * queue, that number of created workqueue kthread is
2669 * (number of used NIC queue) * (number of CPUs) =
2670 * (number of CPUs) ^ 2 most often.
2671 *
2672 * The same NIC queue's interrupts are avoided by
2673 * masking the queue's interrupt. And different
2674 * NIC queue's interrupts use different struct work
2675 * (que->wq_cookie). So, "enqueued flag" to avoid
2676 * twice workqueue_enqueue() is not required .
2677 */
2678 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2679 } else {
2680 softint_schedule(que->que_si);
2681 }
2682 }
2683
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Masks the queue's interrupt, reaps completed TX descriptors, and
 *   optionally recalculates the adaptive interrupt moderation (AIM)
 *   setting from the byte/packet counts accumulated since the last
 *   interrupt.  RX processing (on NetBSD) is always deferred to
 *   softint/workqueue context, which re-enables the interrupt when
 *   it finishes.
 *
 *   Returns 0 when the interrupt is ignored (interface not running),
 *   1 otherwise.
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue until deferred processing re-enables it. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 * the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Candidate ITR: the larger of the TX/RX average packet sizes. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt; written out at the top of the next ISR */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2785
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.  Refreshes the link state, then
 *   maps (physical layer, link speed) onto an ifmedia subtype and
 *   reports the current flow-control pause settings.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T / BASE-TX) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach / long-reach-multimode fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	/* Backplane (KR / KX4 / KX) */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2933
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.  Translates the requested
 *   ifmedia subtype into a link-speed mask, restarts link setup, and
 *   records the equivalent "advertise_speed" sysctl encoding.
 *
 *   Returns 0 on success; EINVAL for a non-Ethernet or unknown media
 *   type, EPERM for backplane media, ENODEV if the supported speeds
 *   cannot be determined for IFM_AUTO.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from here. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware reports it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		/* speed stays 0: no speed advertised */
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Mirror the selection into the "advertise_speed" sysctl
	 * encoding: bit 0 = 100M, bit 1 = 1G, bit 2 = 10G,
	 * bit 3 = 10M, bit 4 = 2.5G, bit 5 = 5G (0 == autonegotiate).
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
3039
3040 /************************************************************************
3041 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3042 ************************************************************************/
3043 static int
3044 ixgbe_msix_link(void *arg)
3045 {
3046 struct adapter *adapter = arg;
3047 struct ixgbe_hw *hw = &adapter->hw;
3048 u32 eicr, eicr_mask;
3049 s32 retval;
3050
3051 ++adapter->link_irq.ev_count;
3052
3053 /* Pause other interrupts */
3054 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3055
3056 /* First get the cause */
3057 /*
3058 * The specifications of 82598, 82599, X540 and X550 say EICS register
3059 * is write only. However, Linux says it is a workaround for silicon
3060 * errata to read EICS instead of EICR to get interrupt cause. It seems
3061 * there is a problem about read clear mechanism for EICR register.
3062 */
3063 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3064 /* Be sure the queue bits are not cleared */
3065 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3066 /* Clear interrupt with write */
3067 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3068
3069 if (ixgbe_is_sfp(hw)) {
3070 /* Pluggable optics-related interrupt */
3071 if (hw->mac.type >= ixgbe_mac_X540)
3072 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3073 else
3074 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3075
3076 /*
3077 * An interrupt might not arrive when a module is inserted.
3078 * When an link status change interrupt occurred and the driver
3079 * still regard SFP as unplugged, issue the module softint
3080 * and then issue LSC interrupt.
3081 */
3082 if ((eicr & eicr_mask)
3083 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3084 && (eicr & IXGBE_EICR_LSC))) {
3085 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3086 softint_schedule(adapter->mod_si);
3087 }
3088
3089 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3090 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3091 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3092 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3093 softint_schedule(adapter->msf_si);
3094 }
3095 }
3096
3097 /* Link status change */
3098 if (eicr & IXGBE_EICR_LSC) {
3099 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3100 softint_schedule(adapter->link_si);
3101 }
3102
3103 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3104 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3105 (eicr & IXGBE_EICR_FLOW_DIR)) {
3106 /* This is probably overkill :) */
3107 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3108 return 1;
3109 /* Disable the interrupt */
3110 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3111 softint_schedule(adapter->fdir_si);
3112 }
3113
3114 if (eicr & IXGBE_EICR_ECC) {
3115 device_printf(adapter->dev,
3116 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3117 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3118 }
3119
3120 /* Check for over temp condition */
3121 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3122 switch (adapter->hw.mac.type) {
3123 case ixgbe_mac_X550EM_a:
3124 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3125 break;
3126 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3127 IXGBE_EICR_GPI_SDP0_X550EM_a);
3128 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3129 IXGBE_EICR_GPI_SDP0_X550EM_a);
3130 retval = hw->phy.ops.check_overtemp(hw);
3131 if (retval != IXGBE_ERR_OVERTEMP)
3132 break;
3133 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3134 device_printf(adapter->dev, "System shutdown required!\n");
3135 break;
3136 default:
3137 if (!(eicr & IXGBE_EICR_TS))
3138 break;
3139 retval = hw->phy.ops.check_overtemp(hw);
3140 if (retval != IXGBE_ERR_OVERTEMP)
3141 break;
3142 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3143 device_printf(adapter->dev, "System shutdown required!\n");
3144 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3145 break;
3146 }
3147 }
3148
3149 /* Check for VF message */
3150 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3151 (eicr & IXGBE_EICR_MAILBOX))
3152 softint_schedule(adapter->mbx_si);
3153 }
3154
3155 /* Check for fan failure */
3156 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3157 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3158 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3159 }
3160
3161 /* External PHY interrupt */
3162 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3163 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3164 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3165 softint_schedule(adapter->phy_si);
3166 }
3167
3168 /* Re-enable other interrupts */
3169 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3170 return 1;
3171 } /* ixgbe_msix_link */
3172
3173 static void
3174 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3175 {
3176
3177 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3178 itr |= itr << 16;
3179 else
3180 itr |= IXGBE_EITR_CNT_WDIS;
3181
3182 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3183 }
3184
3185
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler exposing a queue's maximum interrupt rate
 *   (interrupts per second).  Reads derive the rate from the hardware
 *   EITR register; writes convert the requested rate back into an
 *   EITR interval and update the global ixgbe_max_interrupt_rate.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No register access while firmware recovery is in progress. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * The ITR interval lives in bits 3..11 of EITR; the conversion
	 * 500000/usec treats one unit as 2us (so usec==1 -> 500000/s).
	 */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;	/* interval 0 == moderation disabled */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* clamp to lowest accepted rate */
		/* Convert back into the interval field (bits 3..11). */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3239
3240 const struct sysctlnode *
3241 ixgbe_sysctl_instance(struct adapter *adapter)
3242 {
3243 const char *dvname;
3244 struct sysctllog **log;
3245 int rc;
3246 const struct sysctlnode *rnode;
3247
3248 if (adapter->sysctltop != NULL)
3249 return adapter->sysctltop;
3250
3251 log = &adapter->sysctllog;
3252 dvname = device_xname(adapter->dev);
3253
3254 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3255 0, CTLTYPE_NODE, dvname,
3256 SYSCTL_DESCR("ixgbe information and settings"),
3257 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3258 goto err;
3259
3260 return rnode;
3261 err:
3262 device_printf(adapter->dev,
3263 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3264 return NULL;
3265 }
3266
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Create the per-device sysctl tree under hw.<devname>: generic
 *   read-only info nodes, tunables common to all MACs, and optional
 *   nodes gated on MAC type / device features (DMA coalescing, WoL,
 *   external PHY temperature, EEE, ...).  Creation failures are
 *   reported but non-fatal.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM tunable from the loader-settable default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3423
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR0 (the device register space) and make sure memory-space
 *   address decoding is enabled in the PCI command register.
 *
 *   Returns 0 on success, ENXIO if BAR0 has an unexpected type or
 *   cannot be mapped.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Device registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3473
3474 static void
3475 ixgbe_free_softint(struct adapter *adapter)
3476 {
3477 struct ix_queue *que = adapter->queues;
3478 struct tx_ring *txr = adapter->tx_rings;
3479 int i;
3480
3481 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3482 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3483 if (txr->txr_si != NULL)
3484 softint_disestablish(txr->txr_si);
3485 }
3486 if (que->que_si != NULL)
3487 softint_disestablish(que->que_si);
3488 }
3489 if (adapter->txr_wq != NULL)
3490 workqueue_destroy(adapter->txr_wq);
3491 if (adapter->txr_wq_enqueued != NULL)
3492 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3493 if (adapter->que_wq != NULL)
3494 workqueue_destroy(adapter->que_wq);
3495
3496 /* Drain the Link queue */
3497 if (adapter->link_si != NULL) {
3498 softint_disestablish(adapter->link_si);
3499 adapter->link_si = NULL;
3500 }
3501 if (adapter->mod_si != NULL) {
3502 softint_disestablish(adapter->mod_si);
3503 adapter->mod_si = NULL;
3504 }
3505 if (adapter->msf_si != NULL) {
3506 softint_disestablish(adapter->msf_si);
3507 adapter->msf_si = NULL;
3508 }
3509 if (adapter->phy_si != NULL) {
3510 softint_disestablish(adapter->phy_si);
3511 adapter->phy_si = NULL;
3512 }
3513 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3514 if (adapter->fdir_si != NULL) {
3515 softint_disestablish(adapter->fdir_si);
3516 adapter->fdir_si = NULL;
3517 }
3518 }
3519 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3520 if (adapter->mbx_si != NULL) {
3521 softint_disestablish(adapter->mbx_si);
3522 adapter->mbx_si = NULL;
3523 }
3524 }
3525 } /* ixgbe_free_softint */
3526
3527 /************************************************************************
3528 * ixgbe_detach - Device removal routine
3529 *
3530 * Called when the driver is being removed.
3531 * Stops the adapter and deallocates all the resources
3532 * that were allocated for driver operation.
3533 *
3534 * return 0 on success, positive on failure
3535 ************************************************************************/
3536 static int
3537 ixgbe_detach(device_t dev, int flags)
3538 {
3539 struct adapter *adapter = device_private(dev);
3540 struct rx_ring *rxr = adapter->rx_rings;
3541 struct tx_ring *txr = adapter->tx_rings;
3542 struct ixgbe_hw *hw = &adapter->hw;
3543 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3544 u32 ctrl_ext;
3545 int i;
3546
3547 INIT_DEBUGOUT("ixgbe_detach: begin");
3548 if (adapter->osdep.attached == false)
3549 return 0;
3550
3551 if (ixgbe_pci_iov_detach(dev) != 0) {
3552 device_printf(dev, "SR-IOV in use; detach first.\n");
3553 return (EBUSY);
3554 }
3555
3556 /*
3557 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3558 * so it's not required to call ixgbe_stop() directly.
3559 */
3560 IXGBE_CORE_LOCK(adapter);
3561 ixgbe_setup_low_power_mode(adapter);
3562 IXGBE_CORE_UNLOCK(adapter);
3563 #if NVLAN > 0
3564 /* Make sure VLANs are not using driver */
3565 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3566 ; /* nothing to do: no VLANs */
3567 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3568 vlan_ifdetach(adapter->ifp);
3569 else {
3570 aprint_error_dev(dev, "VLANs in use, detach first\n");
3571 return (EBUSY);
3572 }
3573 #endif
3574
3575 pmf_device_deregister(dev);
3576
3577 ether_ifdetach(adapter->ifp);
3578
3579 ixgbe_free_softint(adapter);
3580
3581 /* let hardware know driver is unloading */
3582 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3583 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3584 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3585
3586 callout_halt(&adapter->timer, NULL);
3587 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3588 callout_halt(&adapter->recovery_mode_timer, NULL);
3589
3590 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3591 netmap_detach(adapter->ifp);
3592
3593 ixgbe_free_pci_resources(adapter);
3594 #if 0 /* XXX the NetBSD port is probably missing something here */
3595 bus_generic_detach(dev);
3596 #endif
3597 if_detach(adapter->ifp);
3598 if_percpuq_destroy(adapter->ipq);
3599
3600 sysctl_teardown(&adapter->sysctllog);
3601 evcnt_detach(&adapter->efbig_tx_dma_setup);
3602 evcnt_detach(&adapter->mbuf_defrag_failed);
3603 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3604 evcnt_detach(&adapter->einval_tx_dma_setup);
3605 evcnt_detach(&adapter->other_tx_dma_setup);
3606 evcnt_detach(&adapter->eagain_tx_dma_setup);
3607 evcnt_detach(&adapter->enomem_tx_dma_setup);
3608 evcnt_detach(&adapter->watchdog_events);
3609 evcnt_detach(&adapter->tso_err);
3610 evcnt_detach(&adapter->link_irq);
3611 evcnt_detach(&adapter->link_sicount);
3612 evcnt_detach(&adapter->mod_sicount);
3613 evcnt_detach(&adapter->msf_sicount);
3614 evcnt_detach(&adapter->phy_sicount);
3615
3616 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3617 if (i < __arraycount(stats->mpc)) {
3618 evcnt_detach(&stats->mpc[i]);
3619 if (hw->mac.type == ixgbe_mac_82598EB)
3620 evcnt_detach(&stats->rnbc[i]);
3621 }
3622 if (i < __arraycount(stats->pxontxc)) {
3623 evcnt_detach(&stats->pxontxc[i]);
3624 evcnt_detach(&stats->pxonrxc[i]);
3625 evcnt_detach(&stats->pxofftxc[i]);
3626 evcnt_detach(&stats->pxoffrxc[i]);
3627 if (hw->mac.type >= ixgbe_mac_82599EB)
3628 evcnt_detach(&stats->pxon2offc[i]);
3629 }
3630 }
3631
3632 txr = adapter->tx_rings;
3633 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3634 evcnt_detach(&adapter->queues[i].irqs);
3635 evcnt_detach(&adapter->queues[i].handleq);
3636 evcnt_detach(&adapter->queues[i].req);
3637 evcnt_detach(&txr->no_desc_avail);
3638 evcnt_detach(&txr->total_packets);
3639 evcnt_detach(&txr->tso_tx);
3640 #ifndef IXGBE_LEGACY_TX
3641 evcnt_detach(&txr->pcq_drops);
3642 #endif
3643
3644 if (i < __arraycount(stats->qprc)) {
3645 evcnt_detach(&stats->qprc[i]);
3646 evcnt_detach(&stats->qptc[i]);
3647 evcnt_detach(&stats->qbrc[i]);
3648 evcnt_detach(&stats->qbtc[i]);
3649 if (hw->mac.type >= ixgbe_mac_82599EB)
3650 evcnt_detach(&stats->qprdc[i]);
3651 }
3652
3653 evcnt_detach(&rxr->rx_packets);
3654 evcnt_detach(&rxr->rx_bytes);
3655 evcnt_detach(&rxr->rx_copies);
3656 evcnt_detach(&rxr->no_jmbuf);
3657 evcnt_detach(&rxr->rx_discarded);
3658 }
3659 evcnt_detach(&stats->ipcs);
3660 evcnt_detach(&stats->l4cs);
3661 evcnt_detach(&stats->ipcs_bad);
3662 evcnt_detach(&stats->l4cs_bad);
3663 evcnt_detach(&stats->intzero);
3664 evcnt_detach(&stats->legint);
3665 evcnt_detach(&stats->crcerrs);
3666 evcnt_detach(&stats->illerrc);
3667 evcnt_detach(&stats->errbc);
3668 evcnt_detach(&stats->mspdc);
3669 if (hw->mac.type >= ixgbe_mac_X550)
3670 evcnt_detach(&stats->mbsdc);
3671 evcnt_detach(&stats->mpctotal);
3672 evcnt_detach(&stats->mlfc);
3673 evcnt_detach(&stats->mrfc);
3674 evcnt_detach(&stats->rlec);
3675 evcnt_detach(&stats->lxontxc);
3676 evcnt_detach(&stats->lxonrxc);
3677 evcnt_detach(&stats->lxofftxc);
3678 evcnt_detach(&stats->lxoffrxc);
3679
3680 /* Packet Reception Stats */
3681 evcnt_detach(&stats->tor);
3682 evcnt_detach(&stats->gorc);
3683 evcnt_detach(&stats->tpr);
3684 evcnt_detach(&stats->gprc);
3685 evcnt_detach(&stats->mprc);
3686 evcnt_detach(&stats->bprc);
3687 evcnt_detach(&stats->prc64);
3688 evcnt_detach(&stats->prc127);
3689 evcnt_detach(&stats->prc255);
3690 evcnt_detach(&stats->prc511);
3691 evcnt_detach(&stats->prc1023);
3692 evcnt_detach(&stats->prc1522);
3693 evcnt_detach(&stats->ruc);
3694 evcnt_detach(&stats->rfc);
3695 evcnt_detach(&stats->roc);
3696 evcnt_detach(&stats->rjc);
3697 evcnt_detach(&stats->mngprc);
3698 evcnt_detach(&stats->mngpdc);
3699 evcnt_detach(&stats->xec);
3700
3701 /* Packet Transmission Stats */
3702 evcnt_detach(&stats->gotc);
3703 evcnt_detach(&stats->tpt);
3704 evcnt_detach(&stats->gptc);
3705 evcnt_detach(&stats->bptc);
3706 evcnt_detach(&stats->mptc);
3707 evcnt_detach(&stats->mngptc);
3708 evcnt_detach(&stats->ptc64);
3709 evcnt_detach(&stats->ptc127);
3710 evcnt_detach(&stats->ptc255);
3711 evcnt_detach(&stats->ptc511);
3712 evcnt_detach(&stats->ptc1023);
3713 evcnt_detach(&stats->ptc1522);
3714
3715 ixgbe_free_transmit_structures(adapter);
3716 ixgbe_free_receive_structures(adapter);
3717 for (i = 0; i < adapter->num_queues; i++) {
3718 struct ix_queue * que = &adapter->queues[i];
3719 mutex_destroy(&que->dc_mtx);
3720 }
3721 free(adapter->queues, M_DEVBUF);
3722 free(adapter->mta, M_DEVBUF);
3723
3724 IXGBE_CORE_LOCK_DESTROY(adapter);
3725
3726 return (0);
3727 } /* ixgbe_detach */
3728
3729 /************************************************************************
3730 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3731 *
3732 * Prepare the adapter/port for LPLU and/or WoL
3733 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	/* Caller (detach/suspend/shutdown) must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY up across the stop so LPLU can be entered */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		/* Re-allow PHY resets now that LPLU entry was attempted */
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* Returns 0 on success or the LPLU-entry error code. */
	return error;
} /* ixgbe_setup_low_power_mode */
3788
3789 /************************************************************************
3790 * ixgbe_shutdown - Shutdown entry point
3791 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * FreeBSD-style shutdown method: drop the port into its low-power/WoL
 * state under the core lock.  Compiled out in the NetBSD port; the
 * equivalent hook would be registered via pmf(9) instead.
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3808
3809 /************************************************************************
3810 * ixgbe_suspend
3811 *
3812 * From D0 to D3
3813 ************************************************************************/
3814 static bool
3815 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3816 {
3817 struct adapter *adapter = device_private(dev);
3818 int error = 0;
3819
3820 INIT_DEBUGOUT("ixgbe_suspend: begin");
3821
3822 IXGBE_CORE_LOCK(adapter);
3823
3824 error = ixgbe_setup_low_power_mode(adapter);
3825
3826 IXGBE_CORE_UNLOCK(adapter);
3827
3828 return (error);
3829 } /* ixgbe_suspend */
3830
3831 /************************************************************************
3832 * ixgbe_resume
3833 *
3834 * From D3 to D0
3835 ************************************************************************/
3836 static bool
3837 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3838 {
3839 struct adapter *adapter = device_private(dev);
3840 struct ifnet *ifp = adapter->ifp;
3841 struct ixgbe_hw *hw = &adapter->hw;
3842 u32 wus;
3843
3844 INIT_DEBUGOUT("ixgbe_resume: begin");
3845
3846 IXGBE_CORE_LOCK(adapter);
3847
3848 /* Read & clear WUS register */
3849 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3850 if (wus)
3851 device_printf(dev, "Woken up by (WUS): %#010x\n",
3852 IXGBE_READ_REG(hw, IXGBE_WUS));
3853 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3854 /* And clear WUFC until next low-power transition */
3855 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3856
3857 /*
3858 * Required after D3->D0 transition;
3859 * will re-advertise all previous advertised speeds
3860 */
3861 if (ifp->if_flags & IFF_UP)
3862 ixgbe_init_locked(adapter);
3863
3864 IXGBE_CORE_UNLOCK(adapter);
3865
3866 return true;
3867 } /* ixgbe_resume */
3868
3869 /*
3870 * Set the various hardware offload abilities.
3871 *
3872 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3873 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3874 * mbuf offload flags the driver will understand.
3875 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * NOTE(review): intentionally empty.  Per the comment above, the
	 * FreeBSD version maps if_capenable into if_hwassist here; the
	 * NetBSD port presumably configures offload elsewhere -- confirm
	 * before relying on this hook doing anything.
	 */
}
3881
3882 /************************************************************************
3883 * ixgbe_init_locked - Init entry point
3884 *
3885 * Used in two ways: It is used by the stack as an init
3886 * entry point in network interface structure. It is also
3887 * used by the driver as a hw/sw initialization routine to
3888 * get to a consistent state.
3889 *
3890 * return 0 on success, positive on failure
3891 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	bool		unsupported_sfp = false;
	int		i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the timer before reprogramming */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* Re-program RAR[0] with the (possibly locally-administered) MAC */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to ~10ms for the RX queue-enable bit to latch */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the per-second housekeeping timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	/* Skipped for unsupported modules so the link stays down */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4142
4143 /************************************************************************
4144 * ixgbe_init
4145 ************************************************************************/
4146 static int
4147 ixgbe_init(struct ifnet *ifp)
4148 {
4149 struct adapter *adapter = ifp->if_softc;
4150
4151 IXGBE_CORE_LOCK(adapter);
4152 ixgbe_init_locked(adapter);
4153 IXGBE_CORE_UNLOCK(adapter);
4154
4155 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4156 } /* ixgbe_init */
4157
4158 /************************************************************************
4159 * ixgbe_set_ivar
4160 *
4161 * Setup the correct IVAR register for a particular MSI-X interrupt
4162 * (yes this is all very magic and confusing :)
4163 * - entry is the register array entry
4164 * - vector is the MSI-X vector for this queue
4165 * - type is RX/TX/MISC
4166 ************************************************************************/
4167 static void
4168 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4169 {
4170 struct ixgbe_hw *hw = &adapter->hw;
4171 u32 ivar, index;
4172
4173 vector |= IXGBE_IVAR_ALLOC_VAL;
4174
4175 switch (hw->mac.type) {
4176 case ixgbe_mac_82598EB:
4177 if (type == -1)
4178 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4179 else
4180 entry += (type * 64);
4181 index = (entry >> 2) & 0x1F;
4182 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4183 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4184 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4185 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4186 break;
4187 case ixgbe_mac_82599EB:
4188 case ixgbe_mac_X540:
4189 case ixgbe_mac_X550:
4190 case ixgbe_mac_X550EM_x:
4191 case ixgbe_mac_X550EM_a:
4192 if (type == -1) { /* MISC IVAR */
4193 index = (entry & 1) * 8;
4194 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4195 ivar &= ~(0xffUL << index);
4196 ivar |= ((u32)vector << index);
4197 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4198 } else { /* RX/TX IVARS */
4199 index = (16 * (entry & 1)) + (8 * type);
4200 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4201 ivar &= ~(0xffUL << index);
4202 ivar |= ((u32)vector << index);
4203 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4204 }
4205 break;
4206 default:
4207 break;
4208 }
4209 } /* ixgbe_set_ivar */
4210
4211 /************************************************************************
4212 * ixgbe_configure_ivars
4213 ************************************************************************/
4214 static void
4215 ixgbe_configure_ivars(struct adapter *adapter)
4216 {
4217 struct ix_queue *que = adapter->queues;
4218 u32 newitr;
4219
4220 if (ixgbe_max_interrupt_rate > 0)
4221 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4222 else {
4223 /*
4224 * Disable DMA coalescing if interrupt moderation is
4225 * disabled.
4226 */
4227 adapter->dmac = 0;
4228 newitr = 0;
4229 }
4230
4231 for (int i = 0; i < adapter->num_queues; i++, que++) {
4232 struct rx_ring *rxr = &adapter->rx_rings[i];
4233 struct tx_ring *txr = &adapter->tx_rings[i];
4234 /* First the RX queue entry */
4235 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4236 /* ... and the TX */
4237 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4238 /* Set an Initial EITR value */
4239 ixgbe_eitr_write(adapter, que->msix, newitr);
4240 /*
4241 * To eliminate influence of the previous state.
4242 * At this point, Tx/Rx interrupt handler
4243 * (ixgbe_msix_que()) cannot be called, so both
4244 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4245 */
4246 que->eitr_setting = 0;
4247 }
4248
4249 /* For the Link interrupt */
4250 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4251 } /* ixgbe_configure_ivars */
4252
4253 /************************************************************************
4254 * ixgbe_config_gpie
4255 ************************************************************************/
4256 static void
4257 ixgbe_config_gpie(struct adapter *adapter)
4258 {
4259 struct ixgbe_hw *hw = &adapter->hw;
4260 u32 gpie;
4261
4262 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4263
4264 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4265 /* Enable Enhanced MSI-X mode */
4266 gpie |= IXGBE_GPIE_MSIX_MODE
4267 | IXGBE_GPIE_EIAME
4268 | IXGBE_GPIE_PBA_SUPPORT
4269 | IXGBE_GPIE_OCD;
4270 }
4271
4272 /* Fan Failure Interrupt */
4273 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4274 gpie |= IXGBE_SDP1_GPIEN;
4275
4276 /* Thermal Sensor Interrupt */
4277 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4278 gpie |= IXGBE_SDP0_GPIEN_X540;
4279
4280 /* Link detection */
4281 switch (hw->mac.type) {
4282 case ixgbe_mac_82599EB:
4283 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4284 break;
4285 case ixgbe_mac_X550EM_x:
4286 case ixgbe_mac_X550EM_a:
4287 gpie |= IXGBE_SDP0_GPIEN_X540;
4288 break;
4289 default:
4290 break;
4291 }
4292
4293 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4294
4295 } /* ixgbe_config_gpie */
4296
4297 /************************************************************************
4298 * ixgbe_config_delay_values
4299 *
4300 * Requires adapter->max_frame_size to be set.
4301 ************************************************************************/
4302 static void
4303 ixgbe_config_delay_values(struct adapter *adapter)
4304 {
4305 struct ixgbe_hw *hw = &adapter->hw;
4306 u32 rxpb, frame, size, tmp;
4307
4308 frame = adapter->max_frame_size;
4309
4310 /* Calculate High Water */
4311 switch (hw->mac.type) {
4312 case ixgbe_mac_X540:
4313 case ixgbe_mac_X550:
4314 case ixgbe_mac_X550EM_x:
4315 case ixgbe_mac_X550EM_a:
4316 tmp = IXGBE_DV_X540(frame, frame);
4317 break;
4318 default:
4319 tmp = IXGBE_DV(frame, frame);
4320 break;
4321 }
4322 size = IXGBE_BT2KB(tmp);
4323 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4324 hw->fc.high_water[0] = rxpb - size;
4325
4326 /* Now calculate Low Water */
4327 switch (hw->mac.type) {
4328 case ixgbe_mac_X540:
4329 case ixgbe_mac_X550:
4330 case ixgbe_mac_X550EM_x:
4331 case ixgbe_mac_X550EM_a:
4332 tmp = IXGBE_LOW_DV_X540(frame);
4333 break;
4334 default:
4335 tmp = IXGBE_LOW_DV(frame);
4336 break;
4337 }
4338 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4339
4340 hw->fc.pause_time = IXGBE_FC_PAUSE;
4341 hw->fc.send_xon = TRUE;
4342 } /* ixgbe_config_delay_values */
4343
4344 /************************************************************************
4345 * ixgbe_set_rxfilter - Multicast Update
4346 *
4347 * Called whenever multicast address list is updated.
4348 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the multicast table from scratch each time */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * Walk the stack's multicast list under ETHER_LOCK.  A range
	 * entry (addrlo != addrhi) or table overflow forces ALLMULTI.
	 */
	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		/* All entries land in this port's VMDq pool */
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Translate promisc/allmulti state into FCTRL bits */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/* Drop ETHER_LOCK before touching hardware filter table */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4404
4405 /************************************************************************
4406 * ixgbe_mc_array_itr
4407 *
4408 * An iterator function needed by the multicast shared code.
4409 * It feeds the shared code routine the addresses in the
4410 * array of ixgbe_set_rxfilter() one by one.
4411 ************************************************************************/
4412 static u8 *
4413 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4414 {
4415 struct ixgbe_mc_addr *mta;
4416
4417 mta = (struct ixgbe_mc_addr *)*update_ptr;
4418 *vmdq = mta->vmdq;
4419
4420 *update_ptr = (u8*)(mta + 1);
4421
4422 return (mta->addr);
4423 } /* ixgbe_mc_array_itr */
4424
4425 /************************************************************************
4426 * ixgbe_local_timer - Timer routine
4427 *
4428 * Checks for link status, updates statistics,
4429 * and runs the watchdog check.
4430 ************************************************************************/
/*
 * Periodic timer entry point: take the core lock and run the real
 * housekeeping work in ixgbe_local_timer1().
 */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4440
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum the per-queue TX DMA-setup error counters into the totals */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, count, and reinitialize the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4545
4546 /************************************************************************
4547 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4548 ************************************************************************/
4549 static void
4550 ixgbe_recovery_mode_timer(void *arg)
4551 {
4552 struct adapter *adapter = arg;
4553 struct ixgbe_hw *hw = &adapter->hw;
4554
4555 IXGBE_CORE_LOCK(adapter);
4556 if (ixgbe_fw_recovery_mode(hw)) {
4557 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4558 /* Firmware error detected, entering recovery mode */
4559 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4560
4561 if (hw->adapter_stopped == FALSE)
4562 ixgbe_stop(adapter);
4563 }
4564 } else
4565 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4566
4567 callout_reset(&adapter->recovery_mode_timer, hz,
4568 ixgbe_recovery_mode_timer, adapter);
4569 IXGBE_CORE_UNLOCK(adapter);
4570 } /* ixgbe_recovery_mode_timer */
4571
4572 /************************************************************************
4573 * ixgbe_sfp_probe
4574 *
4575 * Determine if a port had optics inserted.
4576 ************************************************************************/
4577 static bool
4578 ixgbe_sfp_probe(struct adapter *adapter)
4579 {
4580 struct ixgbe_hw *hw = &adapter->hw;
4581 device_t dev = adapter->dev;
4582 bool result = FALSE;
4583
4584 if ((hw->phy.type == ixgbe_phy_nl) &&
4585 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4586 s32 ret = hw->phy.ops.identify_sfp(hw);
4587 if (ret)
4588 goto out;
4589 ret = hw->phy.ops.reset(hw);
4590 adapter->sfp_probe = FALSE;
4591 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4592 device_printf(dev,"Unsupported SFP+ module detected!");
4593 device_printf(dev,
4594 "Reload driver with supported module.\n");
4595 goto out;
4596 } else
4597 device_printf(dev, "SFP+ module detected!\n");
4598 /* We now have supported optics */
4599 result = TRUE;
4600 }
4601 out:
4602
4603 return (result);
4604 } /* ixgbe_sfp_probe */
4605
4606 /************************************************************************
4607 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4608 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	IXGBE_CORE_LOCK(adapter);
	++adapter->mod_sicount.ev_count;
	/*
	 * On parts with the SFP-cage crosstalk erratum, the presence
	 * pin (SDP2 on 82599, SDP0 on X550EM) is checked first; an
	 * empty cage means the interrupt was spurious.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		/* 82598 just resets the PHY; newer MACs program the SFP */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}
	/* Kick the multispeed-fiber softint to renegotiate the link */
	softint_schedule(adapter->msf_si);
out:
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mod */
4672
4673
4674 /************************************************************************
4675 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4676 ************************************************************************/
4677 static void
4678 ixgbe_handle_msf(void *context)
4679 {
4680 struct adapter *adapter = context;
4681 struct ixgbe_hw *hw = &adapter->hw;
4682 u32 autoneg;
4683 bool negotiate;
4684
4685 IXGBE_CORE_LOCK(adapter);
4686 ++adapter->msf_sicount.ev_count;
4687 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4688 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4689
4690 autoneg = hw->phy.autoneg_advertised;
4691 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4692 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4693 else
4694 negotiate = 0;
4695 if (hw->mac.ops.setup_link)
4696 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4697
4698 /* Adjust media types shown in ifconfig */
4699 ifmedia_removeall(&adapter->media);
4700 ixgbe_add_media_types(adapter);
4701 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4702 IXGBE_CORE_UNLOCK(adapter);
4703 } /* ixgbe_handle_msf */
4704
4705 /************************************************************************
4706 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4707 ************************************************************************/
4708 static void
4709 ixgbe_handle_phy(void *context)
4710 {
4711 struct adapter *adapter = context;
4712 struct ixgbe_hw *hw = &adapter->hw;
4713 int error;
4714
4715 ++adapter->phy_sicount.ev_count;
4716 error = hw->phy.ops.handle_lasi(hw);
4717 if (error == IXGBE_ERR_OVERTEMP)
4718 device_printf(adapter->dev,
4719 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4720 " PHY will downshift to lower power state!\n");
4721 else if (error)
4722 device_printf(adapter->dev,
4723 "Error handling LASI interrupt: %d\n", error);
4724 } /* ixgbe_handle_phy */
4725
/*
 * if_stop hook: stop the adapter under the core lock.
 * The 'disable' argument from the ifnet framework is unused here.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
4735
4736 /************************************************************************
4737 * ixgbe_stop - Stop the hardware
4738 *
4739 * Disables all traffic on the adapter by issuing a
4740 * global reset on the MAC and deallocates TX/RX buffers.
4741 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared right before
	 * ixgbe_stop_adapter() — presumably so that call performs the
	 * full stop sequence rather than short-circuiting; confirm
	 * against the shared ixgbe_stop_adapter() implementation.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4777
4778 /************************************************************************
4779 * ixgbe_update_link_status - Update OS on link state
4780 *
4781 * Note: Only updates the OS on the cached link state.
4782 * The real check of the hardware only happens with
4783 * a link interrupt.
4784 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a DOWN/UNKNOWN -> UP transition. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Announce speed on the console when verbose. */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* Notify the network stack of the new state. */
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Drain pending work so nothing runs on a dead link. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4871
4872 /************************************************************************
4873 * ixgbe_config_dmac - Configure DMA Coalescing
4874 ************************************************************************/
4875 static void
4876 ixgbe_config_dmac(struct adapter *adapter)
4877 {
4878 struct ixgbe_hw *hw = &adapter->hw;
4879 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4880
4881 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4882 return;
4883
4884 if (dcfg->watchdog_timer ^ adapter->dmac ||
4885 dcfg->link_speed ^ adapter->link_speed) {
4886 dcfg->watchdog_timer = adapter->dmac;
4887 dcfg->fcoe_en = false;
4888 dcfg->link_speed = adapter->link_speed;
4889 dcfg->num_tcs = 1;
4890
4891 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4892 dcfg->watchdog_timer, dcfg->link_speed);
4893
4894 hw->mac.ops.dmac_config(hw);
4895 }
4896 } /* ixgbe_config_dmac */
4897
4898 /************************************************************************
4899 * ixgbe_enable_intr
4900 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32		mask, fwsm;

	/* Start from everything except the RX/TX queue bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4981
4982 /************************************************************************
4983 * ixgbe_disable_intr_internal
4984 ************************************************************************/
/*
 * Mask all interrupt sources and disable each queue vector.
 * 'nestok' is passed through to ixgbe_disable_queue_internal() to
 * control whether nested disables are permitted.
 */
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Clear the auto-clear mask when running MSI-X. */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
5002
5003 /************************************************************************
5004 * ixgbe_disable_intr
5005 ************************************************************************/
/* Disable all interrupts, allowing nested disables. */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5012
5013 /************************************************************************
5014 * ixgbe_ensure_disabled_intr
5015 ************************************************************************/
/* Disable all interrupts without permitting nested disables. */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5022
5023 /************************************************************************
5024 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5025 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR also tells us which causes fired. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* EICR == 0: nothing pending (e.g. shared line) — count and bail. */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack and defer to mod tasklet. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed-fiber cause (82599 only): ack and defer. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* RX work was deferred: schedule the que handler; else re-enable. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
5116
5117 /************************************************************************
5118 * ixgbe_free_pciintr_resources
5119 ************************************************************************/
5120 static void
5121 ixgbe_free_pciintr_resources(struct adapter *adapter)
5122 {
5123 struct ix_queue *que = adapter->queues;
5124 int rid;
5125
5126 /*
5127 * Release all msix queue resources:
5128 */
5129 for (int i = 0; i < adapter->num_queues; i++, que++) {
5130 if (que->res != NULL) {
5131 pci_intr_disestablish(adapter->osdep.pc,
5132 adapter->osdep.ihs[i]);
5133 adapter->osdep.ihs[i] = NULL;
5134 }
5135 }
5136
5137 /* Clean the Legacy or Link interrupt last */
5138 if (adapter->vector) /* we are doing MSIX */
5139 rid = adapter->vector;
5140 else
5141 rid = 0;
5142
5143 if (adapter->osdep.ihs[rid] != NULL) {
5144 pci_intr_disestablish(adapter->osdep.pc,
5145 adapter->osdep.ihs[rid]);
5146 adapter->osdep.ihs[rid] = NULL;
5147 }
5148
5149 if (adapter->osdep.intrs != NULL) {
5150 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5151 adapter->osdep.nintrs);
5152 adapter->osdep.intrs = NULL;
5153 }
5154 } /* ixgbe_free_pciintr_resources */
5155
5156 /************************************************************************
5157 * ixgbe_free_pci_resources
5158 ************************************************************************/
/*
 * Release interrupt resources and unmap the device register window
 * (the latter only if it was mapped, i.e. mem_size != 0).
 */
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	ixgbe_free_pciintr_resources(adapter);

	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
5172
5173 /************************************************************************
5174 * ixgbe_set_sysctl_value
5175 ************************************************************************/
5176 static void
5177 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5178 const char *description, int *limit, int value)
5179 {
5180 device_t dev = adapter->dev;
5181 struct sysctllog **log;
5182 const struct sysctlnode *rnode, *cnode;
5183
5184 /*
5185 * It's not required to check recovery mode because this function never
5186 * touches hardware.
5187 */
5188
5189 log = &adapter->sysctllog;
5190 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5191 aprint_error_dev(dev, "could not create sysctl root\n");
5192 return;
5193 }
5194 if (sysctl_createv(log, 0, &rnode, &cnode,
5195 CTLFLAG_READWRITE, CTLTYPE_INT,
5196 name, SYSCTL_DESCR(description),
5197 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5198 aprint_error_dev(dev, "could not create sysctl\n");
5199 *limit = value;
5200 } /* ixgbe_set_sysctl_value */
5201
5202 /************************************************************************
5203 * ixgbe_sysctl_flowcntl
5204 *
5205 * SYSCTL wrapper around setting Flow Control
5206 ************************************************************************/
5207 static int
5208 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5209 {
5210 struct sysctlnode node = *rnode;
5211 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5212 int error, fc;
5213
5214 if (ixgbe_fw_recovery_mode_swflag(adapter))
5215 return (EPERM);
5216
5217 fc = adapter->hw.fc.current_mode;
5218 node.sysctl_data = &fc;
5219 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5220 if (error != 0 || newp == NULL)
5221 return error;
5222
5223 /* Don't bother if it's not changed */
5224 if (fc == adapter->hw.fc.current_mode)
5225 return (0);
5226
5227 return ixgbe_set_flowcntl(adapter, fc);
5228 } /* ixgbe_sysctl_flowcntl */
5229
5230 /************************************************************************
5231 * ixgbe_set_flowcntl - Set flow control
5232 *
5233 * Flow control values:
5234 * 0 - off
5235 * 1 - rx pause
5236 * 2 - tx pause
5237 * 3 - full
5238 ************************************************************************/
5239 static int
5240 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5241 {
5242 switch (fc) {
5243 case ixgbe_fc_rx_pause:
5244 case ixgbe_fc_tx_pause:
5245 case ixgbe_fc_full:
5246 adapter->hw.fc.requested_mode = fc;
5247 if (adapter->num_queues > 1)
5248 ixgbe_disable_rx_drop(adapter);
5249 break;
5250 case ixgbe_fc_none:
5251 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5252 if (adapter->num_queues > 1)
5253 ixgbe_enable_rx_drop(adapter);
5254 break;
5255 default:
5256 return (EINVAL);
5257 }
5258
5259 #if 0 /* XXX NetBSD */
5260 /* Don't autoneg if forcing a value */
5261 adapter->hw.fc.disable_fc_autoneg = TRUE;
5262 #endif
5263 ixgbe_fc_enable(&adapter->hw);
5264
5265 return (0);
5266 } /* ixgbe_set_flowcntl */
5267
5268 /************************************************************************
5269 * ixgbe_enable_rx_drop
5270 *
5271 * Enable the hardware to drop packets when the buffer is
5272 * full. This is useful with multiqueue, so that no single
5273 * queue being full stalls the entire RX engine. We only
5274 * enable this when Multiqueue is enabled AND Flow Control
5275 * is disabled.
5276 ************************************************************************/
5277 static void
5278 ixgbe_enable_rx_drop(struct adapter *adapter)
5279 {
5280 struct ixgbe_hw *hw = &adapter->hw;
5281 struct rx_ring *rxr;
5282 u32 srrctl;
5283
5284 for (int i = 0; i < adapter->num_queues; i++) {
5285 rxr = &adapter->rx_rings[i];
5286 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5287 srrctl |= IXGBE_SRRCTL_DROP_EN;
5288 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5289 }
5290
5291 /* enable drop for each vf */
5292 for (int i = 0; i < adapter->num_vfs; i++) {
5293 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5294 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5295 IXGBE_QDE_ENABLE));
5296 }
5297 } /* ixgbe_enable_rx_drop */
5298
5299 /************************************************************************
5300 * ixgbe_disable_rx_drop
5301 ************************************************************************/
5302 static void
5303 ixgbe_disable_rx_drop(struct adapter *adapter)
5304 {
5305 struct ixgbe_hw *hw = &adapter->hw;
5306 struct rx_ring *rxr;
5307 u32 srrctl;
5308
5309 for (int i = 0; i < adapter->num_queues; i++) {
5310 rxr = &adapter->rx_rings[i];
5311 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5312 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5313 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5314 }
5315
5316 /* disable drop for each vf */
5317 for (int i = 0; i < adapter->num_vfs; i++) {
5318 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5319 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5320 }
5321 } /* ixgbe_disable_rx_drop */
5322
5323 /************************************************************************
5324 * ixgbe_sysctl_advertise
5325 *
5326 * SYSCTL wrapper around setting advertised speed
5327 ************************************************************************/
5328 static int
5329 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5330 {
5331 struct sysctlnode node = *rnode;
5332 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5333 int error = 0, advertise;
5334
5335 if (ixgbe_fw_recovery_mode_swflag(adapter))
5336 return (EPERM);
5337
5338 advertise = adapter->advertise;
5339 node.sysctl_data = &advertise;
5340 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5341 if (error != 0 || newp == NULL)
5342 return error;
5343
5344 return ixgbe_set_advertise(adapter, advertise);
5345 } /* ixgbe_sysctl_advertise */
5346
5347 /************************************************************************
5348 * ixgbe_set_advertise - Control advertised link speed
5349 *
5350 * Flags:
5351 * 0x00 - Default (all capable link speed)
5352 * 0x01 - advertise 100 Mb
5353 * 0x02 - advertise 1G
5354 * 0x04 - advertise 10G
5355 * 0x08 - advertise 10 Mb
5356 * 0x10 - advertise 2.5G
5357 * 0x20 - advertise 5G
5358 ************************************************************************/
5359 static int
5360 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5361 {
5362 device_t dev;
5363 struct ixgbe_hw *hw;
5364 ixgbe_link_speed speed = 0;
5365 ixgbe_link_speed link_caps = 0;
5366 s32 err = IXGBE_NOT_IMPLEMENTED;
5367 bool negotiate = FALSE;
5368
5369 /* Checks to validate new value */
5370 if (adapter->advertise == advertise) /* no change */
5371 return (0);
5372
5373 dev = adapter->dev;
5374 hw = &adapter->hw;
5375
5376 /* No speed changes for backplane media */
5377 if (hw->phy.media_type == ixgbe_media_type_backplane)
5378 return (ENODEV);
5379
5380 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5381 (hw->phy.multispeed_fiber))) {
5382 device_printf(dev,
5383 "Advertised speed can only be set on copper or "
5384 "multispeed fiber media types.\n");
5385 return (EINVAL);
5386 }
5387
5388 if (advertise < 0x0 || advertise > 0x2f) {
5389 device_printf(dev,
5390 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5391 return (EINVAL);
5392 }
5393
5394 if (hw->mac.ops.get_link_capabilities) {
5395 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5396 &negotiate);
5397 if (err != IXGBE_SUCCESS) {
5398 device_printf(dev, "Unable to determine supported advertise speeds\n");
5399 return (ENODEV);
5400 }
5401 }
5402
5403 /* Set new value and report new advertised mode */
5404 if (advertise & 0x1) {
5405 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5406 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5407 return (EINVAL);
5408 }
5409 speed |= IXGBE_LINK_SPEED_100_FULL;
5410 }
5411 if (advertise & 0x2) {
5412 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5413 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5414 return (EINVAL);
5415 }
5416 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5417 }
5418 if (advertise & 0x4) {
5419 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5420 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5421 return (EINVAL);
5422 }
5423 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5424 }
5425 if (advertise & 0x8) {
5426 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5427 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5428 return (EINVAL);
5429 }
5430 speed |= IXGBE_LINK_SPEED_10_FULL;
5431 }
5432 if (advertise & 0x10) {
5433 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5434 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5435 return (EINVAL);
5436 }
5437 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5438 }
5439 if (advertise & 0x20) {
5440 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5441 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5442 return (EINVAL);
5443 }
5444 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5445 }
5446 if (advertise == 0)
5447 speed = link_caps; /* All capable link speed */
5448
5449 hw->mac.autotry_restart = TRUE;
5450 hw->mac.ops.setup_link(hw, speed, TRUE);
5451 adapter->advertise = advertise;
5452
5453 return (0);
5454 } /* ixgbe_set_advertise */
5455
5456 /************************************************************************
5457 * ixgbe_get_advertise - Get current advertised speed settings
5458 *
5459 * Formatted for sysctl usage.
5460 * Flags:
5461 * 0x01 - advertise 100 Mb
5462 * 0x02 - advertise 1G
5463 * 0x04 - advertise 10G
5464 * 0x08 - advertise 10 Mb (yes, Mb)
5465 * 0x10 - advertise 2.5G
5466 * 0x20 - advertise 5G
5467 ************************************************************************/
5468 static int
5469 ixgbe_get_advertise(struct adapter *adapter)
5470 {
5471 struct ixgbe_hw *hw = &adapter->hw;
5472 int speed;
5473 ixgbe_link_speed link_caps = 0;
5474 s32 err;
5475 bool negotiate = FALSE;
5476
5477 /*
5478 * Advertised speed means nothing unless it's copper or
5479 * multi-speed fiber
5480 */
5481 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5482 !(hw->phy.multispeed_fiber))
5483 return (0);
5484
5485 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5486 if (err != IXGBE_SUCCESS)
5487 return (0);
5488
5489 speed =
5490 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5491 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5492 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5493 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5494 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5495 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5496
5497 return speed;
5498 } /* ixgbe_get_advertise */
5499
5500 /************************************************************************
5501 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5502 *
5503 * Control values:
5504 * 0/1 - off / on (use default value of 1000)
5505 *
5506 * Legal timer values are:
5507 * 50,100,250,500,1000,2000,5000,10000
5508 *
5509 * Turning off interrupt moderation will also turn this off.
5510 ************************************************************************/
5511 static int
5512 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5513 {
5514 struct sysctlnode node = *rnode;
5515 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5516 struct ifnet *ifp = adapter->ifp;
5517 int error;
5518 int newval;
5519
5520 if (ixgbe_fw_recovery_mode_swflag(adapter))
5521 return (EPERM);
5522
5523 newval = adapter->dmac;
5524 node.sysctl_data = &newval;
5525 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5526 if ((error) || (newp == NULL))
5527 return (error);
5528
5529 switch (newval) {
5530 case 0:
5531 /* Disabled */
5532 adapter->dmac = 0;
5533 break;
5534 case 1:
5535 /* Enable and use default */
5536 adapter->dmac = 1000;
5537 break;
5538 case 50:
5539 case 100:
5540 case 250:
5541 case 500:
5542 case 1000:
5543 case 2000:
5544 case 5000:
5545 case 10000:
5546 /* Legal values - allow */
5547 adapter->dmac = newval;
5548 break;
5549 default:
5550 /* Do nothing, illegal value */
5551 return (EINVAL);
5552 }
5553
5554 /* Re-initialize hardware if it's already running */
5555 if (ifp->if_flags & IFF_RUNNING)
5556 ifp->if_init(ifp);
5557
5558 return (0);
5559 }
5560
5561 #ifdef IXGBE_DEBUG
5562 /************************************************************************
5563 * ixgbe_sysctl_power_state
5564 *
5565 * Sysctl to test power states
5566 * Values:
5567 * 0 - set device to D0
5568 * 3 - set device to D3
5569 * (none) - get current device power state
5570 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
/* Entire implementation is disabled pending a NetBSD port of the
 * FreeBSD power-state hooks (note the FreeBSD-style 'req->newp' and
 * DEVICE_SUSPEND/DEVICE_RESUME calls below). */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* No-op until the code above is ported. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5606 #endif
5607
5608 /************************************************************************
5609 * ixgbe_sysctl_wol_enable
5610 *
5611 * Sysctl to enable/disable the WoL capability,
5612 * if supported by the adapter.
5613 *
5614 * Values:
5615 * 0 - disabled
5616 * 1 - enabled
5617 ************************************************************************/
5618 static int
5619 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5620 {
5621 struct sysctlnode node = *rnode;
5622 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5623 struct ixgbe_hw *hw = &adapter->hw;
5624 bool new_wol_enabled;
5625 int error = 0;
5626
5627 /*
5628 * It's not required to check recovery mode because this function never
5629 * touches hardware.
5630 */
5631 new_wol_enabled = hw->wol_enabled;
5632 node.sysctl_data = &new_wol_enabled;
5633 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5634 if ((error) || (newp == NULL))
5635 return (error);
5636 if (new_wol_enabled == hw->wol_enabled)
5637 return (0);
5638
5639 if (new_wol_enabled && !adapter->wol_support)
5640 return (ENODEV);
5641 else
5642 hw->wol_enabled = new_wol_enabled;
5643
5644 return (0);
5645 } /* ixgbe_sysctl_wol_enable */
5646
5647 /************************************************************************
5648 * ixgbe_sysctl_wufc - Wake Up Filter Control
5649 *
5650 * Sysctl to enable/disable the types of packets that the
5651 * adapter will wake up on upon receipt.
5652 * Flags:
5653 * 0x1 - Link Status Change
5654 * 0x2 - Magic Packet
5655 * 0x4 - Direct Exact
5656 * 0x8 - Directed Multicast
5657 * 0x10 - Broadcast
5658 * 0x20 - ARP/IPv4 Request Packet
5659 * 0x40 - Direct IPv4 Packet
5660 * 0x80 - Direct IPv6 Packet
5661 *
5662 * Settings not listed above will cause the sysctl to return an error.
5663 ************************************************************************/
5664 static int
5665 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5666 {
5667 struct sysctlnode node = *rnode;
5668 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5669 int error = 0;
5670 u32 new_wufc;
5671
5672 /*
5673 * It's not required to check recovery mode because this function never
5674 * touches hardware.
5675 */
5676 new_wufc = adapter->wufc;
5677 node.sysctl_data = &new_wufc;
5678 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5679 if ((error) || (newp == NULL))
5680 return (error);
5681 if (new_wufc == adapter->wufc)
5682 return (0);
5683
5684 if (new_wufc & 0xffffff00)
5685 return (EINVAL);
5686
5687 new_wufc &= 0xff;
5688 new_wufc |= (0xffffff & adapter->wufc);
5689 adapter->wufc = new_wufc;
5690
5691 return (0);
5692 } /* ixgbe_sysctl_wufc */
5693
5694 #ifdef IXGBE_DEBUG
5695 /************************************************************************
5696 * ixgbe_sysctl_print_rss_config
5697 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * FreeBSD-only implementation kept under "notyet": sbuf and the
	 * sysctl "req" argument are FreeBSD interfaces with no NetBSD
	 * equivalent here.  The live code simply returns 0.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family provides the extended 128-entry table. */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the ERETA registers. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5754 #endif /* IXGBE_DEBUG */
5755
5756 /************************************************************************
5757 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5758 *
5759 * For X552/X557-AT devices using an external PHY
5760 ************************************************************************/
5761 static int
5762 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5763 {
5764 struct sysctlnode node = *rnode;
5765 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5766 struct ixgbe_hw *hw = &adapter->hw;
5767 int val;
5768 u16 reg;
5769 int error;
5770
5771 if (ixgbe_fw_recovery_mode_swflag(adapter))
5772 return (EPERM);
5773
5774 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5775 device_printf(adapter->dev,
5776 "Device has no supported external thermal sensor.\n");
5777 return (ENODEV);
5778 }
5779
5780 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5781 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5782 device_printf(adapter->dev,
5783 "Error reading from PHY's current temperature register\n");
5784 return (EAGAIN);
5785 }
5786
5787 node.sysctl_data = &val;
5788
5789 /* Shift temp for output */
5790 val = reg >> 8;
5791
5792 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5793 if ((error) || (newp == NULL))
5794 return (error);
5795
5796 return (0);
5797 } /* ixgbe_sysctl_phy_temp */
5798
5799 /************************************************************************
5800 * ixgbe_sysctl_phy_overtemp_occurred
5801 *
5802 * Reports (directly from the PHY) whether the current PHY
5803 * temperature is over the overtemp threshold.
5804 ************************************************************************/
5805 static int
5806 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5807 {
5808 struct sysctlnode node = *rnode;
5809 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5810 struct ixgbe_hw *hw = &adapter->hw;
5811 int val, error;
5812 u16 reg;
5813
5814 if (ixgbe_fw_recovery_mode_swflag(adapter))
5815 return (EPERM);
5816
5817 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5818 device_printf(adapter->dev,
5819 "Device has no supported external thermal sensor.\n");
5820 return (ENODEV);
5821 }
5822
5823 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5824 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5825 device_printf(adapter->dev,
5826 "Error reading from PHY's temperature status register\n");
5827 return (EAGAIN);
5828 }
5829
5830 node.sysctl_data = &val;
5831
5832 /* Get occurrence bit */
5833 val = !!(reg & 0x4000);
5834
5835 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5836 if ((error) || (newp == NULL))
5837 return (error);
5838
5839 return (0);
5840 } /* ixgbe_sysctl_phy_overtemp_occurred */
5841
5842 /************************************************************************
5843 * ixgbe_sysctl_eee_state
5844 *
5845 * Sysctl to set EEE power saving feature
5846 * Values:
5847 * 0 - disable EEE
5848 * 1 - enable EEE
5849 * (none) - get current device EEE state
5850 ************************************************************************/
5851 static int
5852 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5853 {
5854 struct sysctlnode node = *rnode;
5855 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5856 struct ifnet *ifp = adapter->ifp;
5857 device_t dev = adapter->dev;
5858 int curr_eee, new_eee, error = 0;
5859 s32 retval;
5860
5861 if (ixgbe_fw_recovery_mode_swflag(adapter))
5862 return (EPERM);
5863
5864 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5865 node.sysctl_data = &new_eee;
5866 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5867 if ((error) || (newp == NULL))
5868 return (error);
5869
5870 /* Nothing to do */
5871 if (new_eee == curr_eee)
5872 return (0);
5873
5874 /* Not supported */
5875 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5876 return (EINVAL);
5877
5878 /* Bounds checking */
5879 if ((new_eee < 0) || (new_eee > 1))
5880 return (EINVAL);
5881
5882 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
5883 if (retval) {
5884 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5885 return (EINVAL);
5886 }
5887
5888 /* Restart auto-neg */
5889 ifp->if_init(ifp);
5890
5891 device_printf(dev, "New EEE state: %d\n", new_eee);
5892
5893 /* Cache new value */
5894 if (new_eee)
5895 adapter->feat_en |= IXGBE_FEATURE_EEE;
5896 else
5897 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5898
5899 return (error);
5900 } /* ixgbe_sysctl_eee_state */
5901
/*
 * Dump the per-queue register "regname" for every queue on a single
 * console line, e.g. "ixg0: RDH<tab>00000000 00000001 ...".  Used only
 * by ixgbe_print_debug_info() below.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5915
5916 /************************************************************************
5917 * ixgbe_print_debug_info
5918 *
5919 * Called only when em_display_debug_stats is enabled.
5920 * Provides a way to take a look at important statistics
5921 * maintained by the driver and hardware.
5922 ************************************************************************/
5923 static void
5924 ixgbe_print_debug_info(struct adapter *adapter)
5925 {
5926 device_t dev = adapter->dev;
5927 struct ixgbe_hw *hw = &adapter->hw;
5928 int table_size;
5929 int i;
5930
5931 switch (adapter->hw.mac.type) {
5932 case ixgbe_mac_X550:
5933 case ixgbe_mac_X550EM_x:
5934 case ixgbe_mac_X550EM_a:
5935 table_size = 128;
5936 break;
5937 default:
5938 table_size = 32;
5939 break;
5940 }
5941
5942 device_printf(dev, "[E]RETA:\n");
5943 for (i = 0; i < table_size; i++) {
5944 if (i < 32)
5945 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5946 IXGBE_RETA(i)));
5947 else
5948 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5949 IXGBE_ERETA(i - 32)));
5950 }
5951
5952 device_printf(dev, "queue:");
5953 for (i = 0; i < adapter->num_queues; i++) {
5954 printf((i == 0) ? "\t" : " ");
5955 printf("%8d", i);
5956 }
5957 printf("\n");
5958 PRINTQS(adapter, RDBAL);
5959 PRINTQS(adapter, RDBAH);
5960 PRINTQS(adapter, RDLEN);
5961 PRINTQS(adapter, SRRCTL);
5962 PRINTQS(adapter, RDH);
5963 PRINTQS(adapter, RDT);
5964 PRINTQS(adapter, RXDCTL);
5965
5966 device_printf(dev, "RQSMR:");
5967 for (i = 0; i < adapter->num_queues / 4; i++) {
5968 printf((i == 0) ? "\t" : " ");
5969 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
5970 }
5971 printf("\n");
5972
5973 device_printf(dev, "disabled_count:");
5974 for (i = 0; i < adapter->num_queues; i++) {
5975 printf((i == 0) ? "\t" : " ");
5976 printf("%8d", adapter->queues[i].disabled_count);
5977 }
5978 printf("\n");
5979
5980 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
5981 if (hw->mac.type != ixgbe_mac_82598EB) {
5982 device_printf(dev, "EIMS_EX(0):\t%08x\n",
5983 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
5984 device_printf(dev, "EIMS_EX(1):\t%08x\n",
5985 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
5986 }
5987 } /* ixgbe_print_debug_info */
5988
5989 /************************************************************************
5990 * ixgbe_sysctl_debug
5991 ************************************************************************/
5992 static int
5993 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5994 {
5995 struct sysctlnode node = *rnode;
5996 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5997 int error, result = 0;
5998
5999 if (ixgbe_fw_recovery_mode_swflag(adapter))
6000 return (EPERM);
6001
6002 node.sysctl_data = &result;
6003 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6004
6005 if (error || newp == NULL)
6006 return error;
6007
6008 if (result == 1)
6009 ixgbe_print_debug_info(adapter);
6010
6011 return 0;
6012 } /* ixgbe_sysctl_debug */
6013
6014 /************************************************************************
6015 * ixgbe_init_device_features
6016 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/*
	 * Establish the feature capability mask (feat_cap) from the MAC
	 * type, then derive the enabled-feature mask (feat_en) from it.
	 * feat_en must always be a subset of feat_cap.
	 */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only valid on function 0 of bypass adapters. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot use legacy (INTx) interrupts. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only valid on function 0 of bypass adapters. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		/* RSS and SR-IOV both require MSI-X vectors. */
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6134
6135 /************************************************************************
6136 * ixgbe_probe - Device identification routine
6137 *
6138 * Determines if the driver should be loaded on
6139 * adapter based on its PCI vendor/device ID.
6140 *
6141 * return BUS_PROBE_DEFAULT on success, positive on failure
6142 ************************************************************************/
6143 static int
6144 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6145 {
6146 const struct pci_attach_args *pa = aux;
6147
6148 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6149 }
6150
6151 static const ixgbe_vendor_info_t *
6152 ixgbe_lookup(const struct pci_attach_args *pa)
6153 {
6154 const ixgbe_vendor_info_t *ent;
6155 pcireg_t subid;
6156
6157 INIT_DEBUGOUT("ixgbe_lookup: begin");
6158
6159 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6160 return NULL;
6161
6162 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6163
6164 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6165 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6166 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6167 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6168 (ent->subvendor_id == 0)) &&
6169 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6170 (ent->subdevice_id == 0))) {
6171 return ent;
6172 }
6173 }
6174 return NULL;
6175 }
6176
/*
 * Ethernet-layer callback invoked when if_flags or ec_capenable change.
 * Returns 0 when the change was applied in place, ENETRESET when the
 * caller must fully re-initialize the interface.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Record the new if_flags so the next delta is computed correctly. */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/*
	 * Any flag change outside IFF_CANTCHANGE/IFF_DEBUG requires a
	 * re-init; a promiscuous-mode toggle only needs the RX filter
	 * reprogrammed.
	 */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0)
		ixgbe_set_rxfilter(adapter);

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	/* Only the three VLAN capabilities can be handled without re-init. */
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		ixgbe_setup_vlan_hw_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
6220
6221 /************************************************************************
6222 * ixgbe_ioctl - Ioctl entry point
6223 *
6224 * Called when the user wants to configure the interface.
6225 *
6226 * return 0 on success, positive on failure
6227 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int error = 0;
	int l4csum_en;
	/* Layer-4 RX checksum capabilities that must toggle together. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* Refuse all configuration while firmware recovery is in progress. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* First switch: debug logging only (plus SIOCZIFDATA's side effect). */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both hardware counters and driver event counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual command handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* 0xA0/0xA2 are the standard SFP EEPROM/diagnostic addresses. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common Ethernet code act first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* Interface down: nothing to reprogram. */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Re-init to pick up the new caps/MTU. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6362
6363 /************************************************************************
6364 * ixgbe_check_fan_failure
6365 ************************************************************************/
6366 static void
6367 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6368 {
6369 u32 mask;
6370
6371 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6372 IXGBE_ESDP_SDP1;
6373
6374 if (reg & mask)
6375 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6376 } /* ixgbe_check_fan_failure */
6377
6378 /************************************************************************
6379 * ixgbe_handle_que
6380 ************************************************************************/
/*
 * Deferred per-queue service routine: drain completed RX and TX work
 * and restart transmission, then either reschedule itself (more work
 * pending) or re-enable the queue's interrupt source.
 */
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		/* TX completion and restart must run under the TX lock. */
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule rather than re-enable the IRQ. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6418
6419 /************************************************************************
6420 * ixgbe_handle_que_work
6421 ************************************************************************/
/*
 * Workqueue entry point: recover the owning queue from the embedded
 * work cookie and hand off to the common service routine.
 */
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
6433
6434 /************************************************************************
6435 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6436 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char intrbuf[PCI_INTRSTR_LEN];
	char wqname[MAXCOMLEN];
	const char *intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/*
		 * Establishing MSI failed: release it and retry the whole
		 * allocation with INTx (if the hardware supports INTx).
		 */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Keep the feature flags consistent with what was actually used. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		/*
		 * NOTE(review): this error path does not release the
		 * established hardware interrupt or any softints that did
		 * succeed — presumably detach handles cleanup; confirm.
		 */
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6542
6543 /************************************************************************
6544 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6545 ************************************************************************/
6546 static int
6547 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6548 {
6549 device_t dev = adapter->dev;
6550 struct ix_queue *que = adapter->queues;
6551 struct tx_ring *txr = adapter->tx_rings;
6552 pci_chipset_tag_t pc;
6553 char intrbuf[PCI_INTRSTR_LEN];
6554 char intr_xname[32];
6555 char wqname[MAXCOMLEN];
6556 const char *intrstr = NULL;
6557 int error, vector = 0;
6558 int cpu_id = 0;
6559 kcpuset_t *affinity;
6560 #ifdef RSS
6561 unsigned int rss_buckets = 0;
6562 kcpuset_t cpu_mask;
6563 #endif
6564
6565 pc = adapter->osdep.pc;
6566 #ifdef RSS
6567 /*
6568 * If we're doing RSS, the number of queues needs to
6569 * match the number of RSS buckets that are configured.
6570 *
6571 * + If there's more queues than RSS buckets, we'll end
6572 * up with queues that get no traffic.
6573 *
6574 * + If there's more RSS buckets than queues, we'll end
6575 * up having multiple RSS buckets map to the same queue,
6576 * so there'll be some contention.
6577 */
6578 rss_buckets = rss_getnumbuckets();
6579 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6580 (adapter->num_queues != rss_buckets)) {
6581 device_printf(dev,
6582 "%s: number of queues (%d) != number of RSS buckets (%d)"
6583 "; performance will be impacted.\n",
6584 __func__, adapter->num_queues, rss_buckets);
6585 }
6586 #endif
6587
6588 adapter->osdep.nintrs = adapter->num_queues + 1;
6589 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6590 adapter->osdep.nintrs) != 0) {
6591 aprint_error_dev(dev,
6592 "failed to allocate MSI-X interrupt\n");
6593 return (ENXIO);
6594 }
6595
6596 kcpuset_create(&affinity, false);
6597 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6598 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6599 device_xname(dev), i);
6600 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6601 sizeof(intrbuf));
6602 #ifdef IXGBE_MPSAFE
6603 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6604 true);
6605 #endif
6606 /* Set the handler function */
6607 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6608 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6609 intr_xname);
6610 if (que->res == NULL) {
6611 aprint_error_dev(dev,
6612 "Failed to register QUE handler\n");
6613 error = ENXIO;
6614 goto err_out;
6615 }
6616 que->msix = vector;
6617 adapter->active_queues |= 1ULL << que->msix;
6618
6619 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6620 #ifdef RSS
6621 /*
6622 * The queue ID is used as the RSS layer bucket ID.
6623 * We look up the queue ID -> RSS CPU ID and select
6624 * that.
6625 */
6626 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6627 CPU_SETOF(cpu_id, &cpu_mask);
6628 #endif
6629 } else {
6630 /*
6631 * Bind the MSI-X vector, and thus the
6632 * rings to the corresponding CPU.
6633 *
6634 * This just happens to match the default RSS
6635 * round-robin bucket -> queue -> CPU allocation.
6636 */
6637 if (adapter->num_queues > 1)
6638 cpu_id = i;
6639 }
6640 /* Round-robin affinity */
6641 kcpuset_zero(affinity);
6642 kcpuset_set(affinity, cpu_id % ncpu);
6643 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6644 NULL);
6645 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6646 intrstr);
6647 if (error == 0) {
6648 #if 1 /* def IXGBE_DEBUG */
6649 #ifdef RSS
6650 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6651 cpu_id % ncpu);
6652 #else
6653 aprint_normal(", bound queue %d to cpu %d", i,
6654 cpu_id % ncpu);
6655 #endif
6656 #endif /* IXGBE_DEBUG */
6657 }
6658 aprint_normal("\n");
6659
6660 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6661 txr->txr_si = softint_establish(
6662 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6663 ixgbe_deferred_mq_start, txr);
6664 if (txr->txr_si == NULL) {
6665 aprint_error_dev(dev,
6666 "couldn't establish software interrupt\n");
6667 error = ENXIO;
6668 goto err_out;
6669 }
6670 }
6671 que->que_si
6672 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6673 ixgbe_handle_que, que);
6674 if (que->que_si == NULL) {
6675 aprint_error_dev(dev,
6676 "couldn't establish software interrupt\n");
6677 error = ENXIO;
6678 goto err_out;
6679 }
6680 }
6681 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6682 error = workqueue_create(&adapter->txr_wq, wqname,
6683 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6684 IXGBE_WORKQUEUE_FLAGS);
6685 if (error) {
6686 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6687 goto err_out;
6688 }
6689 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6690
6691 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6692 error = workqueue_create(&adapter->que_wq, wqname,
6693 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6694 IXGBE_WORKQUEUE_FLAGS);
6695 if (error) {
6696 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6697 goto err_out;
6698 }
6699
6700 /* and Link */
6701 cpu_id++;
6702 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6703 adapter->vector = vector;
6704 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6705 sizeof(intrbuf));
6706 #ifdef IXGBE_MPSAFE
6707 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6708 true);
6709 #endif
6710 /* Set the link handler function */
6711 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6712 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6713 intr_xname);
6714 if (adapter->osdep.ihs[vector] == NULL) {
6715 aprint_error_dev(dev, "Failed to register LINK handler\n");
6716 error = ENXIO;
6717 goto err_out;
6718 }
6719 /* Round-robin affinity */
6720 kcpuset_zero(affinity);
6721 kcpuset_set(affinity, cpu_id % ncpu);
6722 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6723 NULL);
6724
6725 aprint_normal_dev(dev,
6726 "for link, interrupting at %s", intrstr);
6727 if (error == 0)
6728 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6729 else
6730 aprint_normal("\n");
6731
6732 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6733 adapter->mbx_si =
6734 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6735 ixgbe_handle_mbx, adapter);
6736 if (adapter->mbx_si == NULL) {
6737 aprint_error_dev(dev,
6738 "could not establish software interrupts\n");
6739
6740 error = ENXIO;
6741 goto err_out;
6742 }
6743 }
6744
6745 kcpuset_destroy(affinity);
6746 aprint_normal_dev(dev,
6747 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6748
6749 return (0);
6750
6751 err_out:
6752 kcpuset_destroy(affinity);
6753 ixgbe_free_softint(adapter);
6754 ixgbe_free_pciintr_resources(adapter);
6755 return (error);
6756 } /* ixgbe_allocate_msix */
6757
6758 /************************************************************************
6759 * ixgbe_configure_interrupts
6760 *
6761 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6762 * This will also depend on user settings.
6763 ************************************************************************/
6764 static int
6765 ixgbe_configure_interrupts(struct adapter *adapter)
6766 {
6767 device_t dev = adapter->dev;
6768 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6769 int want, queues, msgs;
6770
6771 /* Default to 1 queue if MSI-X setup fails */
6772 adapter->num_queues = 1;
6773
6774 /* Override by tuneable */
6775 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6776 goto msi;
6777
6778 /*
6779 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6780 * interrupt slot.
6781 */
6782 if (ncpu == 1)
6783 goto msi;
6784
6785 /* First try MSI-X */
6786 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6787 msgs = MIN(msgs, IXG_MAX_NINTR);
6788 if (msgs < 2)
6789 goto msi;
6790
6791 adapter->msix_mem = (void *)1; /* XXX */
6792
6793 /* Figure out a reasonable auto config value */
6794 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6795
6796 #ifdef RSS
6797 /* If we're doing RSS, clamp at the number of RSS buckets */
6798 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6799 queues = uimin(queues, rss_getnumbuckets());
6800 #endif
6801 if (ixgbe_num_queues > queues) {
6802 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6803 ixgbe_num_queues = queues;
6804 }
6805
6806 if (ixgbe_num_queues != 0)
6807 queues = ixgbe_num_queues;
6808 else
6809 queues = uimin(queues,
6810 uimin(mac->max_tx_queues, mac->max_rx_queues));
6811
6812 /* reflect correct sysctl value */
6813 ixgbe_num_queues = queues;
6814
6815 /*
6816 * Want one vector (RX/TX pair) per queue
6817 * plus an additional for Link.
6818 */
6819 want = queues + 1;
6820 if (msgs >= want)
6821 msgs = want;
6822 else {
6823 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6824 "%d vectors but %d queues wanted!\n",
6825 msgs, want);
6826 goto msi;
6827 }
6828 adapter->num_queues = queues;
6829 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6830 return (0);
6831
6832 /*
6833 * MSI-X allocation failed or provided us with
6834 * less vectors than needed. Free MSI-X resources
6835 * and we'll try enabling MSI.
6836 */
6837 msi:
6838 /* Without MSI-X, some features are no longer supported */
6839 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6840 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6841 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6842 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6843
6844 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6845 adapter->msix_mem = NULL; /* XXX */
6846 if (msgs > 1)
6847 msgs = 1;
6848 if (msgs != 0) {
6849 msgs = 1;
6850 adapter->feat_en |= IXGBE_FEATURE_MSI;
6851 return (0);
6852 }
6853
6854 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6855 aprint_error_dev(dev,
6856 "Device does not support legacy interrupts.\n");
6857 return 1;
6858 }
6859
6860 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6861
6862 return (0);
6863 } /* ixgbe_configure_interrupts */
6864
6865
6866 /************************************************************************
6867 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6868 *
6869 * Done outside of interrupt context since the driver might sleep
6870 ************************************************************************/
6871 static void
6872 ixgbe_handle_link(void *context)
6873 {
6874 struct adapter *adapter = context;
6875 struct ixgbe_hw *hw = &adapter->hw;
6876
6877 IXGBE_CORE_LOCK(adapter);
6878 ++adapter->link_sicount.ev_count;
6879 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6880 ixgbe_update_link_status(adapter);
6881
6882 /* Re-enable link interrupts */
6883 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6884
6885 IXGBE_CORE_UNLOCK(adapter);
6886 } /* ixgbe_handle_link */
6887
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 *   Set the EICS (interrupt cause set) bits for the queues selected by
 *   the 'queues' bitmask (one bit per queue), re-triggering their
 *   interrupts.  NOTE: currently compiled out (#if 0); kept for
 *   reference.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: a single 32-bit EICS register, masked to queue bits */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Later MACs: two EICS_EX registers cover 64 queue bits */
		mask = (queues & 0xFFFFFFFF);	/* low 32 queue bits */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);		/* high 32 queue bits */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		/* Unknown MAC type: nothing to re-arm */
		break;
	}
} /* ixgbe_rearm_queues */
#endif
6917