ixgbe.c revision 1.191 1 /* $NetBSD: ixgbe.c,v 1.191 2019/07/02 08:38:48 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84 /* XXX NetBSD: + 3.3.10 */
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	/* X550EM-X */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	/* X550EM-A */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
150
151 /************************************************************************
152 * Table of branding strings
153 ************************************************************************/
154 static const char *ixgbe_strings[] = {
155 "Intel(R) PRO/10GbE PCI-Express Network Driver"
156 };
157
158 /************************************************************************
159 * Function prototypes
160 ************************************************************************/
161 static int ixgbe_probe(device_t, cfdata_t, void *);
162 static void ixgbe_attach(device_t, device_t, void *);
163 static int ixgbe_detach(device_t, int);
164 #if 0
165 static int ixgbe_shutdown(device_t);
166 #endif
167 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
168 static bool ixgbe_resume(device_t, const pmf_qual_t *);
169 static int ixgbe_ifflags_cb(struct ethercom *);
170 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
171 static void ixgbe_ifstop(struct ifnet *, int);
172 static int ixgbe_init(struct ifnet *);
173 static void ixgbe_init_locked(struct adapter *);
174 static void ixgbe_stop(void *);
175 static void ixgbe_init_device_features(struct adapter *);
176 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
177 static void ixgbe_add_media_types(struct adapter *);
178 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
179 static int ixgbe_media_change(struct ifnet *);
180 static int ixgbe_allocate_pci_resources(struct adapter *,
181 const struct pci_attach_args *);
182 static void ixgbe_free_softint(struct adapter *);
183 static void ixgbe_get_slot_info(struct adapter *);
184 static int ixgbe_allocate_msix(struct adapter *,
185 const struct pci_attach_args *);
186 static int ixgbe_allocate_legacy(struct adapter *,
187 const struct pci_attach_args *);
188 static int ixgbe_configure_interrupts(struct adapter *);
189 static void ixgbe_free_pciintr_resources(struct adapter *);
190 static void ixgbe_free_pci_resources(struct adapter *);
191 static void ixgbe_local_timer(void *);
192 static void ixgbe_local_timer1(void *);
193 static void ixgbe_recovery_mode_timer(void *);
194 static int ixgbe_setup_interface(device_t, struct adapter *);
195 static void ixgbe_config_gpie(struct adapter *);
196 static void ixgbe_config_dmac(struct adapter *);
197 static void ixgbe_config_delay_values(struct adapter *);
198 static void ixgbe_config_link(struct adapter *);
199 static void ixgbe_check_wol_support(struct adapter *);
200 static int ixgbe_setup_low_power_mode(struct adapter *);
201 #if 0
202 static void ixgbe_rearm_queues(struct adapter *, u64);
203 #endif
204
205 static void ixgbe_initialize_transmit_units(struct adapter *);
206 static void ixgbe_initialize_receive_units(struct adapter *);
207 static void ixgbe_enable_rx_drop(struct adapter *);
208 static void ixgbe_disable_rx_drop(struct adapter *);
209 static void ixgbe_initialize_rss_mapping(struct adapter *);
210
211 static void ixgbe_enable_intr(struct adapter *);
212 static void ixgbe_disable_intr(struct adapter *);
213 static void ixgbe_update_stats_counters(struct adapter *);
214 static void ixgbe_set_promisc(struct adapter *);
215 static void ixgbe_set_multi(struct adapter *);
216 static void ixgbe_update_link_status(struct adapter *);
217 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
218 static void ixgbe_configure_ivars(struct adapter *);
219 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
220 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
221
222 static void ixgbe_setup_vlan_hw_support(struct adapter *);
223 #if 0
224 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
225 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
226 #endif
227
228 static void ixgbe_add_device_sysctls(struct adapter *);
229 static void ixgbe_add_hw_stats(struct adapter *);
230 static void ixgbe_clear_evcnt(struct adapter *);
231 static int ixgbe_set_flowcntl(struct adapter *, int);
232 static int ixgbe_set_advertise(struct adapter *, int);
233 static int ixgbe_get_advertise(struct adapter *);
234
235 /* Sysctl handlers */
236 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
237 const char *, int *, int);
238 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
239 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
244 #ifdef IXGBE_DEBUG
245 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
247 #endif
248 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
251 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
252 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
254 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
255 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
256 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
257
258 /* Support for pluggable optic modules */
259 static bool ixgbe_sfp_probe(struct adapter *);
260
261 /* Legacy (single vector) interrupt handler */
262 static int ixgbe_legacy_irq(void *);
263
264 /* The MSI/MSI-X Interrupt handlers */
265 static int ixgbe_msix_que(void *);
266 static int ixgbe_msix_link(void *);
267
268 /* Software interrupts for deferred work */
269 static void ixgbe_handle_que(void *);
270 static void ixgbe_handle_link(void *);
271 static void ixgbe_handle_msf(void *);
272 static void ixgbe_handle_mod(void *);
273 static void ixgbe_handle_phy(void *);
274
275 /* Workqueue handler for deferred work */
276 static void ixgbe_handle_que_work(struct work *, void *);
277
278 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
279
280 /************************************************************************
281 * NetBSD Device Interface Entry Points
282 ************************************************************************/
283 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
284 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
285 DVF_DETACH_SHUTDOWN);
286
287 #if 0
288 devclass_t ix_devclass;
289 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
290
291 MODULE_DEPEND(ix, pci, 1, 1, 1);
292 MODULE_DEPEND(ix, ether, 1, 1, 1);
293 #ifdef DEV_NETMAP
294 MODULE_DEPEND(ix, netmap, 1, 1, 1);
295 #endif
296 #endif
297
298 /*
299 * TUNEABLE PARAMETERS:
300 */
301
302 /*
303 * AIM: Adaptive Interrupt Moderation
304 * which means that the interrupt rate
305 * is varied over time based on the
306 * traffic for that interrupt vector
307 */
308 static bool ixgbe_enable_aim = true;
309 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
310 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
311 "Enable adaptive interrupt moderation");
312
313 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
314 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
315 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
316
317 /* How many packets rxeof tries to clean at a time */
318 static int ixgbe_rx_process_limit = 256;
319 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
320 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
321
322 /* How many packets txeof tries to clean at a time */
323 static int ixgbe_tx_process_limit = 256;
324 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
325 &ixgbe_tx_process_limit, 0,
326 "Maximum number of sent packets to process at a time, -1 means unlimited");
327
328 /* Flow control setting, default to full */
329 static int ixgbe_flow_control = ixgbe_fc_full;
330 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
331 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
332
333 /* Which packet processing uses workqueue or softint */
334 static bool ixgbe_txrx_workqueue = false;
335
336 /*
337 * Smart speed setting, default to on
338 * this only works as a compile option
 * right now as it's during attach, set
340 * this to 'ixgbe_smart_speed_off' to
341 * disable.
342 */
343 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
344
345 /*
346 * MSI-X should be the default for best performance,
347 * but this allows it to be forced off for testing.
348 */
349 static int ixgbe_enable_msix = 1;
350 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
351 "Enable MSI-X interrupts");
352
353 /*
354 * Number of Queues, can be set to 0,
355 * it then autoconfigures based on the
356 * number of cpus with a max of 8. This
 * can be overridden manually here.
358 */
359 static int ixgbe_num_queues = 0;
360 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
361 "Number of queues to configure, 0 indicates autoconfigure");
362
363 /*
364 * Number of TX descriptors per ring,
365 * setting higher than RX as this seems
366 * the better performing choice.
367 */
368 static int ixgbe_txd = PERFORM_TXD;
369 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
370 "Number of transmit descriptors per queue");
371
372 /* Number of RX descriptors per ring */
373 static int ixgbe_rxd = PERFORM_RXD;
374 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
375 "Number of receive descriptors per queue");
376
377 /*
378 * Defining this on will allow the use
379 * of unsupported SFP+ modules, note that
380 * doing so you are on your own :)
381 */
382 static int allow_unsupported_sfp = false;
383 #define TUNABLE_INT(__x, __y)
384 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
385
386 /*
387 * Not sure if Flow Director is fully baked,
388 * so we'll default to turning it off.
389 */
390 static int ixgbe_enable_fdir = 0;
391 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
392 "Enable Flow Director");
393
394 /* Legacy Transmit (single queue) */
395 static int ixgbe_enable_legacy_tx = 0;
396 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
397 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
398
399 /* Receive-Side Scaling */
400 static int ixgbe_enable_rss = 1;
401 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
402 "Enable Receive-Side Scaling (RSS)");
403
404 #if 0
405 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
406 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
407 #endif
408
409 #ifdef NET_MPSAFE
410 #define IXGBE_MPSAFE 1
411 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
412 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
413 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
414 #else
415 #define IXGBE_CALLOUT_FLAGS 0
416 #define IXGBE_SOFTINFT_FLAGS 0
417 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
418 #endif
419 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
420
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA, and ERETA for entries
 *   128..511 on X550-class MACs), the ten 32-bit RSS key registers
 *   (RSSRK) and the hash-field selection register (MRQC) so that
 *   received packets are distributed over the configured RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* On NetBSD the stack-wide RSS key is always used */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 spaces queue indices 0x11 apart in the table */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has 512 indirection entries (RETA + ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;	/* wrap: round-robin across all queues */

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Shift the accumulator right and insert the new entry
		 * in the top byte; after 4 entries the register is full.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush to hardware */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate each enabled hash type into its MRQC field-enable bit */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in the SR-IOV queueing mode bits */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
537
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Disables RX, then programs broadcast/filter control (FCTRL),
 *   jumbo/CRC-strip behaviour (HLREG0), each ring's descriptor base,
 *   length, buffer size (SRRCTL), statistics mapping (RQSMR) and
 *   head/tail pointers, followed by PSRTYPE, the RSS mapping and the
 *   RXCSUM checksum-offload controls.
 ************************************************************************/
/* Round-up mask to convert rx_mbuf_sz into SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: also set discard-pause and pass-MAC-control */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size expressed in SRRCTL's BSIZEPKT units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index for this ring */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Header types eligible for packet split (non-82598 MACs) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
667
668 /************************************************************************
669 * ixgbe_initialize_transmit_units - Enable transmit units.
670 ************************************************************************/
671 static void
672 ixgbe_initialize_transmit_units(struct adapter *adapter)
673 {
674 struct tx_ring *txr = adapter->tx_rings;
675 struct ixgbe_hw *hw = &adapter->hw;
676 int i;
677
678 /* Setup the Base and Length of the Tx Descriptor Ring */
679 for (i = 0; i < adapter->num_queues; i++, txr++) {
680 u64 tdba = txr->txdma.dma_paddr;
681 u32 txctrl = 0;
682 u32 tqsmreg, reg;
683 int regnum = i / 4; /* 1 register per 4 queues */
684 int regshift = i % 4; /* 4 bits per 1 queue */
685 int j = txr->me;
686
687 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
688 (tdba & 0x00000000ffffffffULL));
689 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
690 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
691 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
692
693 /*
694 * Set TQSMR (Transmit Queue Statistic Mapping) register.
695 * Register location is different between 82598 and others.
696 */
697 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
698 tqsmreg = IXGBE_TQSMR(regnum);
699 else
700 tqsmreg = IXGBE_TQSM(regnum);
701 reg = IXGBE_READ_REG(hw, tqsmreg);
702 reg &= ~(0x000000ff << (regshift * 8));
703 reg |= i << (regshift * 8);
704 IXGBE_WRITE_REG(hw, tqsmreg, reg);
705
706 /* Setup the HW Tx Head and Tail descriptor pointers */
707 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
708 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
709
710 /* Cache the tail address */
711 txr->tail = IXGBE_TDT(j);
712
713 txr->txr_no_space = false;
714
715 /* Disable Head Writeback */
716 /*
717 * Note: for X550 series devices, these registers are actually
718 * prefixed with TPH_ isntead of DCA_, but the addresses and
719 * fields remain the same.
720 */
721 switch (hw->mac.type) {
722 case ixgbe_mac_82598EB:
723 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
724 break;
725 default:
726 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
727 break;
728 }
729 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
730 switch (hw->mac.type) {
731 case ixgbe_mac_82598EB:
732 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
733 break;
734 default:
735 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
736 break;
737 }
738
739 }
740
741 if (hw->mac.type != ixgbe_mac_82598EB) {
742 u32 dmatxctl, rttdcs;
743
744 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
745 dmatxctl |= IXGBE_DMATXCTL_TE;
746 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
747 /* Disable arbiter to set MTQC */
748 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
749 rttdcs |= IXGBE_RTTDCS_ARBDIS;
750 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
751 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
752 ixgbe_get_mtqc(adapter->iov_mode));
753 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
754 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
755 }
756
757 return;
758 } /* ixgbe_initialize_transmit_units */
759
760 /************************************************************************
761 * ixgbe_attach - Device initialization routine
762 *
763 * Called when the driver is being loaded.
764 * Identifies the type of hardware, allocates all resources
765 * and initializes the hardware.
766 *
767 * return 0 on success, positive on failure
768 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = -1;
	u32 ctrl_ext;
	u16 high, low, nvmreg;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixgbe_lookup(pa);

	/* Attach follows a successful probe, so a table match is expected */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Map the MAC type (set by the shared code) to a printable name */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	 * reset_if_overtemp: presumably lets this initial reset proceed
	 * even if an overtemp condition is latched — TODO confirm against
	 * the shared-code PHY reset implementation.
	 */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		adapter->sfp_probe = TRUE;
		error = IXGBE_SUCCESS;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		/* 0xffff means the NVM word is unprogrammed/unreadable */
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		/* X540 prints the minor in a narrower format than X550EM A */
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	/* Remember the parsed version (0.0 when not available) */
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_transmit_structures(adapter);
			ixgbe_free_receive_structures(adapter);
			free(adapter->queues, M_DEVBUF);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/*
			 * Allocate our TX/RX Queues again: num_queues
			 * changed, so the queue structures must be rebuilt.
			 */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	/* MSI-X either unavailable or its setup failed above; use INTx/MSI */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_link, adapter);
	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_mod, adapter);
	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_msf, adapter);
	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_phy, adapter);
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		adapter->fdir_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_reinit_fdir, adapter);
	/* All required softints must have been established */
	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
		&& (adapter->fdir_si == NULL))) {
		aprint_error_dev(dev,
		    "could not establish software interrupts ()\n");
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		aprint_error_dev(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

	/* Queues were allocated; tear them down before the common cleanup */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
	/* Common failure path: clear DRV_LOAD and release all resources */
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_softint(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
1257
1258 /************************************************************************
1259 * ixgbe_check_wol_support
1260 *
1261 * Checks whether the adapter's ports are capable of
1262 * Wake On LAN by reading the adapter's NVM.
1263 *
1264 * Sets each port's hw->wol_enabled value depending
1265 * on the value read here.
1266 ************************************************************************/
1267 static void
1268 ixgbe_check_wol_support(struct adapter *adapter)
1269 {
1270 struct ixgbe_hw *hw = &adapter->hw;
1271 u16 dev_caps = 0;
1272
1273 /* Find out WoL support for port */
1274 adapter->wol_support = hw->wol_enabled = 0;
1275 ixgbe_get_device_caps(hw, &dev_caps);
1276 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1277 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1278 hw->bus.func == 0))
1279 adapter->wol_support = hw->wol_enabled = 1;
1280
1281 /* Save initial wake up filter configuration */
1282 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1283
1284 return;
1285 } /* ixgbe_check_wol_support */
1286
1287 /************************************************************************
1288 * ixgbe_setup_interface
1289 *
1290 * Setup networking device structure and register an interface.
1291 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Populate the ifnet embedded in our ethercom */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit is used unless legacy TX was requested */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Register with the network stack; must precede ether_ifattach */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	/* Initial max frame size derived from the default MTU */
	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (checksum offload and TSO) */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Offloads are advertised but start disabled */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1395
1396 /************************************************************************
1397 * ixgbe_add_media_types
1398 ************************************************************************/
1399 static void
1400 ixgbe_add_media_types(struct adapter *adapter)
1401 {
1402 struct ixgbe_hw *hw = &adapter->hw;
1403 device_t dev = adapter->dev;
1404 u64 layer;
1405
1406 layer = adapter->phy_layer;
1407
1408 #define ADD(mm, dd) \
1409 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1410
1411 ADD(IFM_NONE, 0);
1412
1413 /* Media types with matching NetBSD media defines */
1414 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1415 ADD(IFM_10G_T | IFM_FDX, 0);
1416 }
1417 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1418 ADD(IFM_1000_T | IFM_FDX, 0);
1419 }
1420 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1421 ADD(IFM_100_TX | IFM_FDX, 0);
1422 }
1423 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1424 ADD(IFM_10_T | IFM_FDX, 0);
1425 }
1426
1427 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1428 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1429 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1430 }
1431
1432 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1433 ADD(IFM_10G_LR | IFM_FDX, 0);
1434 if (hw->phy.multispeed_fiber) {
1435 ADD(IFM_1000_LX | IFM_FDX, 0);
1436 }
1437 }
1438 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1439 ADD(IFM_10G_SR | IFM_FDX, 0);
1440 if (hw->phy.multispeed_fiber) {
1441 ADD(IFM_1000_SX | IFM_FDX, 0);
1442 }
1443 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1444 ADD(IFM_1000_SX | IFM_FDX, 0);
1445 }
1446 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1447 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1448 }
1449
1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1451 ADD(IFM_10G_KR | IFM_FDX, 0);
1452 }
1453 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1454 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1455 }
1456 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1457 ADD(IFM_1000_KX | IFM_FDX, 0);
1458 }
1459 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1460 ADD(IFM_2500_KX | IFM_FDX, 0);
1461 }
1462 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1463 ADD(IFM_2500_T | IFM_FDX, 0);
1464 }
1465 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1466 ADD(IFM_5000_T | IFM_FDX, 0);
1467 }
1468 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1469 device_printf(dev, "Media supported: 1000baseBX\n");
1470 /* XXX no ifmedia_set? */
1471
1472 ADD(IFM_AUTO, 0);
1473
1474 #undef ADD
1475 } /* ixgbe_add_media_types */
1476
1477 /************************************************************************
1478 * ixgbe_is_sfp
1479 ************************************************************************/
1480 static inline bool
1481 ixgbe_is_sfp(struct ixgbe_hw *hw)
1482 {
1483 switch (hw->mac.type) {
1484 case ixgbe_mac_82598EB:
1485 if (hw->phy.type == ixgbe_phy_nl)
1486 return (TRUE);
1487 return (FALSE);
1488 case ixgbe_mac_82599EB:
1489 switch (hw->mac.ops.get_media_type(hw)) {
1490 case ixgbe_media_type_fiber:
1491 case ixgbe_media_type_fiber_qsfp:
1492 return (TRUE);
1493 default:
1494 return (FALSE);
1495 }
1496 case ixgbe_mac_X550EM_x:
1497 case ixgbe_mac_X550EM_a:
1498 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1499 return (TRUE);
1500 return (FALSE);
1501 default:
1502 return (FALSE);
1503 }
1504 } /* ixgbe_is_sfp */
1505
1506 /************************************************************************
1507 * ixgbe_config_link
1508 ************************************************************************/
1509 static void
1510 ixgbe_config_link(struct adapter *adapter)
1511 {
1512 struct ixgbe_hw *hw = &adapter->hw;
1513 u32 autoneg, err = 0;
1514 bool sfp, negotiate = false;
1515
1516 sfp = ixgbe_is_sfp(hw);
1517
1518 if (sfp) {
1519 if (hw->phy.multispeed_fiber) {
1520 ixgbe_enable_tx_laser(hw);
1521 kpreempt_disable();
1522 softint_schedule(adapter->msf_si);
1523 kpreempt_enable();
1524 }
1525 kpreempt_disable();
1526 softint_schedule(adapter->mod_si);
1527 kpreempt_enable();
1528 } else {
1529 struct ifmedia *ifm = &adapter->media;
1530
1531 if (hw->mac.ops.check_link)
1532 err = ixgbe_check_link(hw, &adapter->link_speed,
1533 &adapter->link_up, FALSE);
1534 if (err)
1535 return;
1536
1537 /*
1538 * Check if it's the first call. If it's the first call,
1539 * get value for auto negotiation.
1540 */
1541 autoneg = hw->phy.autoneg_advertised;
1542 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1543 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1544 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1545 &negotiate);
1546 if (err)
1547 return;
1548 if (hw->mac.ops.setup_link)
1549 err = hw->mac.ops.setup_link(hw, autoneg,
1550 adapter->link_up);
1551 }
1552
1553 } /* ixgbe_config_link */
1554
1555 /************************************************************************
1556 * ixgbe_update_stats_counters - Update board statistics counters.
1557 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/*
	 * NOTE(review): missed_rx is initialized to 0 and never
	 * accumulated in this function, so the GPRC workaround below
	 * subtracts nothing — confirm against the FreeBSD original.
	 */
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	unsigned int queue_counters;
	int i;

	/* Error counters; CRC errors are also fed to if_ierrors below */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		/* Per-queue RX drop counter only on 82599 and newer */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist (one per traffic class) */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		/* Per-priority PFC pause frame counters */
		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* 82599+ use different register addresses for the RX side */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level (802.3x) pause frames sent; used to adjust TX totals */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit octet counters are split across L/H registers */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet size histogram counters */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, adjusted for pause frames counted above */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1719
1720 /************************************************************************
1721 * ixgbe_add_hw_stats
1722 *
1723 * Add sysctl variables, one per statistic, to the system.
1724 ************************************************************************/
1725 static void
1726 ixgbe_add_hw_stats(struct adapter *adapter)
1727 {
1728 device_t dev = adapter->dev;
1729 const struct sysctlnode *rnode, *cnode;
1730 struct sysctllog **log = &adapter->sysctllog;
1731 struct tx_ring *txr = adapter->tx_rings;
1732 struct rx_ring *rxr = adapter->rx_rings;
1733 struct ixgbe_hw *hw = &adapter->hw;
1734 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1735 const char *xname = device_xname(dev);
1736 int i;
1737
1738 /* Driver Statistics */
1739 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1740 NULL, xname, "Driver tx dma soft fail EFBIG");
1741 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1742 NULL, xname, "m_defrag() failed");
1743 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1744 NULL, xname, "Driver tx dma hard fail EFBIG");
1745 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1746 NULL, xname, "Driver tx dma hard fail EINVAL");
1747 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1748 NULL, xname, "Driver tx dma hard fail other");
1749 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1750 NULL, xname, "Driver tx dma soft fail EAGAIN");
1751 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1752 NULL, xname, "Driver tx dma soft fail ENOMEM");
1753 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1754 NULL, xname, "Watchdog timeouts");
1755 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1756 NULL, xname, "TSO errors");
1757 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1758 NULL, xname, "Link MSI-X IRQ Handled");
1759 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1760 NULL, xname, "Link softint");
1761 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1762 NULL, xname, "module softint");
1763 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1764 NULL, xname, "multimode softint");
1765 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1766 NULL, xname, "external PHY softint");
1767
1768 /* Max number of traffic class is 8 */
1769 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1770 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1771 snprintf(adapter->tcs[i].evnamebuf,
1772 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1773 xname, i);
1774 if (i < __arraycount(stats->mpc)) {
1775 evcnt_attach_dynamic(&stats->mpc[i],
1776 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1777 "RX Missed Packet Count");
1778 if (hw->mac.type == ixgbe_mac_82598EB)
1779 evcnt_attach_dynamic(&stats->rnbc[i],
1780 EVCNT_TYPE_MISC, NULL,
1781 adapter->tcs[i].evnamebuf,
1782 "Receive No Buffers");
1783 }
1784 if (i < __arraycount(stats->pxontxc)) {
1785 evcnt_attach_dynamic(&stats->pxontxc[i],
1786 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1787 "pxontxc");
1788 evcnt_attach_dynamic(&stats->pxonrxc[i],
1789 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1790 "pxonrxc");
1791 evcnt_attach_dynamic(&stats->pxofftxc[i],
1792 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1793 "pxofftxc");
1794 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1796 "pxoffrxc");
1797 if (hw->mac.type >= ixgbe_mac_82599EB)
1798 evcnt_attach_dynamic(&stats->pxon2offc[i],
1799 EVCNT_TYPE_MISC, NULL,
1800 adapter->tcs[i].evnamebuf,
1801 "pxon2offc");
1802 }
1803 }
1804
1805 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1806 #ifdef LRO
1807 struct lro_ctrl *lro = &rxr->lro;
1808 #endif /* LRO */
1809
1810 snprintf(adapter->queues[i].evnamebuf,
1811 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1812 xname, i);
1813 snprintf(adapter->queues[i].namebuf,
1814 sizeof(adapter->queues[i].namebuf), "q%d", i);
1815
1816 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1817 aprint_error_dev(dev, "could not create sysctl root\n");
1818 break;
1819 }
1820
1821 if (sysctl_createv(log, 0, &rnode, &rnode,
1822 0, CTLTYPE_NODE,
1823 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1824 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1825 break;
1826
1827 if (sysctl_createv(log, 0, &rnode, &cnode,
1828 CTLFLAG_READWRITE, CTLTYPE_INT,
1829 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1830 ixgbe_sysctl_interrupt_rate_handler, 0,
1831 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1832 break;
1833
1834 if (sysctl_createv(log, 0, &rnode, &cnode,
1835 CTLFLAG_READONLY, CTLTYPE_INT,
1836 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1837 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1838 0, CTL_CREATE, CTL_EOL) != 0)
1839 break;
1840
1841 if (sysctl_createv(log, 0, &rnode, &cnode,
1842 CTLFLAG_READONLY, CTLTYPE_INT,
1843 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1844 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1845 0, CTL_CREATE, CTL_EOL) != 0)
1846 break;
1847
1848 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1849 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1850 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1851 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1852 "Handled queue in softint");
1853 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1854 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1855 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1856 NULL, adapter->queues[i].evnamebuf, "TSO");
1857 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1858 NULL, adapter->queues[i].evnamebuf,
1859 "Queue No Descriptor Available");
1860 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1861 NULL, adapter->queues[i].evnamebuf,
1862 "Queue Packets Transmitted");
1863 #ifndef IXGBE_LEGACY_TX
1864 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1865 NULL, adapter->queues[i].evnamebuf,
1866 "Packets dropped in pcq");
1867 #endif
1868
1869 if (sysctl_createv(log, 0, &rnode, &cnode,
1870 CTLFLAG_READONLY,
1871 CTLTYPE_INT,
1872 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1873 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1874 CTL_CREATE, CTL_EOL) != 0)
1875 break;
1876
1877 if (sysctl_createv(log, 0, &rnode, &cnode,
1878 CTLFLAG_READONLY,
1879 CTLTYPE_INT,
1880 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1881 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1882 CTL_CREATE, CTL_EOL) != 0)
1883 break;
1884
1885 if (sysctl_createv(log, 0, &rnode, &cnode,
1886 CTLFLAG_READONLY,
1887 CTLTYPE_INT,
1888 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1889 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1890 CTL_CREATE, CTL_EOL) != 0)
1891 break;
1892
1893 if (i < __arraycount(stats->qprc)) {
1894 evcnt_attach_dynamic(&stats->qprc[i],
1895 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1896 "qprc");
1897 evcnt_attach_dynamic(&stats->qptc[i],
1898 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1899 "qptc");
1900 evcnt_attach_dynamic(&stats->qbrc[i],
1901 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1902 "qbrc");
1903 evcnt_attach_dynamic(&stats->qbtc[i],
1904 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1905 "qbtc");
1906 if (hw->mac.type >= ixgbe_mac_82599EB)
1907 evcnt_attach_dynamic(&stats->qprdc[i],
1908 EVCNT_TYPE_MISC, NULL,
1909 adapter->queues[i].evnamebuf, "qprdc");
1910 }
1911
1912 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1913 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1914 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1915 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1916 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1917 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1918 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1919 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1920 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1921 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1922 #ifdef LRO
1923 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1924 CTLFLAG_RD, &lro->lro_queued, 0,
1925 "LRO Queued");
1926 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1927 CTLFLAG_RD, &lro->lro_flushed, 0,
1928 "LRO Flushed");
1929 #endif /* LRO */
1930 }
1931
1932 /* MAC stats get their own sub node */
1933
1934 snprintf(stats->namebuf,
1935 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1936
1937 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1938 stats->namebuf, "rx csum offload - IP");
1939 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "rx csum offload - L4");
1941 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "rx csum offload - IP bad");
1943 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "rx csum offload - L4 bad");
1945 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "Interrupt conditions zero");
1947 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Legacy interrupts");
1949
1950 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "CRC Errors");
1952 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Illegal Byte Errors");
1954 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Byte Errors");
1956 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "MAC Short Packets Discarded");
1958 if (hw->mac.type >= ixgbe_mac_X550)
1959 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "Bad SFD");
1961 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "Total Packets Missed");
1963 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "MAC Local Faults");
1965 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "MAC Remote Faults");
1967 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Receive Length Errors");
1969 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Link XON Transmitted");
1971 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "Link XON Received");
1973 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Link XOFF Transmitted");
1975 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Link XOFF Received");
1977
1978 /* Packet Reception Stats */
1979 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "Total Octets Received");
1981 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Good Octets Received");
1983 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Total Packets Received");
1985 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Good Packets Received");
1987 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Multicast Packets Received");
1989 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Broadcast Packets Received");
1991 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "64 byte frames received ");
1993 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "65-127 byte frames received");
1995 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "128-255 byte frames received");
1997 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "256-511 byte frames received");
1999 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "512-1023 byte frames received");
2001 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "1023-1522 byte frames received");
2003 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "Receive Undersized");
2005 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "Fragmented Packets Received ");
2007 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "Oversized Packets Received");
2009 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Received Jabber");
2011 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Management Packets Received");
2013 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Management Packets Dropped");
2015 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Checksum Errors");
2017
2018 /* Packet Transmission Stats */
2019 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Good Octets Transmitted");
2021 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Total Packets Transmitted");
2023 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2024 stats->namebuf, "Good Packets Transmitted");
2025 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Broadcast Packets Transmitted");
2027 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Multicast Packets Transmitted");
2029 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Management Packets Transmitted");
2031 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "64 byte frames transmitted ");
2033 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "65-127 byte frames transmitted");
2035 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "128-255 byte frames transmitted");
2037 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "256-511 byte frames transmitted");
2039 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "512-1023 byte frames transmitted");
2041 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "1024-1522 byte frames transmitted");
2043 } /* ixgbe_add_hw_stats */
2044
2045 static void
2046 ixgbe_clear_evcnt(struct adapter *adapter)
2047 {
2048 struct tx_ring *txr = adapter->tx_rings;
2049 struct rx_ring *rxr = adapter->rx_rings;
2050 struct ixgbe_hw *hw = &adapter->hw;
2051 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2052 int i;
2053
2054 adapter->efbig_tx_dma_setup.ev_count = 0;
2055 adapter->mbuf_defrag_failed.ev_count = 0;
2056 adapter->efbig2_tx_dma_setup.ev_count = 0;
2057 adapter->einval_tx_dma_setup.ev_count = 0;
2058 adapter->other_tx_dma_setup.ev_count = 0;
2059 adapter->eagain_tx_dma_setup.ev_count = 0;
2060 adapter->enomem_tx_dma_setup.ev_count = 0;
2061 adapter->tso_err.ev_count = 0;
2062 adapter->watchdog_events.ev_count = 0;
2063 adapter->link_irq.ev_count = 0;
2064 adapter->link_sicount.ev_count = 0;
2065 adapter->mod_sicount.ev_count = 0;
2066 adapter->msf_sicount.ev_count = 0;
2067 adapter->phy_sicount.ev_count = 0;
2068
2069 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2070 if (i < __arraycount(stats->mpc)) {
2071 stats->mpc[i].ev_count = 0;
2072 if (hw->mac.type == ixgbe_mac_82598EB)
2073 stats->rnbc[i].ev_count = 0;
2074 }
2075 if (i < __arraycount(stats->pxontxc)) {
2076 stats->pxontxc[i].ev_count = 0;
2077 stats->pxonrxc[i].ev_count = 0;
2078 stats->pxofftxc[i].ev_count = 0;
2079 stats->pxoffrxc[i].ev_count = 0;
2080 if (hw->mac.type >= ixgbe_mac_82599EB)
2081 stats->pxon2offc[i].ev_count = 0;
2082 }
2083 }
2084
2085 txr = adapter->tx_rings;
2086 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2087 adapter->queues[i].irqs.ev_count = 0;
2088 adapter->queues[i].handleq.ev_count = 0;
2089 adapter->queues[i].req.ev_count = 0;
2090 txr->no_desc_avail.ev_count = 0;
2091 txr->total_packets.ev_count = 0;
2092 txr->tso_tx.ev_count = 0;
2093 #ifndef IXGBE_LEGACY_TX
2094 txr->pcq_drops.ev_count = 0;
2095 #endif
2096 txr->q_efbig_tx_dma_setup = 0;
2097 txr->q_mbuf_defrag_failed = 0;
2098 txr->q_efbig2_tx_dma_setup = 0;
2099 txr->q_einval_tx_dma_setup = 0;
2100 txr->q_other_tx_dma_setup = 0;
2101 txr->q_eagain_tx_dma_setup = 0;
2102 txr->q_enomem_tx_dma_setup = 0;
2103 txr->q_tso_err = 0;
2104
2105 if (i < __arraycount(stats->qprc)) {
2106 stats->qprc[i].ev_count = 0;
2107 stats->qptc[i].ev_count = 0;
2108 stats->qbrc[i].ev_count = 0;
2109 stats->qbtc[i].ev_count = 0;
2110 if (hw->mac.type >= ixgbe_mac_82599EB)
2111 stats->qprdc[i].ev_count = 0;
2112 }
2113
2114 rxr->rx_packets.ev_count = 0;
2115 rxr->rx_bytes.ev_count = 0;
2116 rxr->rx_copies.ev_count = 0;
2117 rxr->no_jmbuf.ev_count = 0;
2118 rxr->rx_discarded.ev_count = 0;
2119 }
2120 stats->ipcs.ev_count = 0;
2121 stats->l4cs.ev_count = 0;
2122 stats->ipcs_bad.ev_count = 0;
2123 stats->l4cs_bad.ev_count = 0;
2124 stats->intzero.ev_count = 0;
2125 stats->legint.ev_count = 0;
2126 stats->crcerrs.ev_count = 0;
2127 stats->illerrc.ev_count = 0;
2128 stats->errbc.ev_count = 0;
2129 stats->mspdc.ev_count = 0;
2130 stats->mbsdc.ev_count = 0;
2131 stats->mpctotal.ev_count = 0;
2132 stats->mlfc.ev_count = 0;
2133 stats->mrfc.ev_count = 0;
2134 stats->rlec.ev_count = 0;
2135 stats->lxontxc.ev_count = 0;
2136 stats->lxonrxc.ev_count = 0;
2137 stats->lxofftxc.ev_count = 0;
2138 stats->lxoffrxc.ev_count = 0;
2139
2140 /* Packet Reception Stats */
2141 stats->tor.ev_count = 0;
2142 stats->gorc.ev_count = 0;
2143 stats->tpr.ev_count = 0;
2144 stats->gprc.ev_count = 0;
2145 stats->mprc.ev_count = 0;
2146 stats->bprc.ev_count = 0;
2147 stats->prc64.ev_count = 0;
2148 stats->prc127.ev_count = 0;
2149 stats->prc255.ev_count = 0;
2150 stats->prc511.ev_count = 0;
2151 stats->prc1023.ev_count = 0;
2152 stats->prc1522.ev_count = 0;
2153 stats->ruc.ev_count = 0;
2154 stats->rfc.ev_count = 0;
2155 stats->roc.ev_count = 0;
2156 stats->rjc.ev_count = 0;
2157 stats->mngprc.ev_count = 0;
2158 stats->mngpdc.ev_count = 0;
2159 stats->xec.ev_count = 0;
2160
2161 /* Packet Transmission Stats */
2162 stats->gotc.ev_count = 0;
2163 stats->tpt.ev_count = 0;
2164 stats->gptc.ev_count = 0;
2165 stats->bptc.ev_count = 0;
2166 stats->mptc.ev_count = 0;
2167 stats->mngptc.ev_count = 0;
2168 stats->ptc64.ev_count = 0;
2169 stats->ptc127.ev_count = 0;
2170 stats->ptc255.ev_count = 0;
2171 stats->ptc511.ev_count = 0;
2172 stats->ptc1023.ev_count = 0;
2173 stats->ptc1522.ev_count = 0;
2174 }
2175
2176 /************************************************************************
2177 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2178 *
2179 * Retrieves the TDH value from the hardware
2180 ************************************************************************/
2181 static int
2182 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2183 {
2184 struct sysctlnode node = *rnode;
2185 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2186 struct adapter *adapter;
2187 uint32_t val;
2188
2189 if (!txr)
2190 return (0);
2191
2192 adapter = txr->adapter;
2193 if (ixgbe_fw_recovery_mode_swflag(adapter))
2194 return (EPERM);
2195
2196 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2197 node.sysctl_data = &val;
2198 return sysctl_lookup(SYSCTLFN_CALL(&node));
2199 } /* ixgbe_sysctl_tdh_handler */
2200
2201 /************************************************************************
2202 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2203 *
2204 * Retrieves the TDT value from the hardware
2205 ************************************************************************/
2206 static int
2207 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2208 {
2209 struct sysctlnode node = *rnode;
2210 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2211 struct adapter *adapter;
2212 uint32_t val;
2213
2214 if (!txr)
2215 return (0);
2216
2217 adapter = txr->adapter;
2218 if (ixgbe_fw_recovery_mode_swflag(adapter))
2219 return (EPERM);
2220
2221 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2222 node.sysctl_data = &val;
2223 return sysctl_lookup(SYSCTLFN_CALL(&node));
2224 } /* ixgbe_sysctl_tdt_handler */
2225
2226 /************************************************************************
2227 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2228 * handler function
2229 *
2230 * Retrieves the next_to_check value
2231 ************************************************************************/
2232 static int
2233 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2234 {
2235 struct sysctlnode node = *rnode;
2236 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2237 struct adapter *adapter;
2238 uint32_t val;
2239
2240 if (!rxr)
2241 return (0);
2242
2243 adapter = rxr->adapter;
2244 if (ixgbe_fw_recovery_mode_swflag(adapter))
2245 return (EPERM);
2246
2247 val = rxr->next_to_check;
2248 node.sysctl_data = &val;
2249 return sysctl_lookup(SYSCTLFN_CALL(&node));
2250 } /* ixgbe_sysctl_next_to_check_handler */
2251
2252 /************************************************************************
2253 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2254 *
2255 * Retrieves the RDH value from the hardware
2256 ************************************************************************/
2257 static int
2258 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2259 {
2260 struct sysctlnode node = *rnode;
2261 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2262 struct adapter *adapter;
2263 uint32_t val;
2264
2265 if (!rxr)
2266 return (0);
2267
2268 adapter = rxr->adapter;
2269 if (ixgbe_fw_recovery_mode_swflag(adapter))
2270 return (EPERM);
2271
2272 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2273 node.sysctl_data = &val;
2274 return sysctl_lookup(SYSCTLFN_CALL(&node));
2275 } /* ixgbe_sysctl_rdh_handler */
2276
2277 /************************************************************************
2278 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2279 *
2280 * Retrieves the RDT value from the hardware
2281 ************************************************************************/
2282 static int
2283 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2284 {
2285 struct sysctlnode node = *rnode;
2286 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2287 struct adapter *adapter;
2288 uint32_t val;
2289
2290 if (!rxr)
2291 return (0);
2292
2293 adapter = rxr->adapter;
2294 if (ixgbe_fw_recovery_mode_swflag(adapter))
2295 return (EPERM);
2296
2297 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2298 node.sysctl_data = &val;
2299 return sysctl_lookup(SYSCTLFN_CALL(&node));
2300 } /* ixgbe_sysctl_rdt_handler */
2301
2302 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2303 /************************************************************************
2304 * ixgbe_register_vlan
2305 *
2306 * Run via vlan config EVENT, it enables us to use the
2307 * HW Filter table since we can get the vlan id. This
2308 * just creates the entry in the soft version of the
2309 * VFTA, init will repopulate the real table.
2310 ************************************************************************/
2311 static void
2312 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2313 {
2314 struct adapter *adapter = ifp->if_softc;
2315 u16 index, bit;
2316
2317 if (ifp->if_softc != arg) /* Not our event */
2318 return;
2319
2320 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2321 return;
2322
2323 IXGBE_CORE_LOCK(adapter);
2324 index = (vtag >> 5) & 0x7F;
2325 bit = vtag & 0x1F;
2326 adapter->shadow_vfta[index] |= (1 << bit);
2327 ixgbe_setup_vlan_hw_support(adapter);
2328 IXGBE_CORE_UNLOCK(adapter);
2329 } /* ixgbe_register_vlan */
2330
2331 /************************************************************************
2332 * ixgbe_unregister_vlan
2333 *
2334 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2335 ************************************************************************/
2336 static void
2337 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2338 {
2339 struct adapter *adapter = ifp->if_softc;
2340 u16 index, bit;
2341
2342 if (ifp->if_softc != arg)
2343 return;
2344
2345 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2346 return;
2347
2348 IXGBE_CORE_LOCK(adapter);
2349 index = (vtag >> 5) & 0x7F;
2350 bit = vtag & 0x1F;
2351 adapter->shadow_vfta[index] &= ~(1 << bit);
2352 /* Re-init to load the changes */
2353 ixgbe_setup_vlan_hw_support(adapter);
2354 IXGBE_CORE_UNLOCK(adapter);
2355 } /* ixgbe_unregister_vlan */
2356 #endif
2357
2358 static void
2359 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2360 {
2361 struct ethercom *ec = &adapter->osdep.ec;
2362 struct ixgbe_hw *hw = &adapter->hw;
2363 struct rx_ring *rxr;
2364 int i;
2365 u32 ctrl;
2366 bool hwtagging;
2367
2368 /*
2369 * This function is called from both if_init and ifflags_cb()
2370 * on NetBSD.
2371 */
2372
2373 /* Enable HW tagging only if any vlan is attached */
2374 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2375 && VLAN_ATTACHED(ec);
2376
2377 /* Setup the queues for vlans */
2378 for (i = 0; i < adapter->num_queues; i++) {
2379 rxr = &adapter->rx_rings[i];
2380 /*
2381 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2382 */
2383 if (hw->mac.type != ixgbe_mac_82598EB) {
2384 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2385 if (hwtagging)
2386 ctrl |= IXGBE_RXDCTL_VME;
2387 else
2388 ctrl &= ~IXGBE_RXDCTL_VME;
2389 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2390 }
2391 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2392 }
2393
2394 /*
2395 * A soft reset zero's out the VFTA, so
2396 * we need to repopulate it now.
2397 */
2398 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2399 if (adapter->shadow_vfta[i] != 0)
2400 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2401 adapter->shadow_vfta[i]);
2402
2403 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2404 /* Enable the Filter Table if enabled */
2405 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2406 ctrl |= IXGBE_VLNCTRL_VFE;
2407 else
2408 ctrl &= ~IXGBE_VLNCTRL_VFE;
2409 /* VLAN hw tagging for 82598 */
2410 if (hw->mac.type == ixgbe_mac_82598EB) {
2411 if (hwtagging)
2412 ctrl |= IXGBE_VLNCTRL_VME;
2413 else
2414 ctrl &= ~IXGBE_VLNCTRL_VME;
2415 }
2416 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2417 } /* ixgbe_setup_vlan_hw_support */
2418
2419 /************************************************************************
2420 * ixgbe_get_slot_info
2421 *
2422 * Get the width and transaction speed of
2423 * the slot this adapter is plugged into.
2424 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	/* Cleared below if the PCIe capability cannot be read. */
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* NOTE: dev now refers to the grandparent device, not the NIC. */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	/* Decode negotiated speed/width into hw->bus. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn when the slot is too narrow/slow for the adapter. */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2526
2527 /************************************************************************
2528 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2529 ************************************************************************/
2530 static inline void
2531 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2532 {
2533 struct ixgbe_hw *hw = &adapter->hw;
2534 struct ix_queue *que = &adapter->queues[vector];
2535 u64 queue = (u64)(1ULL << vector);
2536 u32 mask;
2537
2538 mutex_enter(&que->dc_mtx);
2539 if (que->disabled_count > 0 && --que->disabled_count > 0)
2540 goto out;
2541
2542 if (hw->mac.type == ixgbe_mac_82598EB) {
2543 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2544 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2545 } else {
2546 mask = (queue & 0xFFFFFFFF);
2547 if (mask)
2548 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2549 mask = (queue >> 32);
2550 if (mask)
2551 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2552 }
2553 out:
2554 mutex_exit(&que->dc_mtx);
2555 } /* ixgbe_enable_queue */
2556
2557 /************************************************************************
2558 * ixgbe_disable_queue_internal
2559 ************************************************************************/
2560 static inline void
2561 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2562 {
2563 struct ixgbe_hw *hw = &adapter->hw;
2564 struct ix_queue *que = &adapter->queues[vector];
2565 u64 queue = (u64)(1ULL << vector);
2566 u32 mask;
2567
2568 mutex_enter(&que->dc_mtx);
2569
2570 if (que->disabled_count > 0) {
2571 if (nestok)
2572 que->disabled_count++;
2573 goto out;
2574 }
2575 que->disabled_count++;
2576
2577 if (hw->mac.type == ixgbe_mac_82598EB) {
2578 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2579 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2580 } else {
2581 mask = (queue & 0xFFFFFFFF);
2582 if (mask)
2583 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2584 mask = (queue >> 32);
2585 if (mask)
2586 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2587 }
2588 out:
2589 mutex_exit(&que->dc_mtx);
2590 } /* ixgbe_disable_queue_internal */
2591
2592 /************************************************************************
2593 * ixgbe_disable_queue
2594 ************************************************************************/
2595 static inline void
2596 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2597 {
2598
2599 ixgbe_disable_queue_internal(adapter, vector, true);
2600 } /* ixgbe_disable_queue */
2601
2602 /************************************************************************
2603 * ixgbe_sched_handle_que - schedule deferred packet processing
2604 ************************************************************************/
2605 static inline void
2606 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2607 {
2608
2609 if (que->txrx_use_workqueue) {
2610 /*
2611 * adapter->que_wq is bound to each CPU instead of
2612 * each NIC queue to reduce workqueue kthread. As we
2613 * should consider about interrupt affinity in this
2614 * function, the workqueue kthread must be WQ_PERCPU.
2615 * If create WQ_PERCPU workqueue kthread for each NIC
2616 * queue, that number of created workqueue kthread is
2617 * (number of used NIC queue) * (number of CPUs) =
2618 * (number of CPUs) ^ 2 most often.
2619 *
2620 * The same NIC queue's interrupts are avoided by
2621 * masking the queue's interrupt. And different
2622 * NIC queue's interrupts use different struct work
2623 * (que->wq_cookie). So, "enqueued flag" to avoid
2624 * twice workqueue_enqueue() is not required .
2625 */
2626 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2627 } else {
2628 softint_schedule(que->que_si);
2629 }
2630 }
2631
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler.  Masks the queue's interrupt,
 *   reaps completed TX descriptors, optionally recomputes the
 *   adaptive interrupt moderation (AIM) interval from the average
 *   packet size seen since the last interrupt, and finally either
 *   schedules deferred processing or re-enables the queue.
 *
 *   Returns 1 when handled, 0 when the interrupt was spurious.
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue until deferred processing completes. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Take the larger of the two rings' average packet sizes. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * "more" is always true on NetBSD (RX is never processed in
	 * interrupt context), so processing continues in softint or
	 * workqueue context with the queue interrupt still masked.
	 */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2733
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 *
 *   Maps the current (physical layer, link speed) pair to an
 *   ifmedia subtype and appends the active flow-control pause
 *   flags.  Runs under the core lock.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report IFM_NONE and stop. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* BASE-T copper layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Optical layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2881
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.
 *
 *   Translates the requested ifmedia subtype into an
 *   ixgbe_link_speed mask, programs the link, and records the
 *   result in adapter->advertise.
 *
 *   Returns 0 on success, EINVAL/EPERM/ENODEV on failure.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Media changes are not permitted on backplane interfaces. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds as a bitmask:
	 * bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M,
	 * bit 4 = 2.5G, bit 5 = 5G.  Zero means autonegotiate.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
2987
/************************************************************************
 * ixgbe_set_promisc
 *
 *   Programs the FCTRL unicast (UPE) and multicast (MPE)
 *   promiscuous bits from the interface flags and the multicast
 *   filter population.  Called with the core lock held.
 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	ETHER_LOCK(ec);
	/*
	 * Count the multicast addresses, saturating at the HW filter
	 * capacity; ALLMULTI counts as "filter full" so MPE stays set.
	 */
	if (ec->ec_flags & ETHER_F_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
	}
	/* Everything fits in the HW filter: no multicast promiscuity. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* IFF_PROMISC overrides the above: enable both UPE and MPE. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
	ETHER_UNLOCK(ec);
} /* ixgbe_set_promisc */
3030
/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 *   Handles the "other" (non-queue) interrupt causes: link state
 *   change, flow director reinit, ECC errors, over-temperature,
 *   VF mailbox, SFP module insertion, fan failure, and external
 *   PHY events.  Real work is deferred to softints; this routine
 *   only acknowledges causes and schedules handlers.
 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the softint has handled it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/*
			 * NOTE(review): the early return above skips the
			 * final EIMS re-enable at the end of this function;
			 * presumably the FDIR softint restores it - confirm.
			 */
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed fiber link change (82599 only) */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3155
3156 static void
3157 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3158 {
3159
3160 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3161 itr |= itr << 16;
3162 else
3163 itr |= IXGBE_EITR_CNT_WDIS;
3164
3165 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3166 }
3167
3168
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler reporting and setting the per-queue interrupt
 *   rate.  Reads the EITR interval field (bits 3-11, in units of
 *   2us - hence rate = 500000 / usec) and, on write, converts the
 *   requested rate back into an EITR interval.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter  *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* Refuse changes while firmware recovery mode is active. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		/* Clamp to the minimum supported rate. */
		if (rate < 1000)
			rate = 1000;
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3222
3223 const struct sysctlnode *
3224 ixgbe_sysctl_instance(struct adapter *adapter)
3225 {
3226 const char *dvname;
3227 struct sysctllog **log;
3228 int rc;
3229 const struct sysctlnode *rnode;
3230
3231 if (adapter->sysctltop != NULL)
3232 return adapter->sysctltop;
3233
3234 log = &adapter->sysctllog;
3235 dvname = device_xname(adapter->dev);
3236
3237 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3238 0, CTLTYPE_NODE, dvname,
3239 SYSCTL_DESCR("ixgbe information and settings"),
3240 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3241 goto err;
3242
3243 return rnode;
3244 err:
3245 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3246 return NULL;
3247 }
3248
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Registers all per-device sysctl nodes under the device's root
 *   node: debug knobs, ring/queue counts, flow control, AIM,
 *   advertised speed, workqueue mode, and feature-conditional
 *   entries (DMA coalescing, WoL, external PHY, EEE).  Creation
 *   failures are reported but not fatal.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM knob from the loader tunable/default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* Firmware-driven PHY on X550EM_a: autonego forcing knob. */
	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3405
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Maps BAR0 (the device register window) and enables PCI memory
 *   decoding.  The prefetchable mapping flag is deliberately
 *   cleared: device registers must not be prefetched.
 *
 *   Returns 0 on success, ENXIO on mapping failure or an
 *   unexpected BAR type.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Register space must never be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3455
/************************************************************************
 * ixgbe_free_softint - tear down softints and workqueues
 *
 *   Disestablishes the per-queue TX/RX softints and destroys the
 *   deferred-processing workqueues, then drains and disestablishes
 *   the link/module/MSF/PHY/FDIR/mailbox softints, NULLing each
 *   pointer after release.
 ************************************************************************/
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	/* Per-queue softints (TX softint only when not in legacy TX mode). */
	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	/*
	 * NOTE(review): FDIR above is gated on feat_en but SRIOV here on
	 * feat_cap - presumably matching how the softints were established;
	 * verify against the allocation path.
	 */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
3508
3509 /************************************************************************
3510 * ixgbe_detach - Device removal routine
3511 *
3512 * Called when the driver is being removed.
3513 * Stops the adapter and deallocates all the resources
3514 * that were allocated for driver operation.
3515 *
3516 * return 0 on success, positive on failure
3517 ************************************************************************/
3518 static int
3519 ixgbe_detach(device_t dev, int flags)
3520 {
3521 struct adapter *adapter = device_private(dev);
3522 struct rx_ring *rxr = adapter->rx_rings;
3523 struct tx_ring *txr = adapter->tx_rings;
3524 struct ixgbe_hw *hw = &adapter->hw;
3525 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3526 u32 ctrl_ext;
3527 int i;
3528
3529 INIT_DEBUGOUT("ixgbe_detach: begin");
3530 if (adapter->osdep.attached == false)
3531 return 0;
3532
3533 if (ixgbe_pci_iov_detach(dev) != 0) {
3534 device_printf(dev, "SR-IOV in use; detach first.\n");
3535 return (EBUSY);
3536 }
3537
3538 /* Stop the interface. Callouts are stopped in it. */
3539 ixgbe_ifstop(adapter->ifp, 1);
3540 #if NVLAN > 0
3541 /* Make sure VLANs are not using driver */
3542 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3543 ; /* nothing to do: no VLANs */
3544 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3545 vlan_ifdetach(adapter->ifp);
3546 else {
3547 aprint_error_dev(dev, "VLANs in use, detach first\n");
3548 return (EBUSY);
3549 }
3550 #endif
3551
3552 pmf_device_deregister(dev);
3553
3554 ether_ifdetach(adapter->ifp);
3555 /* Stop the adapter */
3556 IXGBE_CORE_LOCK(adapter);
3557 ixgbe_setup_low_power_mode(adapter);
3558 IXGBE_CORE_UNLOCK(adapter);
3559
3560 ixgbe_free_softint(adapter);
3561
3562 /* let hardware know driver is unloading */
3563 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3564 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3565 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3566
3567 callout_halt(&adapter->timer, NULL);
3568 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3569 callout_halt(&adapter->recovery_mode_timer, NULL);
3570
3571 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3572 netmap_detach(adapter->ifp);
3573
3574 ixgbe_free_pci_resources(adapter);
3575 #if 0 /* XXX the NetBSD port is probably missing something here */
3576 bus_generic_detach(dev);
3577 #endif
3578 if_detach(adapter->ifp);
3579 if_percpuq_destroy(adapter->ipq);
3580
3581 sysctl_teardown(&adapter->sysctllog);
3582 evcnt_detach(&adapter->efbig_tx_dma_setup);
3583 evcnt_detach(&adapter->mbuf_defrag_failed);
3584 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3585 evcnt_detach(&adapter->einval_tx_dma_setup);
3586 evcnt_detach(&adapter->other_tx_dma_setup);
3587 evcnt_detach(&adapter->eagain_tx_dma_setup);
3588 evcnt_detach(&adapter->enomem_tx_dma_setup);
3589 evcnt_detach(&adapter->watchdog_events);
3590 evcnt_detach(&adapter->tso_err);
3591 evcnt_detach(&adapter->link_irq);
3592 evcnt_detach(&adapter->link_sicount);
3593 evcnt_detach(&adapter->mod_sicount);
3594 evcnt_detach(&adapter->msf_sicount);
3595 evcnt_detach(&adapter->phy_sicount);
3596
3597 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3598 if (i < __arraycount(stats->mpc)) {
3599 evcnt_detach(&stats->mpc[i]);
3600 if (hw->mac.type == ixgbe_mac_82598EB)
3601 evcnt_detach(&stats->rnbc[i]);
3602 }
3603 if (i < __arraycount(stats->pxontxc)) {
3604 evcnt_detach(&stats->pxontxc[i]);
3605 evcnt_detach(&stats->pxonrxc[i]);
3606 evcnt_detach(&stats->pxofftxc[i]);
3607 evcnt_detach(&stats->pxoffrxc[i]);
3608 if (hw->mac.type >= ixgbe_mac_82599EB)
3609 evcnt_detach(&stats->pxon2offc[i]);
3610 }
3611 }
3612
3613 txr = adapter->tx_rings;
3614 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3615 evcnt_detach(&adapter->queues[i].irqs);
3616 evcnt_detach(&adapter->queues[i].handleq);
3617 evcnt_detach(&adapter->queues[i].req);
3618 evcnt_detach(&txr->no_desc_avail);
3619 evcnt_detach(&txr->total_packets);
3620 evcnt_detach(&txr->tso_tx);
3621 #ifndef IXGBE_LEGACY_TX
3622 evcnt_detach(&txr->pcq_drops);
3623 #endif
3624
3625 if (i < __arraycount(stats->qprc)) {
3626 evcnt_detach(&stats->qprc[i]);
3627 evcnt_detach(&stats->qptc[i]);
3628 evcnt_detach(&stats->qbrc[i]);
3629 evcnt_detach(&stats->qbtc[i]);
3630 if (hw->mac.type >= ixgbe_mac_82599EB)
3631 evcnt_detach(&stats->qprdc[i]);
3632 }
3633
3634 evcnt_detach(&rxr->rx_packets);
3635 evcnt_detach(&rxr->rx_bytes);
3636 evcnt_detach(&rxr->rx_copies);
3637 evcnt_detach(&rxr->no_jmbuf);
3638 evcnt_detach(&rxr->rx_discarded);
3639 }
3640 evcnt_detach(&stats->ipcs);
3641 evcnt_detach(&stats->l4cs);
3642 evcnt_detach(&stats->ipcs_bad);
3643 evcnt_detach(&stats->l4cs_bad);
3644 evcnt_detach(&stats->intzero);
3645 evcnt_detach(&stats->legint);
3646 evcnt_detach(&stats->crcerrs);
3647 evcnt_detach(&stats->illerrc);
3648 evcnt_detach(&stats->errbc);
3649 evcnt_detach(&stats->mspdc);
3650 if (hw->mac.type >= ixgbe_mac_X550)
3651 evcnt_detach(&stats->mbsdc);
3652 evcnt_detach(&stats->mpctotal);
3653 evcnt_detach(&stats->mlfc);
3654 evcnt_detach(&stats->mrfc);
3655 evcnt_detach(&stats->rlec);
3656 evcnt_detach(&stats->lxontxc);
3657 evcnt_detach(&stats->lxonrxc);
3658 evcnt_detach(&stats->lxofftxc);
3659 evcnt_detach(&stats->lxoffrxc);
3660
3661 /* Packet Reception Stats */
3662 evcnt_detach(&stats->tor);
3663 evcnt_detach(&stats->gorc);
3664 evcnt_detach(&stats->tpr);
3665 evcnt_detach(&stats->gprc);
3666 evcnt_detach(&stats->mprc);
3667 evcnt_detach(&stats->bprc);
3668 evcnt_detach(&stats->prc64);
3669 evcnt_detach(&stats->prc127);
3670 evcnt_detach(&stats->prc255);
3671 evcnt_detach(&stats->prc511);
3672 evcnt_detach(&stats->prc1023);
3673 evcnt_detach(&stats->prc1522);
3674 evcnt_detach(&stats->ruc);
3675 evcnt_detach(&stats->rfc);
3676 evcnt_detach(&stats->roc);
3677 evcnt_detach(&stats->rjc);
3678 evcnt_detach(&stats->mngprc);
3679 evcnt_detach(&stats->mngpdc);
3680 evcnt_detach(&stats->xec);
3681
3682 /* Packet Transmission Stats */
3683 evcnt_detach(&stats->gotc);
3684 evcnt_detach(&stats->tpt);
3685 evcnt_detach(&stats->gptc);
3686 evcnt_detach(&stats->bptc);
3687 evcnt_detach(&stats->mptc);
3688 evcnt_detach(&stats->mngptc);
3689 evcnt_detach(&stats->ptc64);
3690 evcnt_detach(&stats->ptc127);
3691 evcnt_detach(&stats->ptc255);
3692 evcnt_detach(&stats->ptc511);
3693 evcnt_detach(&stats->ptc1023);
3694 evcnt_detach(&stats->ptc1522);
3695
3696 ixgbe_free_transmit_structures(adapter);
3697 ixgbe_free_receive_structures(adapter);
3698 for (i = 0; i < adapter->num_queues; i++) {
3699 struct ix_queue * que = &adapter->queues[i];
3700 mutex_destroy(&que->dc_mtx);
3701 }
3702 free(adapter->queues, M_DEVBUF);
3703 free(adapter->mta, M_DEVBUF);
3704
3705 IXGBE_CORE_LOCK_DESTROY(adapter);
3706
3707 return (0);
3708 } /* ixgbe_detach */
3709
3710 /************************************************************************
3711 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3712 *
3713 * Prepare the adapter/port for LPLU and/or WoL
3714 ************************************************************************/
3715 static int
3716 ixgbe_setup_low_power_mode(struct adapter *adapter)
3717 {
3718 struct ixgbe_hw *hw = &adapter->hw;
3719 device_t dev = adapter->dev;
3720 s32 error = 0;
3721
3722 KASSERT(mutex_owned(&adapter->core_mtx));
3723
3724 /* Limit power management flow to X550EM baseT */
3725 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3726 hw->phy.ops.enter_lplu) {
3727 /* X550EM baseT adapters need a special LPLU flow */
3728 hw->phy.reset_disable = true;
3729 ixgbe_stop(adapter);
3730 error = hw->phy.ops.enter_lplu(hw);
3731 if (error)
3732 device_printf(dev,
3733 "Error entering LPLU: %d\n", error);
3734 hw->phy.reset_disable = false;
3735 } else {
3736 /* Just stop for other adapters */
3737 ixgbe_stop(adapter);
3738 }
3739
3740 if (!hw->wol_enabled) {
3741 ixgbe_set_phy_power(hw, FALSE);
3742 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3743 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3744 } else {
3745 /* Turn off support for APM wakeup. (Using ACPI instead) */
3746 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3747 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3748
3749 /*
3750 * Clear Wake Up Status register to prevent any previous wakeup
3751 * events from waking us up immediately after we suspend.
3752 */
3753 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3754
3755 /*
3756 * Program the Wakeup Filter Control register with user filter
3757 * settings
3758 */
3759 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3760
3761 /* Enable wakeups and power management in Wakeup Control */
3762 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3763 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3764
3765 }
3766
3767 return error;
3768 } /* ixgbe_setup_low_power_mode */
3769
3770 /************************************************************************
3771 * ixgbe_shutdown - Shutdown entry point
3772 ************************************************************************/
3773 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3774 static int
3775 ixgbe_shutdown(device_t dev)
3776 {
3777 struct adapter *adapter = device_private(dev);
3778 int error = 0;
3779
3780 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3781
3782 IXGBE_CORE_LOCK(adapter);
3783 error = ixgbe_setup_low_power_mode(adapter);
3784 IXGBE_CORE_UNLOCK(adapter);
3785
3786 return (error);
3787 } /* ixgbe_shutdown */
3788 #endif
3789
3790 /************************************************************************
3791 * ixgbe_suspend
3792 *
3793 * From D0 to D3
3794 ************************************************************************/
3795 static bool
3796 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3797 {
3798 struct adapter *adapter = device_private(dev);
3799 int error = 0;
3800
3801 INIT_DEBUGOUT("ixgbe_suspend: begin");
3802
3803 IXGBE_CORE_LOCK(adapter);
3804
3805 error = ixgbe_setup_low_power_mode(adapter);
3806
3807 IXGBE_CORE_UNLOCK(adapter);
3808
3809 return (error);
3810 } /* ixgbe_suspend */
3811
3812 /************************************************************************
3813 * ixgbe_resume
3814 *
3815 * From D3 to D0
3816 ************************************************************************/
3817 static bool
3818 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3819 {
3820 struct adapter *adapter = device_private(dev);
3821 struct ifnet *ifp = adapter->ifp;
3822 struct ixgbe_hw *hw = &adapter->hw;
3823 u32 wus;
3824
3825 INIT_DEBUGOUT("ixgbe_resume: begin");
3826
3827 IXGBE_CORE_LOCK(adapter);
3828
3829 /* Read & clear WUS register */
3830 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3831 if (wus)
3832 device_printf(dev, "Woken up by (WUS): %#010x\n",
3833 IXGBE_READ_REG(hw, IXGBE_WUS));
3834 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3835 /* And clear WUFC until next low-power transition */
3836 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3837
3838 /*
3839 * Required after D3->D0 transition;
3840 * will re-advertise all previous advertised speeds
3841 */
3842 if (ifp->if_flags & IFF_UP)
3843 ixgbe_init_locked(adapter);
3844
3845 IXGBE_CORE_UNLOCK(adapter);
3846
3847 return true;
3848 } /* ixgbe_resume */
3849
3850 /*
3851 * Set the various hardware offload abilities.
3852 *
3853 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3854 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3855 * mbuf offload flags the driver will understand.
3856 */
3857 static void
3858 ixgbe_set_if_hwassist(struct adapter *adapter)
3859 {
3860 /* XXX */
3861 }
3862
3863 /************************************************************************
3864 * ixgbe_init_locked - Init entry point
3865 *
3866 * Used in two ways: It is used by the stack as an init
3867 * entry point in network interface structure. It is also
3868 * used by the driver as a hw/sw initialization routine to
3869 * get to a consistent state.
3870 *
3871 * return 0 on success, positive on failure
3872 ************************************************************************/
3873 static void
3874 ixgbe_init_locked(struct adapter *adapter)
3875 {
3876 struct ifnet *ifp = adapter->ifp;
3877 device_t dev = adapter->dev;
3878 struct ixgbe_hw *hw = &adapter->hw;
3879 struct ix_queue *que;
3880 struct tx_ring *txr;
3881 struct rx_ring *rxr;
3882 u32 txdctl, mhadd;
3883 u32 rxdctl, rxctrl;
3884 u32 ctrl_ext;
3885 int i, j, err;
3886
3887 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3888
3889 KASSERT(mutex_owned(&adapter->core_mtx));
3890 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3891
3892 hw->adapter_stopped = FALSE;
3893 ixgbe_stop_adapter(hw);
3894 callout_stop(&adapter->timer);
3895 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3896 que->disabled_count = 0;
3897
3898 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3899 adapter->max_frame_size =
3900 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3901
3902 /* Queue indices may change with IOV mode */
3903 ixgbe_align_all_queue_indices(adapter);
3904
3905 /* reprogram the RAR[0] in case user changed it. */
3906 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3907
3908 /* Get the latest mac address, User can use a LAA */
3909 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3910 IXGBE_ETH_LENGTH_OF_ADDRESS);
3911 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3912 hw->addr_ctrl.rar_used_count = 1;
3913
3914 /* Set hardware offload abilities from ifnet flags */
3915 ixgbe_set_if_hwassist(adapter);
3916
3917 /* Prepare transmit descriptors and buffers */
3918 if (ixgbe_setup_transmit_structures(adapter)) {
3919 device_printf(dev, "Could not setup transmit structures\n");
3920 ixgbe_stop(adapter);
3921 return;
3922 }
3923
3924 ixgbe_init_hw(hw);
3925
3926 ixgbe_initialize_iov(adapter);
3927
3928 ixgbe_initialize_transmit_units(adapter);
3929
3930 /* Setup Multicast table */
3931 ixgbe_set_multi(adapter);
3932
3933 /* Determine the correct mbuf pool, based on frame size */
3934 if (adapter->max_frame_size <= MCLBYTES)
3935 adapter->rx_mbuf_sz = MCLBYTES;
3936 else
3937 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3938
3939 /* Prepare receive descriptors and buffers */
3940 if (ixgbe_setup_receive_structures(adapter)) {
3941 device_printf(dev, "Could not setup receive structures\n");
3942 ixgbe_stop(adapter);
3943 return;
3944 }
3945
3946 /* Configure RX settings */
3947 ixgbe_initialize_receive_units(adapter);
3948
3949 /* Enable SDP & MSI-X interrupts based on adapter */
3950 ixgbe_config_gpie(adapter);
3951
3952 /* Set MTU size */
3953 if (ifp->if_mtu > ETHERMTU) {
3954 /* aka IXGBE_MAXFRS on 82599 and newer */
3955 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3956 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3957 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3958 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3959 }
3960
3961 /* Now enable all the queues */
3962 for (i = 0; i < adapter->num_queues; i++) {
3963 txr = &adapter->tx_rings[i];
3964 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3965 txdctl |= IXGBE_TXDCTL_ENABLE;
3966 /* Set WTHRESH to 8, burst writeback */
3967 txdctl |= (8 << 16);
3968 /*
3969 * When the internal queue falls below PTHRESH (32),
3970 * start prefetching as long as there are at least
3971 * HTHRESH (1) buffers ready. The values are taken
3972 * from the Intel linux driver 3.8.21.
3973 * Prefetching enables tx line rate even with 1 queue.
3974 */
3975 txdctl |= (32 << 0) | (1 << 8);
3976 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3977 }
3978
3979 for (i = 0; i < adapter->num_queues; i++) {
3980 rxr = &adapter->rx_rings[i];
3981 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3982 if (hw->mac.type == ixgbe_mac_82598EB) {
3983 /*
3984 * PTHRESH = 21
3985 * HTHRESH = 4
3986 * WTHRESH = 8
3987 */
3988 rxdctl &= ~0x3FFFFF;
3989 rxdctl |= 0x080420;
3990 }
3991 rxdctl |= IXGBE_RXDCTL_ENABLE;
3992 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3993 for (j = 0; j < 10; j++) {
3994 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3995 IXGBE_RXDCTL_ENABLE)
3996 break;
3997 else
3998 msec_delay(1);
3999 }
4000 wmb();
4001
4002 /*
4003 * In netmap mode, we must preserve the buffers made
4004 * available to userspace before the if_init()
4005 * (this is true by default on the TX side, because
4006 * init makes all buffers available to userspace).
4007 *
4008 * netmap_reset() and the device specific routines
4009 * (e.g. ixgbe_setup_receive_rings()) map these
4010 * buffers at the end of the NIC ring, so here we
4011 * must set the RDT (tail) register to make sure
4012 * they are not overwritten.
4013 *
4014 * In this driver the NIC ring starts at RDH = 0,
4015 * RDT points to the last slot available for reception (?),
4016 * so RDT = num_rx_desc - 1 means the whole ring is available.
4017 */
4018 #ifdef DEV_NETMAP
4019 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4020 (ifp->if_capenable & IFCAP_NETMAP)) {
4021 struct netmap_adapter *na = NA(adapter->ifp);
4022 struct netmap_kring *kring = na->rx_rings[i];
4023 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4024
4025 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4026 } else
4027 #endif /* DEV_NETMAP */
4028 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4029 adapter->num_rx_desc - 1);
4030 }
4031
4032 /* Enable Receive engine */
4033 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4034 if (hw->mac.type == ixgbe_mac_82598EB)
4035 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4036 rxctrl |= IXGBE_RXCTRL_RXEN;
4037 ixgbe_enable_rx_dma(hw, rxctrl);
4038
4039 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4040
4041 /* Set up MSI/MSI-X routing */
4042 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4043 ixgbe_configure_ivars(adapter);
4044 /* Set up auto-mask */
4045 if (hw->mac.type == ixgbe_mac_82598EB)
4046 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4047 else {
4048 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4049 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4050 }
4051 } else { /* Simple settings for Legacy/MSI */
4052 ixgbe_set_ivar(adapter, 0, 0, 0);
4053 ixgbe_set_ivar(adapter, 0, 0, 1);
4054 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4055 }
4056
4057 ixgbe_init_fdir(adapter);
4058
4059 /*
4060 * Check on any SFP devices that
4061 * need to be kick-started
4062 */
4063 if (hw->phy.type == ixgbe_phy_none) {
4064 err = hw->phy.ops.identify(hw);
4065 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4066 device_printf(dev,
4067 "Unsupported SFP+ module type was detected.\n");
4068 return;
4069 }
4070 }
4071
4072 /* Set moderation on the Link interrupt */
4073 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4074
4075 /* Enable EEE power saving */
4076 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4077 hw->mac.ops.setup_eee(hw,
4078 adapter->feat_en & IXGBE_FEATURE_EEE);
4079
4080 /* Enable power to the phy. */
4081 ixgbe_set_phy_power(hw, TRUE);
4082
4083 /* Config/Enable Link */
4084 ixgbe_config_link(adapter);
4085
4086 /* Hardware Packet Buffer & Flow Control setup */
4087 ixgbe_config_delay_values(adapter);
4088
4089 /* Initialize the FC settings */
4090 ixgbe_start_hw(hw);
4091
4092 /* Set up VLAN support and filter */
4093 ixgbe_setup_vlan_hw_support(adapter);
4094
4095 /* Setup DMA Coalescing */
4096 ixgbe_config_dmac(adapter);
4097
4098 /* And now turn on interrupts */
4099 ixgbe_enable_intr(adapter);
4100
4101 /* Enable the use of the MBX by the VF's */
4102 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4103 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4104 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4105 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4106 }
4107
4108 /* Update saved flags. See ixgbe_ifflags_cb() */
4109 adapter->if_flags = ifp->if_flags;
4110
4111 /* Now inform the stack we're ready */
4112 ifp->if_flags |= IFF_RUNNING;
4113
4114 return;
4115 } /* ixgbe_init_locked */
4116
4117 /************************************************************************
4118 * ixgbe_init
4119 ************************************************************************/
4120 static int
4121 ixgbe_init(struct ifnet *ifp)
4122 {
4123 struct adapter *adapter = ifp->if_softc;
4124
4125 IXGBE_CORE_LOCK(adapter);
4126 ixgbe_init_locked(adapter);
4127 IXGBE_CORE_UNLOCK(adapter);
4128
4129 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4130 } /* ixgbe_init */
4131
4132 /************************************************************************
4133 * ixgbe_set_ivar
4134 *
4135 * Setup the correct IVAR register for a particular MSI-X interrupt
4136 * (yes this is all very magic and confusing :)
4137 * - entry is the register array entry
4138 * - vector is the MSI-X vector for this queue
4139 * - type is RX/TX/MISC
4140 ************************************************************************/
4141 static void
4142 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4143 {
4144 struct ixgbe_hw *hw = &adapter->hw;
4145 u32 ivar, index;
4146
4147 vector |= IXGBE_IVAR_ALLOC_VAL;
4148
4149 switch (hw->mac.type) {
4150 case ixgbe_mac_82598EB:
4151 if (type == -1)
4152 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4153 else
4154 entry += (type * 64);
4155 index = (entry >> 2) & 0x1F;
4156 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4157 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4158 ivar |= (vector << (8 * (entry & 0x3)));
4159 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4160 break;
4161 case ixgbe_mac_82599EB:
4162 case ixgbe_mac_X540:
4163 case ixgbe_mac_X550:
4164 case ixgbe_mac_X550EM_x:
4165 case ixgbe_mac_X550EM_a:
4166 if (type == -1) { /* MISC IVAR */
4167 index = (entry & 1) * 8;
4168 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4169 ivar &= ~(0xFF << index);
4170 ivar |= (vector << index);
4171 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4172 } else { /* RX/TX IVARS */
4173 index = (16 * (entry & 1)) + (8 * type);
4174 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4175 ivar &= ~(0xFF << index);
4176 ivar |= (vector << index);
4177 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4178 }
4179 break;
4180 default:
4181 break;
4182 }
4183 } /* ixgbe_set_ivar */
4184
4185 /************************************************************************
4186 * ixgbe_configure_ivars
4187 ************************************************************************/
4188 static void
4189 ixgbe_configure_ivars(struct adapter *adapter)
4190 {
4191 struct ix_queue *que = adapter->queues;
4192 u32 newitr;
4193
4194 if (ixgbe_max_interrupt_rate > 0)
4195 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4196 else {
4197 /*
4198 * Disable DMA coalescing if interrupt moderation is
4199 * disabled.
4200 */
4201 adapter->dmac = 0;
4202 newitr = 0;
4203 }
4204
4205 for (int i = 0; i < adapter->num_queues; i++, que++) {
4206 struct rx_ring *rxr = &adapter->rx_rings[i];
4207 struct tx_ring *txr = &adapter->tx_rings[i];
4208 /* First the RX queue entry */
4209 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4210 /* ... and the TX */
4211 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4212 /* Set an Initial EITR value */
4213 ixgbe_eitr_write(adapter, que->msix, newitr);
4214 /*
4215 * To eliminate influence of the previous state.
4216 * At this point, Tx/Rx interrupt handler
4217 * (ixgbe_msix_que()) cannot be called, so both
4218 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4219 */
4220 que->eitr_setting = 0;
4221 }
4222
4223 /* For the Link interrupt */
4224 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4225 } /* ixgbe_configure_ivars */
4226
4227 /************************************************************************
4228 * ixgbe_config_gpie
4229 ************************************************************************/
4230 static void
4231 ixgbe_config_gpie(struct adapter *adapter)
4232 {
4233 struct ixgbe_hw *hw = &adapter->hw;
4234 u32 gpie;
4235
4236 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4237
4238 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4239 /* Enable Enhanced MSI-X mode */
4240 gpie |= IXGBE_GPIE_MSIX_MODE
4241 | IXGBE_GPIE_EIAME
4242 | IXGBE_GPIE_PBA_SUPPORT
4243 | IXGBE_GPIE_OCD;
4244 }
4245
4246 /* Fan Failure Interrupt */
4247 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4248 gpie |= IXGBE_SDP1_GPIEN;
4249
4250 /* Thermal Sensor Interrupt */
4251 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4252 gpie |= IXGBE_SDP0_GPIEN_X540;
4253
4254 /* Link detection */
4255 switch (hw->mac.type) {
4256 case ixgbe_mac_82599EB:
4257 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4258 break;
4259 case ixgbe_mac_X550EM_x:
4260 case ixgbe_mac_X550EM_a:
4261 gpie |= IXGBE_SDP0_GPIEN_X540;
4262 break;
4263 default:
4264 break;
4265 }
4266
4267 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4268
4269 } /* ixgbe_config_gpie */
4270
4271 /************************************************************************
4272 * ixgbe_config_delay_values
4273 *
4274 * Requires adapter->max_frame_size to be set.
4275 ************************************************************************/
4276 static void
4277 ixgbe_config_delay_values(struct adapter *adapter)
4278 {
4279 struct ixgbe_hw *hw = &adapter->hw;
4280 u32 rxpb, frame, size, tmp;
4281
4282 frame = adapter->max_frame_size;
4283
4284 /* Calculate High Water */
4285 switch (hw->mac.type) {
4286 case ixgbe_mac_X540:
4287 case ixgbe_mac_X550:
4288 case ixgbe_mac_X550EM_x:
4289 case ixgbe_mac_X550EM_a:
4290 tmp = IXGBE_DV_X540(frame, frame);
4291 break;
4292 default:
4293 tmp = IXGBE_DV(frame, frame);
4294 break;
4295 }
4296 size = IXGBE_BT2KB(tmp);
4297 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4298 hw->fc.high_water[0] = rxpb - size;
4299
4300 /* Now calculate Low Water */
4301 switch (hw->mac.type) {
4302 case ixgbe_mac_X540:
4303 case ixgbe_mac_X550:
4304 case ixgbe_mac_X550EM_x:
4305 case ixgbe_mac_X550EM_a:
4306 tmp = IXGBE_LOW_DV_X540(frame);
4307 break;
4308 default:
4309 tmp = IXGBE_LOW_DV(frame);
4310 break;
4311 }
4312 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4313
4314 hw->fc.pause_time = IXGBE_FC_PAUSE;
4315 hw->fc.send_xon = TRUE;
4316 } /* ixgbe_config_delay_values */
4317
4318 /************************************************************************
4319 * ixgbe_set_multi - Multicast Update
4320 *
4321 * Called whenever multicast address list is updated.
4322 ************************************************************************/
4323 static void
4324 ixgbe_set_multi(struct adapter *adapter)
4325 {
4326 struct ixgbe_mc_addr *mta;
4327 struct ifnet *ifp = adapter->ifp;
4328 u8 *update_ptr;
4329 int mcnt = 0;
4330 u32 fctrl;
4331 struct ethercom *ec = &adapter->osdep.ec;
4332 struct ether_multi *enm;
4333 struct ether_multistep step;
4334
4335 KASSERT(mutex_owned(&adapter->core_mtx));
4336 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
4337
4338 mta = adapter->mta;
4339 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4340
4341 ETHER_LOCK(ec);
4342 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4343 ETHER_FIRST_MULTI(step, ec, enm);
4344 while (enm != NULL) {
4345 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4346 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4347 ETHER_ADDR_LEN) != 0)) {
4348 ec->ec_flags |= ETHER_F_ALLMULTI;
4349 break;
4350 }
4351 bcopy(enm->enm_addrlo,
4352 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4353 mta[mcnt].vmdq = adapter->pool;
4354 mcnt++;
4355 ETHER_NEXT_MULTI(step, enm);
4356 }
4357
4358 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4359 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4360 if (ifp->if_flags & IFF_PROMISC)
4361 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4362 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4363 fctrl |= IXGBE_FCTRL_MPE;
4364 }
4365 ETHER_UNLOCK(ec);
4366
4367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4368
4369 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4370 update_ptr = (u8 *)mta;
4371 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4372 ixgbe_mc_array_itr, TRUE);
4373 }
4374
4375 } /* ixgbe_set_multi */
4376
4377 /************************************************************************
4378 * ixgbe_mc_array_itr
4379 *
4380 * An iterator function needed by the multicast shared code.
4381 * It feeds the shared code routine the addresses in the
4382 * array of ixgbe_set_multi() one by one.
4383 ************************************************************************/
4384 static u8 *
4385 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4386 {
4387 struct ixgbe_mc_addr *mta;
4388
4389 mta = (struct ixgbe_mc_addr *)*update_ptr;
4390 *vmdq = mta->vmdq;
4391
4392 *update_ptr = (u8*)(mta + 1);
4393
4394 return (mta->addr);
4395 } /* ixgbe_mc_array_itr */
4396
4397 /************************************************************************
4398 * ixgbe_local_timer - Timer routine
4399 *
4400 * Checks for link status, updates statistics,
4401 * and runs the watchdog check.
4402 ************************************************************************/
4403 static void
4404 ixgbe_local_timer(void *arg)
4405 {
4406 struct adapter *adapter = arg;
4407
4408 IXGBE_CORE_LOCK(adapter);
4409 ixgbe_local_timer1(adapter);
4410 IXGBE_CORE_UNLOCK(adapter);
4411 }
4412
/*
 * ixgbe_local_timer1 - locked body of the per-second timer: refreshes
 * link/stats, aggregates per-queue TX error counters into the adapter
 * event counters, and runs the hung-queue watchdog.  Core lock held.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters: sum the per-queue tallies */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next second */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4517
4518 /************************************************************************
4519 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4520 ************************************************************************/
4521 static void
4522 ixgbe_recovery_mode_timer(void *arg)
4523 {
4524 struct adapter *adapter = arg;
4525 struct ixgbe_hw *hw = &adapter->hw;
4526
4527 IXGBE_CORE_LOCK(adapter);
4528 if (ixgbe_fw_recovery_mode(hw)) {
4529 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4530 /* Firmware error detected, entering recovery mode */
4531 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4532
4533 if (hw->adapter_stopped == FALSE)
4534 ixgbe_stop(adapter);
4535 }
4536 } else
4537 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4538
4539 callout_reset(&adapter->recovery_mode_timer, hz,
4540 ixgbe_recovery_mode_timer, adapter);
4541 IXGBE_CORE_UNLOCK(adapter);
4542 } /* ixgbe_recovery_mode_timer */
4543
4544 /************************************************************************
4545 * ixgbe_sfp_probe
4546 *
4547 * Determine if a port had optics inserted.
4548 ************************************************************************/
4549 static bool
4550 ixgbe_sfp_probe(struct adapter *adapter)
4551 {
4552 struct ixgbe_hw *hw = &adapter->hw;
4553 device_t dev = adapter->dev;
4554 bool result = FALSE;
4555
4556 if ((hw->phy.type == ixgbe_phy_nl) &&
4557 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4558 s32 ret = hw->phy.ops.identify_sfp(hw);
4559 if (ret)
4560 goto out;
4561 ret = hw->phy.ops.reset(hw);
4562 adapter->sfp_probe = FALSE;
4563 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4564 device_printf(dev,"Unsupported SFP+ module detected!");
4565 device_printf(dev,
4566 "Reload driver with supported module.\n");
4567 goto out;
4568 } else
4569 device_printf(dev, "SFP+ module detected!\n");
4570 /* We now have supported optics */
4571 result = TRUE;
4572 }
4573 out:
4574
4575 return (result);
4576 } /* ixgbe_sfp_probe */
4577
4578 /************************************************************************
4579 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4580 ************************************************************************/
4581 static void
4582 ixgbe_handle_mod(void *context)
4583 {
4584 struct adapter *adapter = context;
4585 struct ixgbe_hw *hw = &adapter->hw;
4586 device_t dev = adapter->dev;
4587 u32 err, cage_full = 0;
4588
4589 ++adapter->mod_sicount.ev_count;
4590 if (adapter->hw.need_crosstalk_fix) {
4591 switch (hw->mac.type) {
4592 case ixgbe_mac_82599EB:
4593 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4594 IXGBE_ESDP_SDP2;
4595 break;
4596 case ixgbe_mac_X550EM_x:
4597 case ixgbe_mac_X550EM_a:
4598 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4599 IXGBE_ESDP_SDP0;
4600 break;
4601 default:
4602 break;
4603 }
4604
4605 if (!cage_full)
4606 return;
4607 }
4608
4609 err = hw->phy.ops.identify_sfp(hw);
4610 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4611 device_printf(dev,
4612 "Unsupported SFP+ module type was detected.\n");
4613 return;
4614 }
4615
4616 if (hw->mac.type == ixgbe_mac_82598EB)
4617 err = hw->phy.ops.reset(hw);
4618 else
4619 err = hw->mac.ops.setup_sfp(hw);
4620
4621 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4622 device_printf(dev,
4623 "Setup failure - unsupported SFP+ module type.\n");
4624 return;
4625 }
4626 softint_schedule(adapter->msf_si);
4627 } /* ixgbe_handle_mod */
4628
4629
4630 /************************************************************************
4631 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4632 ************************************************************************/
4633 static void
4634 ixgbe_handle_msf(void *context)
4635 {
4636 struct adapter *adapter = context;
4637 struct ixgbe_hw *hw = &adapter->hw;
4638 u32 autoneg;
4639 bool negotiate;
4640
4641 IXGBE_CORE_LOCK(adapter);
4642 ++adapter->msf_sicount.ev_count;
4643 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4644 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4645
4646 autoneg = hw->phy.autoneg_advertised;
4647 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4648 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4649 else
4650 negotiate = 0;
4651 if (hw->mac.ops.setup_link)
4652 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4653
4654 /* Adjust media types shown in ifconfig */
4655 ifmedia_removeall(&adapter->media);
4656 ixgbe_add_media_types(adapter);
4657 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4658 IXGBE_CORE_UNLOCK(adapter);
4659 } /* ixgbe_handle_msf */
4660
4661 /************************************************************************
4662 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4663 ************************************************************************/
4664 static void
4665 ixgbe_handle_phy(void *context)
4666 {
4667 struct adapter *adapter = context;
4668 struct ixgbe_hw *hw = &adapter->hw;
4669 int error;
4670
4671 ++adapter->phy_sicount.ev_count;
4672 error = hw->phy.ops.handle_lasi(hw);
4673 if (error == IXGBE_ERR_OVERTEMP)
4674 device_printf(adapter->dev,
4675 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4676 " PHY will downshift to lower power state!\n");
4677 else if (error)
4678 device_printf(adapter->dev,
4679 "Error handling LASI interrupt: %d\n", error);
4680 } /* ixgbe_handle_phy */
4681
4682 static void
4683 ixgbe_ifstop(struct ifnet *ifp, int disable)
4684 {
4685 struct adapter *adapter = ifp->if_softc;
4686
4687 IXGBE_CORE_LOCK(adapter);
4688 ixgbe_stop(adapter);
4689 IXGBE_CORE_UNLOCK(adapter);
4690 }
4691
4692 /************************************************************************
4693 * ixgbe_stop - Stop the hardware
4694 *
4695 * Disables all traffic on the adapter by issuing a
4696 * global reset on the MAC and deallocates TX/RX buffers.
4697 ************************************************************************/
4698 static void
4699 ixgbe_stop(void *arg)
4700 {
4701 struct ifnet *ifp;
4702 struct adapter *adapter = arg;
4703 struct ixgbe_hw *hw = &adapter->hw;
4704
4705 ifp = adapter->ifp;
4706
4707 KASSERT(mutex_owned(&adapter->core_mtx));
4708
4709 INIT_DEBUGOUT("ixgbe_stop: begin\n");
4710 ixgbe_disable_intr(adapter);
4711 callout_stop(&adapter->timer);
4712
4713 /* Let the stack know...*/
4714 ifp->if_flags &= ~IFF_RUNNING;
4715
4716 ixgbe_reset_hw(hw);
4717 hw->adapter_stopped = FALSE;
4718 ixgbe_stop_adapter(hw);
4719 if (hw->mac.type == ixgbe_mac_82599EB)
4720 ixgbe_stop_mac_link_on_d3_82599(hw);
4721 /* Turn off the laser - noop with no optics */
4722 ixgbe_disable_tx_laser(hw);
4723
4724 /* Update the stack */
4725 adapter->link_up = FALSE;
4726 ixgbe_update_link_status(adapter);
4727
4728 /* reprogram the RAR[0] in case user changed it. */
4729 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4730
4731 return;
4732 } /* ixgbe_stop */
4733
4734 /************************************************************************
4735 * ixgbe_update_link_status - Update OS on link state
4736 *
4737 * Note: Only updates the OS on the cached link state.
4738 * The real check of the hardware only happens with
4739 * a link interrupt.
4740 ************************************************************************/
4741 static void
4742 ixgbe_update_link_status(struct adapter *adapter)
4743 {
4744 struct ifnet *ifp = adapter->ifp;
4745 device_t dev = adapter->dev;
4746 struct ixgbe_hw *hw = &adapter->hw;
4747
4748 KASSERT(mutex_owned(&adapter->core_mtx));
4749
4750 if (adapter->link_up) {
4751 if (adapter->link_active != LINK_STATE_UP) {
4752 /*
4753 * To eliminate influence of the previous state
4754 * in the same way as ixgbe_init_locked().
4755 */
4756 struct ix_queue *que = adapter->queues;
4757 for (int i = 0; i < adapter->num_queues; i++, que++)
4758 que->eitr_setting = 0;
4759
4760 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4761 /*
4762 * Discard count for both MAC Local Fault and
4763 * Remote Fault because those registers are
4764 * valid only when the link speed is up and
4765 * 10Gbps.
4766 */
4767 IXGBE_READ_REG(hw, IXGBE_MLFC);
4768 IXGBE_READ_REG(hw, IXGBE_MRFC);
4769 }
4770
4771 if (bootverbose) {
4772 const char *bpsmsg;
4773
4774 switch (adapter->link_speed) {
4775 case IXGBE_LINK_SPEED_10GB_FULL:
4776 bpsmsg = "10 Gbps";
4777 break;
4778 case IXGBE_LINK_SPEED_5GB_FULL:
4779 bpsmsg = "5 Gbps";
4780 break;
4781 case IXGBE_LINK_SPEED_2_5GB_FULL:
4782 bpsmsg = "2.5 Gbps";
4783 break;
4784 case IXGBE_LINK_SPEED_1GB_FULL:
4785 bpsmsg = "1 Gbps";
4786 break;
4787 case IXGBE_LINK_SPEED_100_FULL:
4788 bpsmsg = "100 Mbps";
4789 break;
4790 case IXGBE_LINK_SPEED_10_FULL:
4791 bpsmsg = "10 Mbps";
4792 break;
4793 default:
4794 bpsmsg = "unknown speed";
4795 break;
4796 }
4797 device_printf(dev, "Link is up %s %s \n",
4798 bpsmsg, "Full Duplex");
4799 }
4800 adapter->link_active = LINK_STATE_UP;
4801 /* Update any Flow Control changes */
4802 ixgbe_fc_enable(&adapter->hw);
4803 /* Update DMA coalescing config */
4804 ixgbe_config_dmac(adapter);
4805 if_link_state_change(ifp, LINK_STATE_UP);
4806
4807 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4808 ixgbe_ping_all_vfs(adapter);
4809 }
4810 } else {
4811 /*
4812 * Do it when link active changes to DOWN. i.e.
4813 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
4814 * b) LINK_STATE_UP -> LINK_STATE_DOWN
4815 */
4816 if (adapter->link_active != LINK_STATE_DOWN) {
4817 if (bootverbose)
4818 device_printf(dev, "Link is Down\n");
4819 if_link_state_change(ifp, LINK_STATE_DOWN);
4820 adapter->link_active = LINK_STATE_DOWN;
4821 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4822 ixgbe_ping_all_vfs(adapter);
4823 ixgbe_drain_all(adapter);
4824 }
4825 }
4826 } /* ixgbe_update_link_status */
4827
4828 /************************************************************************
4829 * ixgbe_config_dmac - Configure DMA Coalescing
4830 ************************************************************************/
4831 static void
4832 ixgbe_config_dmac(struct adapter *adapter)
4833 {
4834 struct ixgbe_hw *hw = &adapter->hw;
4835 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4836
4837 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4838 return;
4839
4840 if (dcfg->watchdog_timer ^ adapter->dmac ||
4841 dcfg->link_speed ^ adapter->link_speed) {
4842 dcfg->watchdog_timer = adapter->dmac;
4843 dcfg->fcoe_en = false;
4844 dcfg->link_speed = adapter->link_speed;
4845 dcfg->num_tcs = 1;
4846
4847 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4848 dcfg->watchdog_timer, dcfg->link_speed);
4849
4850 hw->mac.ops.dmac_config(hw);
4851 }
4852 } /* ixgbe_config_dmac */
4853
4854 /************************************************************************
4855 * ixgbe_enable_intr
4856 ************************************************************************/
4857 static void
4858 ixgbe_enable_intr(struct adapter *adapter)
4859 {
4860 struct ixgbe_hw *hw = &adapter->hw;
4861 struct ix_queue *que = adapter->queues;
4862 u32 mask, fwsm;
4863
4864 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4865
4866 switch (adapter->hw.mac.type) {
4867 case ixgbe_mac_82599EB:
4868 mask |= IXGBE_EIMS_ECC;
4869 /* Temperature sensor on some adapters */
4870 mask |= IXGBE_EIMS_GPI_SDP0;
4871 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
4872 mask |= IXGBE_EIMS_GPI_SDP1;
4873 mask |= IXGBE_EIMS_GPI_SDP2;
4874 break;
4875 case ixgbe_mac_X540:
4876 /* Detect if Thermal Sensor is enabled */
4877 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4878 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4879 mask |= IXGBE_EIMS_TS;
4880 mask |= IXGBE_EIMS_ECC;
4881 break;
4882 case ixgbe_mac_X550:
4883 /* MAC thermal sensor is automatically enabled */
4884 mask |= IXGBE_EIMS_TS;
4885 mask |= IXGBE_EIMS_ECC;
4886 break;
4887 case ixgbe_mac_X550EM_x:
4888 case ixgbe_mac_X550EM_a:
4889 /* Some devices use SDP0 for important information */
4890 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
4891 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
4892 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
4893 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
4894 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
4895 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
4896 mask |= IXGBE_EICR_GPI_SDP0_X540;
4897 mask |= IXGBE_EIMS_ECC;
4898 break;
4899 default:
4900 break;
4901 }
4902
4903 /* Enable Fan Failure detection */
4904 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4905 mask |= IXGBE_EIMS_GPI_SDP1;
4906 /* Enable SR-IOV */
4907 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4908 mask |= IXGBE_EIMS_MAILBOX;
4909 /* Enable Flow Director */
4910 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4911 mask |= IXGBE_EIMS_FLOW_DIR;
4912
4913 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4914
4915 /* With MSI-X we use auto clear */
4916 if (adapter->msix_mem) {
4917 mask = IXGBE_EIMS_ENABLE_MASK;
4918 /* Don't autoclear Link */
4919 mask &= ~IXGBE_EIMS_OTHER;
4920 mask &= ~IXGBE_EIMS_LSC;
4921 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
4922 mask &= ~IXGBE_EIMS_MAILBOX;
4923 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4924 }
4925
4926 /*
4927 * Now enable all queues, this is done separately to
4928 * allow for handling the extended (beyond 32) MSI-X
4929 * vectors that can be used by 82599
4930 */
4931 for (int i = 0; i < adapter->num_queues; i++, que++)
4932 ixgbe_enable_queue(adapter, que->msix);
4933
4934 IXGBE_WRITE_FLUSH(hw);
4935
4936 } /* ixgbe_enable_intr */
4937
4938 /************************************************************************
4939 * ixgbe_disable_intr_internal
4940 ************************************************************************/
4941 static void
4942 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
4943 {
4944 struct ix_queue *que = adapter->queues;
4945
4946 /* disable interrupts other than queues */
4947 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4948
4949 if (adapter->msix_mem)
4950 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4951
4952 for (int i = 0; i < adapter->num_queues; i++, que++)
4953 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
4954
4955 IXGBE_WRITE_FLUSH(&adapter->hw);
4956
4957 } /* ixgbe_do_disable_intr_internal */
4958
4959 /************************************************************************
4960 * ixgbe_disable_intr
4961 ************************************************************************/
4962 static void
4963 ixgbe_disable_intr(struct adapter *adapter)
4964 {
4965
4966 ixgbe_disable_intr_internal(adapter, true);
4967 } /* ixgbe_disable_intr */
4968
4969 /************************************************************************
4970 * ixgbe_ensure_disabled_intr
4971 ************************************************************************/
4972 void
4973 ixgbe_ensure_disabled_intr(struct adapter *adapter)
4974 {
4975
4976 ixgbe_disable_intr_internal(adapter, false);
4977 } /* ixgbe_ensure_disabled_intr */
4978
4979 /************************************************************************
4980 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4981 ************************************************************************/
4982 static int
4983 ixgbe_legacy_irq(void *arg)
4984 {
4985 struct ix_queue *que = arg;
4986 struct adapter *adapter = que->adapter;
4987 struct ixgbe_hw *hw = &adapter->hw;
4988 struct ifnet *ifp = adapter->ifp;
4989 struct tx_ring *txr = adapter->tx_rings;
4990 bool more = false;
4991 u32 eicr, eicr_mask;
4992
4993 /* Silicon errata #26 on 82598 */
4994 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
4995
4996 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4997
4998 adapter->stats.pf.legint.ev_count++;
4999 ++que->irqs.ev_count;
5000 if (eicr == 0) {
5001 adapter->stats.pf.intzero.ev_count++;
5002 if ((ifp->if_flags & IFF_UP) != 0)
5003 ixgbe_enable_intr(adapter);
5004 return 0;
5005 }
5006
5007 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5008 /*
5009 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
5010 */
5011 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5012
5013 #ifdef __NetBSD__
5014 /* Don't run ixgbe_rxeof in interrupt context */
5015 more = true;
5016 #else
5017 more = ixgbe_rxeof(que);
5018 #endif
5019
5020 IXGBE_TX_LOCK(txr);
5021 ixgbe_txeof(txr);
5022 #ifdef notyet
5023 if (!ixgbe_ring_empty(ifp, txr->br))
5024 ixgbe_start_locked(ifp, txr);
5025 #endif
5026 IXGBE_TX_UNLOCK(txr);
5027 }
5028
5029 /* Check for fan failure */
5030 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5031 ixgbe_check_fan_failure(adapter, eicr, true);
5032 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5033 }
5034
5035 /* Link status change */
5036 if (eicr & IXGBE_EICR_LSC)
5037 softint_schedule(adapter->link_si);
5038
5039 if (ixgbe_is_sfp(hw)) {
5040 /* Pluggable optics-related interrupt */
5041 if (hw->mac.type >= ixgbe_mac_X540)
5042 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5043 else
5044 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5045
5046 if (eicr & eicr_mask) {
5047 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5048 softint_schedule(adapter->mod_si);
5049 }
5050
5051 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5052 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5053 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5054 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5055 softint_schedule(adapter->msf_si);
5056 }
5057 }
5058
5059 /* External PHY interrupt */
5060 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5061 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5062 softint_schedule(adapter->phy_si);
5063
5064 if (more) {
5065 que->req.ev_count++;
5066 ixgbe_sched_handle_que(adapter, que);
5067 } else
5068 ixgbe_enable_intr(adapter);
5069
5070 return 1;
5071 } /* ixgbe_legacy_irq */
5072
5073 /************************************************************************
5074 * ixgbe_free_pciintr_resources
5075 ************************************************************************/
5076 static void
5077 ixgbe_free_pciintr_resources(struct adapter *adapter)
5078 {
5079 struct ix_queue *que = adapter->queues;
5080 int rid;
5081
5082 /*
5083 * Release all msix queue resources:
5084 */
5085 for (int i = 0; i < adapter->num_queues; i++, que++) {
5086 if (que->res != NULL) {
5087 pci_intr_disestablish(adapter->osdep.pc,
5088 adapter->osdep.ihs[i]);
5089 adapter->osdep.ihs[i] = NULL;
5090 }
5091 }
5092
5093 /* Clean the Legacy or Link interrupt last */
5094 if (adapter->vector) /* we are doing MSIX */
5095 rid = adapter->vector;
5096 else
5097 rid = 0;
5098
5099 if (adapter->osdep.ihs[rid] != NULL) {
5100 pci_intr_disestablish(adapter->osdep.pc,
5101 adapter->osdep.ihs[rid]);
5102 adapter->osdep.ihs[rid] = NULL;
5103 }
5104
5105 if (adapter->osdep.intrs != NULL) {
5106 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5107 adapter->osdep.nintrs);
5108 adapter->osdep.intrs = NULL;
5109 }
5110 } /* ixgbe_free_pciintr_resources */
5111
5112 /************************************************************************
5113 * ixgbe_free_pci_resources
5114 ************************************************************************/
5115 static void
5116 ixgbe_free_pci_resources(struct adapter *adapter)
5117 {
5118
5119 ixgbe_free_pciintr_resources(adapter);
5120
5121 if (adapter->osdep.mem_size != 0) {
5122 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5123 adapter->osdep.mem_bus_space_handle,
5124 adapter->osdep.mem_size);
5125 }
5126
5127 } /* ixgbe_free_pci_resources */
5128
5129 /************************************************************************
5130 * ixgbe_set_sysctl_value
5131 ************************************************************************/
5132 static void
5133 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5134 const char *description, int *limit, int value)
5135 {
5136 device_t dev = adapter->dev;
5137 struct sysctllog **log;
5138 const struct sysctlnode *rnode, *cnode;
5139
5140 /*
5141 * It's not required to check recovery mode because this function never
5142 * touches hardware.
5143 */
5144
5145 log = &adapter->sysctllog;
5146 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5147 aprint_error_dev(dev, "could not create sysctl root\n");
5148 return;
5149 }
5150 if (sysctl_createv(log, 0, &rnode, &cnode,
5151 CTLFLAG_READWRITE, CTLTYPE_INT,
5152 name, SYSCTL_DESCR(description),
5153 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5154 aprint_error_dev(dev, "could not create sysctl\n");
5155 *limit = value;
5156 } /* ixgbe_set_sysctl_value */
5157
5158 /************************************************************************
5159 * ixgbe_sysctl_flowcntl
5160 *
5161 * SYSCTL wrapper around setting Flow Control
5162 ************************************************************************/
5163 static int
5164 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5165 {
5166 struct sysctlnode node = *rnode;
5167 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5168 int error, fc;
5169
5170 if (ixgbe_fw_recovery_mode_swflag(adapter))
5171 return (EPERM);
5172
5173 fc = adapter->hw.fc.current_mode;
5174 node.sysctl_data = &fc;
5175 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5176 if (error != 0 || newp == NULL)
5177 return error;
5178
5179 /* Don't bother if it's not changed */
5180 if (fc == adapter->hw.fc.current_mode)
5181 return (0);
5182
5183 return ixgbe_set_flowcntl(adapter, fc);
5184 } /* ixgbe_sysctl_flowcntl */
5185
5186 /************************************************************************
5187 * ixgbe_set_flowcntl - Set flow control
5188 *
5189 * Flow control values:
5190 * 0 - off
5191 * 1 - rx pause
5192 * 2 - tx pause
5193 * 3 - full
5194 ************************************************************************/
5195 static int
5196 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5197 {
5198 switch (fc) {
5199 case ixgbe_fc_rx_pause:
5200 case ixgbe_fc_tx_pause:
5201 case ixgbe_fc_full:
5202 adapter->hw.fc.requested_mode = fc;
5203 if (adapter->num_queues > 1)
5204 ixgbe_disable_rx_drop(adapter);
5205 break;
5206 case ixgbe_fc_none:
5207 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5208 if (adapter->num_queues > 1)
5209 ixgbe_enable_rx_drop(adapter);
5210 break;
5211 default:
5212 return (EINVAL);
5213 }
5214
5215 #if 0 /* XXX NetBSD */
5216 /* Don't autoneg if forcing a value */
5217 adapter->hw.fc.disable_fc_autoneg = TRUE;
5218 #endif
5219 ixgbe_fc_enable(&adapter->hw);
5220
5221 return (0);
5222 } /* ixgbe_set_flowcntl */
5223
5224 /************************************************************************
5225 * ixgbe_enable_rx_drop
5226 *
5227 * Enable the hardware to drop packets when the buffer is
5228 * full. This is useful with multiqueue, so that no single
5229 * queue being full stalls the entire RX engine. We only
5230 * enable this when Multiqueue is enabled AND Flow Control
5231 * is disabled.
5232 ************************************************************************/
5233 static void
5234 ixgbe_enable_rx_drop(struct adapter *adapter)
5235 {
5236 struct ixgbe_hw *hw = &adapter->hw;
5237 struct rx_ring *rxr;
5238 u32 srrctl;
5239
5240 for (int i = 0; i < adapter->num_queues; i++) {
5241 rxr = &adapter->rx_rings[i];
5242 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5243 srrctl |= IXGBE_SRRCTL_DROP_EN;
5244 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5245 }
5246
5247 /* enable drop for each vf */
5248 for (int i = 0; i < adapter->num_vfs; i++) {
5249 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5250 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5251 IXGBE_QDE_ENABLE));
5252 }
5253 } /* ixgbe_enable_rx_drop */
5254
5255 /************************************************************************
5256 * ixgbe_disable_rx_drop
5257 ************************************************************************/
5258 static void
5259 ixgbe_disable_rx_drop(struct adapter *adapter)
5260 {
5261 struct ixgbe_hw *hw = &adapter->hw;
5262 struct rx_ring *rxr;
5263 u32 srrctl;
5264
5265 for (int i = 0; i < adapter->num_queues; i++) {
5266 rxr = &adapter->rx_rings[i];
5267 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5268 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5269 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5270 }
5271
5272 /* disable drop for each vf */
5273 for (int i = 0; i < adapter->num_vfs; i++) {
5274 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5275 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5276 }
5277 } /* ixgbe_disable_rx_drop */
5278
5279 /************************************************************************
5280 * ixgbe_sysctl_advertise
5281 *
5282 * SYSCTL wrapper around setting advertised speed
5283 ************************************************************************/
5284 static int
5285 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5286 {
5287 struct sysctlnode node = *rnode;
5288 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5289 int error = 0, advertise;
5290
5291 if (ixgbe_fw_recovery_mode_swflag(adapter))
5292 return (EPERM);
5293
5294 advertise = adapter->advertise;
5295 node.sysctl_data = &advertise;
5296 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5297 if (error != 0 || newp == NULL)
5298 return error;
5299
5300 return ixgbe_set_advertise(adapter, advertise);
5301 } /* ixgbe_sysctl_advertise */
5302
5303 /************************************************************************
5304 * ixgbe_set_advertise - Control advertised link speed
5305 *
5306 * Flags:
5307 * 0x00 - Default (all capable link speed)
5308 * 0x01 - advertise 100 Mb
5309 * 0x02 - advertise 1G
5310 * 0x04 - advertise 10G
5311 * 0x08 - advertise 10 Mb
5312 * 0x10 - advertise 2.5G
5313 * 0x20 - advertise 5G
5314 ************************************************************************/
5315 static int
5316 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5317 {
5318 device_t dev;
5319 struct ixgbe_hw *hw;
5320 ixgbe_link_speed speed = 0;
5321 ixgbe_link_speed link_caps = 0;
5322 s32 err = IXGBE_NOT_IMPLEMENTED;
5323 bool negotiate = FALSE;
5324
5325 /* Checks to validate new value */
5326 if (adapter->advertise == advertise) /* no change */
5327 return (0);
5328
5329 dev = adapter->dev;
5330 hw = &adapter->hw;
5331
5332 /* No speed changes for backplane media */
5333 if (hw->phy.media_type == ixgbe_media_type_backplane)
5334 return (ENODEV);
5335
5336 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5337 (hw->phy.multispeed_fiber))) {
5338 device_printf(dev,
5339 "Advertised speed can only be set on copper or "
5340 "multispeed fiber media types.\n");
5341 return (EINVAL);
5342 }
5343
5344 if (advertise < 0x0 || advertise > 0x2f) {
5345 device_printf(dev,
5346 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5347 return (EINVAL);
5348 }
5349
5350 if (hw->mac.ops.get_link_capabilities) {
5351 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5352 &negotiate);
5353 if (err != IXGBE_SUCCESS) {
5354 device_printf(dev, "Unable to determine supported advertise speeds\n");
5355 return (ENODEV);
5356 }
5357 }
5358
5359 /* Set new value and report new advertised mode */
5360 if (advertise & 0x1) {
5361 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5362 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5363 return (EINVAL);
5364 }
5365 speed |= IXGBE_LINK_SPEED_100_FULL;
5366 }
5367 if (advertise & 0x2) {
5368 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5369 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5370 return (EINVAL);
5371 }
5372 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5373 }
5374 if (advertise & 0x4) {
5375 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5376 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5377 return (EINVAL);
5378 }
5379 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5380 }
5381 if (advertise & 0x8) {
5382 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5383 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5384 return (EINVAL);
5385 }
5386 speed |= IXGBE_LINK_SPEED_10_FULL;
5387 }
5388 if (advertise & 0x10) {
5389 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5390 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5391 return (EINVAL);
5392 }
5393 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5394 }
5395 if (advertise & 0x20) {
5396 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5397 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5398 return (EINVAL);
5399 }
5400 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5401 }
5402 if (advertise == 0)
5403 speed = link_caps; /* All capable link speed */
5404
5405 hw->mac.autotry_restart = TRUE;
5406 hw->mac.ops.setup_link(hw, speed, TRUE);
5407 adapter->advertise = advertise;
5408
5409 return (0);
5410 } /* ixgbe_set_advertise */
5411
5412 /************************************************************************
5413 * ixgbe_get_advertise - Get current advertised speed settings
5414 *
5415 * Formatted for sysctl usage.
5416 * Flags:
5417 * 0x01 - advertise 100 Mb
5418 * 0x02 - advertise 1G
5419 * 0x04 - advertise 10G
5420 * 0x08 - advertise 10 Mb (yes, Mb)
5421 * 0x10 - advertise 2.5G
5422 * 0x20 - advertise 5G
5423 ************************************************************************/
5424 static int
5425 ixgbe_get_advertise(struct adapter *adapter)
5426 {
5427 struct ixgbe_hw *hw = &adapter->hw;
5428 int speed;
5429 ixgbe_link_speed link_caps = 0;
5430 s32 err;
5431 bool negotiate = FALSE;
5432
5433 /*
5434 * Advertised speed means nothing unless it's copper or
5435 * multi-speed fiber
5436 */
5437 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5438 !(hw->phy.multispeed_fiber))
5439 return (0);
5440
5441 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5442 if (err != IXGBE_SUCCESS)
5443 return (0);
5444
5445 speed =
5446 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5447 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5448 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5449 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5450 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5451 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5452
5453 return speed;
5454 } /* ixgbe_get_advertise */
5455
5456 /************************************************************************
5457 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5458 *
5459 * Control values:
5460 * 0/1 - off / on (use default value of 1000)
5461 *
5462 * Legal timer values are:
5463 * 50,100,250,500,1000,2000,5000,10000
5464 *
5465 * Turning off interrupt moderation will also turn this off.
5466 ************************************************************************/
5467 static int
5468 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5469 {
5470 struct sysctlnode node = *rnode;
5471 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5472 struct ifnet *ifp = adapter->ifp;
5473 int error;
5474 int newval;
5475
5476 if (ixgbe_fw_recovery_mode_swflag(adapter))
5477 return (EPERM);
5478
5479 newval = adapter->dmac;
5480 node.sysctl_data = &newval;
5481 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5482 if ((error) || (newp == NULL))
5483 return (error);
5484
5485 switch (newval) {
5486 case 0:
5487 /* Disabled */
5488 adapter->dmac = 0;
5489 break;
5490 case 1:
5491 /* Enable and use default */
5492 adapter->dmac = 1000;
5493 break;
5494 case 50:
5495 case 100:
5496 case 250:
5497 case 500:
5498 case 1000:
5499 case 2000:
5500 case 5000:
5501 case 10000:
5502 /* Legal values - allow */
5503 adapter->dmac = newval;
5504 break;
5505 default:
5506 /* Do nothing, illegal value */
5507 return (EINVAL);
5508 }
5509
5510 /* Re-initialize hardware if it's already running */
5511 if (ifp->if_flags & IFF_RUNNING)
5512 ifp->if_init(ifp);
5513
5514 return (0);
5515 }
5516
5517 #ifdef IXGBE_DEBUG
5518 /************************************************************************
5519 * ixgbe_sysctl_power_state
5520 *
5521 * Sysctl to test power states
5522 * Values:
5523 * 0 - set device to D0
5524 * 3 - set device to D3
5525 * (none) - get current device power state
5526 ************************************************************************/
5527 static int
5528 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5529 {
5530 #ifdef notyet
5531 struct sysctlnode node = *rnode;
5532 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5533 device_t dev = adapter->dev;
5534 int curr_ps, new_ps, error = 0;
5535
5536 if (ixgbe_fw_recovery_mode_swflag(adapter))
5537 return (EPERM);
5538
5539 curr_ps = new_ps = pci_get_powerstate(dev);
5540
5541 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5542 if ((error) || (req->newp == NULL))
5543 return (error);
5544
5545 if (new_ps == curr_ps)
5546 return (0);
5547
5548 if (new_ps == 3 && curr_ps == 0)
5549 error = DEVICE_SUSPEND(dev);
5550 else if (new_ps == 0 && curr_ps == 3)
5551 error = DEVICE_RESUME(dev);
5552 else
5553 return (EINVAL);
5554
5555 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5556
5557 return (error);
5558 #else
5559 return 0;
5560 #endif
5561 } /* ixgbe_sysctl_power_state */
5562 #endif
5563
5564 /************************************************************************
5565 * ixgbe_sysctl_wol_enable
5566 *
5567 * Sysctl to enable/disable the WoL capability,
5568 * if supported by the adapter.
5569 *
5570 * Values:
5571 * 0 - disabled
5572 * 1 - enabled
5573 ************************************************************************/
5574 static int
5575 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5576 {
5577 struct sysctlnode node = *rnode;
5578 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5579 struct ixgbe_hw *hw = &adapter->hw;
5580 bool new_wol_enabled;
5581 int error = 0;
5582
5583 /*
5584 * It's not required to check recovery mode because this function never
5585 * touches hardware.
5586 */
5587 new_wol_enabled = hw->wol_enabled;
5588 node.sysctl_data = &new_wol_enabled;
5589 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5590 if ((error) || (newp == NULL))
5591 return (error);
5592 if (new_wol_enabled == hw->wol_enabled)
5593 return (0);
5594
5595 if (new_wol_enabled && !adapter->wol_support)
5596 return (ENODEV);
5597 else
5598 hw->wol_enabled = new_wol_enabled;
5599
5600 return (0);
5601 } /* ixgbe_sysctl_wol_enable */
5602
5603 /************************************************************************
5604 * ixgbe_sysctl_wufc - Wake Up Filter Control
5605 *
5606 * Sysctl to enable/disable the types of packets that the
5607 * adapter will wake up on upon receipt.
5608 * Flags:
5609 * 0x1 - Link Status Change
5610 * 0x2 - Magic Packet
5611 * 0x4 - Direct Exact
5612 * 0x8 - Directed Multicast
5613 * 0x10 - Broadcast
5614 * 0x20 - ARP/IPv4 Request Packet
5615 * 0x40 - Direct IPv4 Packet
5616 * 0x80 - Direct IPv6 Packet
5617 *
5618 * Settings not listed above will cause the sysctl to return an error.
5619 ************************************************************************/
5620 static int
5621 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5622 {
5623 struct sysctlnode node = *rnode;
5624 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5625 int error = 0;
5626 u32 new_wufc;
5627
5628 /*
5629 * It's not required to check recovery mode because this function never
5630 * touches hardware.
5631 */
5632 new_wufc = adapter->wufc;
5633 node.sysctl_data = &new_wufc;
5634 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5635 if ((error) || (newp == NULL))
5636 return (error);
5637 if (new_wufc == adapter->wufc)
5638 return (0);
5639
5640 if (new_wufc & 0xffffff00)
5641 return (EINVAL);
5642
5643 new_wufc &= 0xff;
5644 new_wufc |= (0xffffff & adapter->wufc);
5645 adapter->wufc = new_wufc;
5646
5647 return (0);
5648 } /* ixgbe_sysctl_wufc */
5649
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 * Debug-only sysctl that dumps the RSS redirection table ([E]RETA).
 * NOTE: the whole body is compiled out under "#ifdef notyet" because it
 * still uses the FreeBSD sbuf/sysctl interfaces (sbuf_new_for_sysctl,
 * the "req" argument) that have no NetBSD counterpart here; until it is
 * ported, the handler is a no-op that returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Reading RSS registers requires working firmware. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550-class MACs provide the extended 128-entry table. */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			/* First 32 entries live in the classic RETA regs. */
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the extended ERETA regs. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5711
5712 /************************************************************************
5713 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5714 *
5715 * For X552/X557-AT devices using an external PHY
5716 ************************************************************************/
5717 static int
5718 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5719 {
5720 struct sysctlnode node = *rnode;
5721 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5722 struct ixgbe_hw *hw = &adapter->hw;
5723 int val;
5724 u16 reg;
5725 int error;
5726
5727 if (ixgbe_fw_recovery_mode_swflag(adapter))
5728 return (EPERM);
5729
5730 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5731 device_printf(adapter->dev,
5732 "Device has no supported external thermal sensor.\n");
5733 return (ENODEV);
5734 }
5735
5736 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5737 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5738 device_printf(adapter->dev,
5739 "Error reading from PHY's current temperature register\n");
5740 return (EAGAIN);
5741 }
5742
5743 node.sysctl_data = &val;
5744
5745 /* Shift temp for output */
5746 val = reg >> 8;
5747
5748 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5749 if ((error) || (newp == NULL))
5750 return (error);
5751
5752 return (0);
5753 } /* ixgbe_sysctl_phy_temp */
5754
5755 /************************************************************************
5756 * ixgbe_sysctl_phy_overtemp_occurred
5757 *
5758 * Reports (directly from the PHY) whether the current PHY
5759 * temperature is over the overtemp threshold.
5760 ************************************************************************/
5761 static int
5762 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5763 {
5764 struct sysctlnode node = *rnode;
5765 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5766 struct ixgbe_hw *hw = &adapter->hw;
5767 int val, error;
5768 u16 reg;
5769
5770 if (ixgbe_fw_recovery_mode_swflag(adapter))
5771 return (EPERM);
5772
5773 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5774 device_printf(adapter->dev,
5775 "Device has no supported external thermal sensor.\n");
5776 return (ENODEV);
5777 }
5778
5779 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5780 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5781 device_printf(adapter->dev,
5782 "Error reading from PHY's temperature status register\n");
5783 return (EAGAIN);
5784 }
5785
5786 node.sysctl_data = &val;
5787
5788 /* Get occurrence bit */
5789 val = !!(reg & 0x4000);
5790
5791 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5792 if ((error) || (newp == NULL))
5793 return (error);
5794
5795 return (0);
5796 } /* ixgbe_sysctl_phy_overtemp_occurred */
5797
5798 /************************************************************************
5799 * ixgbe_sysctl_eee_state
5800 *
5801 * Sysctl to set EEE power saving feature
5802 * Values:
5803 * 0 - disable EEE
5804 * 1 - enable EEE
5805 * (none) - get current device EEE state
5806 ************************************************************************/
5807 static int
5808 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5809 {
5810 struct sysctlnode node = *rnode;
5811 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5812 struct ifnet *ifp = adapter->ifp;
5813 device_t dev = adapter->dev;
5814 int curr_eee, new_eee, error = 0;
5815 s32 retval;
5816
5817 if (ixgbe_fw_recovery_mode_swflag(adapter))
5818 return (EPERM);
5819
5820 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5821 node.sysctl_data = &new_eee;
5822 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5823 if ((error) || (newp == NULL))
5824 return (error);
5825
5826 /* Nothing to do */
5827 if (new_eee == curr_eee)
5828 return (0);
5829
5830 /* Not supported */
5831 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5832 return (EINVAL);
5833
5834 /* Bounds checking */
5835 if ((new_eee < 0) || (new_eee > 1))
5836 return (EINVAL);
5837
5838 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5839 if (retval) {
5840 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5841 return (EINVAL);
5842 }
5843
5844 /* Restart auto-neg */
5845 ifp->if_init(ifp);
5846
5847 device_printf(dev, "New EEE state: %d\n", new_eee);
5848
5849 /* Cache new value */
5850 if (new_eee)
5851 adapter->feat_en |= IXGBE_FEATURE_EEE;
5852 else
5853 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5854
5855 return (error);
5856 } /* ixgbe_sysctl_eee_state */
5857
/*
 * PRINTQS(adapter, regname): dump per-queue register "regname" for every
 * configured queue onto a single console line, prefixed with the device
 * name.  Used by ixgbe_print_debug_info() below.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5871
5872 /************************************************************************
5873 * ixgbe_print_debug_info
5874 *
5875 * Called only when em_display_debug_stats is enabled.
5876 * Provides a way to take a look at important statistics
5877 * maintained by the driver and hardware.
5878 ************************************************************************/
5879 static void
5880 ixgbe_print_debug_info(struct adapter *adapter)
5881 {
5882 device_t dev = adapter->dev;
5883 struct ixgbe_hw *hw = &adapter->hw;
5884 int table_size;
5885 int i;
5886
5887 switch (adapter->hw.mac.type) {
5888 case ixgbe_mac_X550:
5889 case ixgbe_mac_X550EM_x:
5890 case ixgbe_mac_X550EM_a:
5891 table_size = 128;
5892 break;
5893 default:
5894 table_size = 32;
5895 break;
5896 }
5897
5898 device_printf(dev, "[E]RETA:\n");
5899 for (i = 0; i < table_size; i++) {
5900 if (i < 32)
5901 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5902 IXGBE_RETA(i)));
5903 else
5904 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5905 IXGBE_ERETA(i - 32)));
5906 }
5907
5908 device_printf(dev, "queue:");
5909 for (i = 0; i < adapter->num_queues; i++) {
5910 printf((i == 0) ? "\t" : " ");
5911 printf("%8d", i);
5912 }
5913 printf("\n");
5914 PRINTQS(adapter, RDBAL);
5915 PRINTQS(adapter, RDBAH);
5916 PRINTQS(adapter, RDLEN);
5917 PRINTQS(adapter, SRRCTL);
5918 PRINTQS(adapter, RDH);
5919 PRINTQS(adapter, RDT);
5920 PRINTQS(adapter, RXDCTL);
5921
5922 device_printf(dev, "RQSMR:");
5923 for (i = 0; i < adapter->num_queues / 4; i++) {
5924 printf((i == 0) ? "\t" : " ");
5925 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
5926 }
5927 printf("\n");
5928
5929 device_printf(dev, "disabled_count:");
5930 for (i = 0; i < adapter->num_queues; i++) {
5931 printf((i == 0) ? "\t" : " ");
5932 printf("%8d", adapter->queues[i].disabled_count);
5933 }
5934 printf("\n");
5935
5936 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
5937 if (hw->mac.type != ixgbe_mac_82598EB) {
5938 device_printf(dev, "EIMS_EX(0):\t%08x\n",
5939 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
5940 device_printf(dev, "EIMS_EX(1):\t%08x\n",
5941 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
5942 }
5943 } /* ixgbe_print_debug_info */
5944
5945 /************************************************************************
5946 * ixgbe_sysctl_debug
5947 ************************************************************************/
5948 static int
5949 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5950 {
5951 struct sysctlnode node = *rnode;
5952 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5953 int error, result = 0;
5954
5955 if (ixgbe_fw_recovery_mode_swflag(adapter))
5956 return (EPERM);
5957
5958 node.sysctl_data = &result;
5959 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5960
5961 if (error || newp == NULL)
5962 return error;
5963
5964 if (result == 1)
5965 ixgbe_print_debug_info(adapter);
5966
5967 return 0;
5968 } /* ixgbe_sysctl_debug */
5969
5970 /************************************************************************
5971 * ixgbe_init_device_features
5972 ************************************************************************/
5973 static void
5974 ixgbe_init_device_features(struct adapter *adapter)
5975 {
5976 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5977 | IXGBE_FEATURE_RSS
5978 | IXGBE_FEATURE_MSI
5979 | IXGBE_FEATURE_MSIX
5980 | IXGBE_FEATURE_LEGACY_IRQ
5981 | IXGBE_FEATURE_LEGACY_TX;
5982
5983 /* Set capabilities first... */
5984 switch (adapter->hw.mac.type) {
5985 case ixgbe_mac_82598EB:
5986 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5987 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5988 break;
5989 case ixgbe_mac_X540:
5990 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5991 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5992 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5993 (adapter->hw.bus.func == 0))
5994 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5995 break;
5996 case ixgbe_mac_X550:
5997 /*
5998 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
5999 * NVM Image version.
6000 */
6001 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6002 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6003 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6004 break;
6005 case ixgbe_mac_X550EM_x:
6006 /*
6007 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6008 * NVM Image version.
6009 */
6010 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6011 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6012 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
6013 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6014 break;
6015 case ixgbe_mac_X550EM_a:
6016 /*
6017 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6018 * NVM Image version.
6019 */
6020 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6021 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6022 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6023 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6024 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6025 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6026 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6027 }
6028 break;
6029 case ixgbe_mac_82599EB:
6030 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6031 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6032 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6033 (adapter->hw.bus.func == 0))
6034 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6035 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6036 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6037 break;
6038 default:
6039 break;
6040 }
6041
6042 /* Enabled by default... */
6043 /* Fan failure detection */
6044 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6045 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6046 /* Netmap */
6047 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6048 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6049 /* EEE */
6050 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6051 adapter->feat_en |= IXGBE_FEATURE_EEE;
6052 /* Thermal Sensor */
6053 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6054 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6055 /*
6056 * Recovery mode:
6057 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6058 * NVM Image version.
6059 */
6060
6061 /* Enabled via global sysctl... */
6062 /* Flow Director */
6063 if (ixgbe_enable_fdir) {
6064 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6065 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6066 else
6067 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6068 }
6069 /* Legacy (single queue) transmit */
6070 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6071 ixgbe_enable_legacy_tx)
6072 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6073 /*
6074 * Message Signal Interrupts - Extended (MSI-X)
6075 * Normal MSI is only enabled if MSI-X calls fail.
6076 */
6077 if (!ixgbe_enable_msix)
6078 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6079 /* Receive-Side Scaling (RSS) */
6080 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6081 adapter->feat_en |= IXGBE_FEATURE_RSS;
6082
6083 /* Disable features with unmet dependencies... */
6084 /* No MSI-X */
6085 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6086 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6087 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6088 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6089 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6090 }
6091 } /* ixgbe_init_device_features */
6092
6093 /************************************************************************
6094 * ixgbe_probe - Device identification routine
6095 *
6096 * Determines if the driver should be loaded on
6097 * adapter based on its PCI vendor/device ID.
6098 *
6099 * return BUS_PROBE_DEFAULT on success, positive on failure
6100 ************************************************************************/
6101 static int
6102 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6103 {
6104 const struct pci_attach_args *pa = aux;
6105
6106 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6107 }
6108
6109 static const ixgbe_vendor_info_t *
6110 ixgbe_lookup(const struct pci_attach_args *pa)
6111 {
6112 const ixgbe_vendor_info_t *ent;
6113 pcireg_t subid;
6114
6115 INIT_DEBUGOUT("ixgbe_lookup: begin");
6116
6117 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6118 return NULL;
6119
6120 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6121
6122 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6123 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6124 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6125 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6126 (ent->subvendor_id == 0)) &&
6127 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6128 (ent->subdevice_id == 0))) {
6129 return ent;
6130 }
6131 }
6132 return NULL;
6133 }
6134
6135 static int
6136 ixgbe_ifflags_cb(struct ethercom *ec)
6137 {
6138 struct ifnet *ifp = &ec->ec_if;
6139 struct adapter *adapter = ifp->if_softc;
6140 int change, rc = 0;
6141
6142 IXGBE_CORE_LOCK(adapter);
6143
6144 change = ifp->if_flags ^ adapter->if_flags;
6145 if (change != 0)
6146 adapter->if_flags = ifp->if_flags;
6147
6148 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
6149 rc = ENETRESET;
6150 else if ((change & IFF_PROMISC) != 0)
6151 ixgbe_set_promisc(adapter);
6152
6153 /* Set up VLAN support and filter */
6154 ixgbe_setup_vlan_hw_support(adapter);
6155
6156 IXGBE_CORE_UNLOCK(adapter);
6157
6158 return rc;
6159 }
6160
6161 /************************************************************************
6162 * ixgbe_ioctl - Ioctl entry point
6163 *
6164 * Called when the user wants to configure the interface.
6165 *
6166 * return 0 on success, positive on failure
6167 ************************************************************************/
6168 static int
6169 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
6170 {
6171 struct adapter *adapter = ifp->if_softc;
6172 struct ixgbe_hw *hw = &adapter->hw;
6173 struct ifcapreq *ifcr = data;
6174 struct ifreq *ifr = data;
6175 int error = 0;
6176 int l4csum_en;
6177 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6178 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6179
6180 if (ixgbe_fw_recovery_mode_swflag(adapter))
6181 return (EPERM);
6182
6183 switch (command) {
6184 case SIOCSIFFLAGS:
6185 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6186 break;
6187 case SIOCADDMULTI:
6188 case SIOCDELMULTI:
6189 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6190 break;
6191 case SIOCSIFMEDIA:
6192 case SIOCGIFMEDIA:
6193 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6194 break;
6195 case SIOCSIFCAP:
6196 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6197 break;
6198 case SIOCSIFMTU:
6199 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6200 break;
6201 #ifdef __NetBSD__
6202 case SIOCINITIFADDR:
6203 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6204 break;
6205 case SIOCGIFFLAGS:
6206 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6207 break;
6208 case SIOCGIFAFLAG_IN:
6209 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6210 break;
6211 case SIOCGIFADDR:
6212 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6213 break;
6214 case SIOCGIFMTU:
6215 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6216 break;
6217 case SIOCGIFCAP:
6218 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6219 break;
6220 case SIOCGETHERCAP:
6221 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6222 break;
6223 case SIOCGLIFADDR:
6224 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6225 break;
6226 case SIOCZIFDATA:
6227 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6228 hw->mac.ops.clear_hw_cntrs(hw);
6229 ixgbe_clear_evcnt(adapter);
6230 break;
6231 case SIOCAIFADDR:
6232 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6233 break;
6234 #endif
6235 default:
6236 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6237 break;
6238 }
6239
6240 switch (command) {
6241 case SIOCGI2C:
6242 {
6243 struct ixgbe_i2c_req i2c;
6244
6245 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6246 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6247 if (error != 0)
6248 break;
6249 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6250 error = EINVAL;
6251 break;
6252 }
6253 if (i2c.len > sizeof(i2c.data)) {
6254 error = EINVAL;
6255 break;
6256 }
6257
6258 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6259 i2c.dev_addr, i2c.data);
6260 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6261 break;
6262 }
6263 case SIOCSIFCAP:
6264 /* Layer-4 Rx checksum offload has to be turned on and
6265 * off as a unit.
6266 */
6267 l4csum_en = ifcr->ifcr_capenable & l4csum;
6268 if (l4csum_en != l4csum && l4csum_en != 0)
6269 return EINVAL;
6270 /*FALLTHROUGH*/
6271 case SIOCADDMULTI:
6272 case SIOCDELMULTI:
6273 case SIOCSIFFLAGS:
6274 case SIOCSIFMTU:
6275 default:
6276 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6277 return error;
6278 if ((ifp->if_flags & IFF_RUNNING) == 0)
6279 ;
6280 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6281 IXGBE_CORE_LOCK(adapter);
6282 if ((ifp->if_flags & IFF_RUNNING) != 0)
6283 ixgbe_init_locked(adapter);
6284 ixgbe_recalculate_max_frame(adapter);
6285 IXGBE_CORE_UNLOCK(adapter);
6286 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6287 /*
6288 * Multicast list has changed; set the hardware filter
6289 * accordingly.
6290 */
6291 IXGBE_CORE_LOCK(adapter);
6292 ixgbe_disable_intr(adapter);
6293 ixgbe_set_multi(adapter);
6294 ixgbe_enable_intr(adapter);
6295 IXGBE_CORE_UNLOCK(adapter);
6296 }
6297 return 0;
6298 }
6299
6300 return error;
6301 } /* ixgbe_ioctl */
6302
6303 /************************************************************************
6304 * ixgbe_check_fan_failure
6305 ************************************************************************/
6306 static void
6307 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6308 {
6309 u32 mask;
6310
6311 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6312 IXGBE_ESDP_SDP1;
6313
6314 if (reg & mask)
6315 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6316 } /* ixgbe_check_fan_failure */
6317
6318 /************************************************************************
6319 * ixgbe_handle_que
6320 ************************************************************************/
6321 static void
6322 ixgbe_handle_que(void *context)
6323 {
6324 struct ix_queue *que = context;
6325 struct adapter *adapter = que->adapter;
6326 struct tx_ring *txr = que->txr;
6327 struct ifnet *ifp = adapter->ifp;
6328 bool more = false;
6329
6330 que->handleq.ev_count++;
6331
6332 if (ifp->if_flags & IFF_RUNNING) {
6333 more = ixgbe_rxeof(que);
6334 IXGBE_TX_LOCK(txr);
6335 more |= ixgbe_txeof(txr);
6336 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6337 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6338 ixgbe_mq_start_locked(ifp, txr);
6339 /* Only for queue 0 */
6340 /* NetBSD still needs this for CBQ */
6341 if ((&adapter->queues[0] == que)
6342 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6343 ixgbe_legacy_start_locked(ifp, txr);
6344 IXGBE_TX_UNLOCK(txr);
6345 }
6346
6347 if (more) {
6348 que->req.ev_count++;
6349 ixgbe_sched_handle_que(adapter, que);
6350 } else if (que->res != NULL) {
6351 /* Re-enable this interrupt */
6352 ixgbe_enable_queue(adapter, que->msix);
6353 } else
6354 ixgbe_enable_intr(adapter);
6355
6356 return;
6357 } /* ixgbe_handle_que */
6358
6359 /************************************************************************
6360 * ixgbe_handle_que_work
6361 ************************************************************************/
6362 static void
6363 ixgbe_handle_que_work(struct work *wk, void *context)
6364 {
6365 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6366
6367 /*
6368 * "enqueued flag" is not required here.
6369 * See ixgbe_msix_que().
6370 */
6371 ixgbe_handle_que(que);
6372 }
6373
6374 /************************************************************************
6375 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6376 ************************************************************************/
6377 static int
6378 ixgbe_allocate_legacy(struct adapter *adapter,
6379 const struct pci_attach_args *pa)
6380 {
6381 device_t dev = adapter->dev;
6382 struct ix_queue *que = adapter->queues;
6383 struct tx_ring *txr = adapter->tx_rings;
6384 int counts[PCI_INTR_TYPE_SIZE];
6385 pci_intr_type_t intr_type, max_type;
6386 char intrbuf[PCI_INTRSTR_LEN];
6387 const char *intrstr = NULL;
6388
6389 /* We allocate a single interrupt resource */
6390 max_type = PCI_INTR_TYPE_MSI;
6391 counts[PCI_INTR_TYPE_MSIX] = 0;
6392 counts[PCI_INTR_TYPE_MSI] =
6393 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6394 /* Check not feat_en but feat_cap to fallback to INTx */
6395 counts[PCI_INTR_TYPE_INTX] =
6396 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6397
6398 alloc_retry:
6399 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6400 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6401 return ENXIO;
6402 }
6403 adapter->osdep.nintrs = 1;
6404 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6405 intrbuf, sizeof(intrbuf));
6406 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6407 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6408 device_xname(dev));
6409 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6410 if (adapter->osdep.ihs[0] == NULL) {
6411 aprint_error_dev(dev,"unable to establish %s\n",
6412 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6413 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6414 adapter->osdep.intrs = NULL;
6415 switch (intr_type) {
6416 case PCI_INTR_TYPE_MSI:
6417 /* The next try is for INTx: Disable MSI */
6418 max_type = PCI_INTR_TYPE_INTX;
6419 counts[PCI_INTR_TYPE_INTX] = 1;
6420 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6421 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6422 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6423 goto alloc_retry;
6424 } else
6425 break;
6426 case PCI_INTR_TYPE_INTX:
6427 default:
6428 /* See below */
6429 break;
6430 }
6431 }
6432 if (intr_type == PCI_INTR_TYPE_INTX) {
6433 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6434 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6435 }
6436 if (adapter->osdep.ihs[0] == NULL) {
6437 aprint_error_dev(dev,
6438 "couldn't establish interrupt%s%s\n",
6439 intrstr ? " at " : "", intrstr ? intrstr : "");
6440 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6441 adapter->osdep.intrs = NULL;
6442 return ENXIO;
6443 }
6444 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6445 /*
6446 * Try allocating a fast interrupt and the associated deferred
6447 * processing contexts.
6448 */
6449 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6450 txr->txr_si =
6451 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6452 ixgbe_deferred_mq_start, txr);
6453 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6454 ixgbe_handle_que, que);
6455
6456 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6457 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6458 aprint_error_dev(dev,
6459 "could not establish software interrupts\n");
6460
6461 return ENXIO;
6462 }
6463 /* For simplicity in the handlers */
6464 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6465
6466 return (0);
6467 } /* ixgbe_allocate_legacy */
6468
6469 /************************************************************************
6470 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6471 ************************************************************************/
6472 static int
6473 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6474 {
6475 device_t dev = adapter->dev;
6476 struct ix_queue *que = adapter->queues;
6477 struct tx_ring *txr = adapter->tx_rings;
6478 pci_chipset_tag_t pc;
6479 char intrbuf[PCI_INTRSTR_LEN];
6480 char intr_xname[32];
6481 char wqname[MAXCOMLEN];
6482 const char *intrstr = NULL;
6483 int error, vector = 0;
6484 int cpu_id = 0;
6485 kcpuset_t *affinity;
6486 #ifdef RSS
6487 unsigned int rss_buckets = 0;
6488 kcpuset_t cpu_mask;
6489 #endif
6490
6491 pc = adapter->osdep.pc;
6492 #ifdef RSS
6493 /*
6494 * If we're doing RSS, the number of queues needs to
6495 * match the number of RSS buckets that are configured.
6496 *
6497 * + If there's more queues than RSS buckets, we'll end
6498 * up with queues that get no traffic.
6499 *
6500 * + If there's more RSS buckets than queues, we'll end
6501 * up having multiple RSS buckets map to the same queue,
6502 * so there'll be some contention.
6503 */
6504 rss_buckets = rss_getnumbuckets();
6505 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6506 (adapter->num_queues != rss_buckets)) {
6507 device_printf(dev,
6508 "%s: number of queues (%d) != number of RSS buckets (%d)"
6509 "; performance will be impacted.\n",
6510 __func__, adapter->num_queues, rss_buckets);
6511 }
6512 #endif
6513
6514 adapter->osdep.nintrs = adapter->num_queues + 1;
6515 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6516 adapter->osdep.nintrs) != 0) {
6517 aprint_error_dev(dev,
6518 "failed to allocate MSI-X interrupt\n");
6519 return (ENXIO);
6520 }
6521
6522 kcpuset_create(&affinity, false);
6523 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6524 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6525 device_xname(dev), i);
6526 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6527 sizeof(intrbuf));
6528 #ifdef IXGBE_MPSAFE
6529 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6530 true);
6531 #endif
6532 /* Set the handler function */
6533 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6534 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6535 intr_xname);
6536 if (que->res == NULL) {
6537 aprint_error_dev(dev,
6538 "Failed to register QUE handler\n");
6539 error = ENXIO;
6540 goto err_out;
6541 }
6542 que->msix = vector;
6543 adapter->active_queues |= 1ULL << que->msix;
6544
6545 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6546 #ifdef RSS
6547 /*
6548 * The queue ID is used as the RSS layer bucket ID.
6549 * We look up the queue ID -> RSS CPU ID and select
6550 * that.
6551 */
6552 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6553 CPU_SETOF(cpu_id, &cpu_mask);
6554 #endif
6555 } else {
6556 /*
6557 * Bind the MSI-X vector, and thus the
6558 * rings to the corresponding CPU.
6559 *
6560 * This just happens to match the default RSS
6561 * round-robin bucket -> queue -> CPU allocation.
6562 */
6563 if (adapter->num_queues > 1)
6564 cpu_id = i;
6565 }
6566 /* Round-robin affinity */
6567 kcpuset_zero(affinity);
6568 kcpuset_set(affinity, cpu_id % ncpu);
6569 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6570 NULL);
6571 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6572 intrstr);
6573 if (error == 0) {
6574 #if 1 /* def IXGBE_DEBUG */
6575 #ifdef RSS
6576 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6577 cpu_id % ncpu);
6578 #else
6579 aprint_normal(", bound queue %d to cpu %d", i,
6580 cpu_id % ncpu);
6581 #endif
6582 #endif /* IXGBE_DEBUG */
6583 }
6584 aprint_normal("\n");
6585
6586 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6587 txr->txr_si = softint_establish(
6588 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6589 ixgbe_deferred_mq_start, txr);
6590 if (txr->txr_si == NULL) {
6591 aprint_error_dev(dev,
6592 "couldn't establish software interrupt\n");
6593 error = ENXIO;
6594 goto err_out;
6595 }
6596 }
6597 que->que_si
6598 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6599 ixgbe_handle_que, que);
6600 if (que->que_si == NULL) {
6601 aprint_error_dev(dev,
6602 "couldn't establish software interrupt\n");
6603 error = ENXIO;
6604 goto err_out;
6605 }
6606 }
6607 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6608 error = workqueue_create(&adapter->txr_wq, wqname,
6609 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6610 IXGBE_WORKQUEUE_FLAGS);
6611 if (error) {
6612 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6613 goto err_out;
6614 }
6615 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6616
6617 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6618 error = workqueue_create(&adapter->que_wq, wqname,
6619 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6620 IXGBE_WORKQUEUE_FLAGS);
6621 if (error) {
6622 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6623 goto err_out;
6624 }
6625
6626 /* and Link */
6627 cpu_id++;
6628 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6629 adapter->vector = vector;
6630 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6631 sizeof(intrbuf));
6632 #ifdef IXGBE_MPSAFE
6633 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6634 true);
6635 #endif
6636 /* Set the link handler function */
6637 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6638 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6639 intr_xname);
6640 if (adapter->osdep.ihs[vector] == NULL) {
6641 aprint_error_dev(dev, "Failed to register LINK handler\n");
6642 error = ENXIO;
6643 goto err_out;
6644 }
6645 /* Round-robin affinity */
6646 kcpuset_zero(affinity);
6647 kcpuset_set(affinity, cpu_id % ncpu);
6648 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6649 NULL);
6650
6651 aprint_normal_dev(dev,
6652 "for link, interrupting at %s", intrstr);
6653 if (error == 0)
6654 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6655 else
6656 aprint_normal("\n");
6657
6658 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6659 adapter->mbx_si =
6660 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6661 ixgbe_handle_mbx, adapter);
6662 if (adapter->mbx_si == NULL) {
6663 aprint_error_dev(dev,
6664 "could not establish software interrupts\n");
6665
6666 error = ENXIO;
6667 goto err_out;
6668 }
6669 }
6670
6671 kcpuset_destroy(affinity);
6672 aprint_normal_dev(dev,
6673 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6674
6675 return (0);
6676
6677 err_out:
6678 kcpuset_destroy(affinity);
6679 ixgbe_free_softint(adapter);
6680 ixgbe_free_pciintr_resources(adapter);
6681 return (error);
6682 } /* ixgbe_allocate_msix */
6683
6684 /************************************************************************
6685 * ixgbe_configure_interrupts
6686 *
6687 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6688 * This will also depend on user settings.
6689 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_mac_info *mac = &adapter->hw.mac;
	int want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/*
	 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
	 * interrupt slot.
	 */
	if (ncpu == 1)
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
	msgs = MIN(msgs, IXG_MAX_NINTR);
	/* MSI-X needs at least two vectors: one queue pair + one link */
	if (msgs < 2)
		goto msi;

	/* XXX non-NULL here only marks "MSI-X chosen"; cleared on the msi path */
	adapter->msix_mem = (void *)1; /* XXX */

	/* Figure out a reasonable auto config value */
	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = uimin(queues, rss_getnumbuckets());
#endif
	/* User-requested queue count (tuneable) may not exceed the auto value */
	if (ixgbe_num_queues > queues) {
		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	/* A nonzero tuneable overrides; otherwise clamp to the MAC's HW limits */
	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	else
		queues = uimin(queues,
		    uimin(mac->max_tx_queues, mac->max_rx_queues));

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		aprint_error_dev(dev, "MSI-X Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	adapter->num_queues = queues;
	adapter->feat_en |= IXGBE_FEATURE_MSIX;
	return (0);

	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
	adapter->msix_mem = NULL; /* XXX */
	/* Only single-vector MSI is used; clamp whatever the device offers */
	if (msgs > 1)
		msgs = 1;
	if (msgs != 0) {
		msgs = 1;
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		return (0);
	}

	/* No MSI either; legacy INTx is the last resort */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		aprint_error_dev(dev,
		    "Device does not support legacy interrupts.\n");
		/* NOTE(review): bare nonzero failure code, unlike errno-style
		 * returns elsewhere in this file — callers only test != 0 */
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;

	return (0);
} /* ixgbe_configure_interrupts */
6790
6791
6792 /************************************************************************
6793 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6794 *
6795 * Done outside of interrupt context since the driver might sleep
6796 ************************************************************************/
6797 static void
6798 ixgbe_handle_link(void *context)
6799 {
6800 struct adapter *adapter = context;
6801 struct ixgbe_hw *hw = &adapter->hw;
6802
6803 IXGBE_CORE_LOCK(adapter);
6804 ++adapter->link_sicount.ev_count;
6805 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6806 ixgbe_update_link_status(adapter);
6807
6808 /* Re-enable link interrupts */
6809 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6810
6811 IXGBE_CORE_UNLOCK(adapter);
6812 } /* ixgbe_handle_link */
6813
6814 #if 0
6815 /************************************************************************
6816 * ixgbe_rearm_queues
6817 ************************************************************************/
6818 static __inline void
6819 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6820 {
6821 u32 mask;
6822
6823 switch (adapter->hw.mac.type) {
6824 case ixgbe_mac_82598EB:
6825 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6826 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6827 break;
6828 case ixgbe_mac_82599EB:
6829 case ixgbe_mac_X540:
6830 case ixgbe_mac_X550:
6831 case ixgbe_mac_X550EM_x:
6832 case ixgbe_mac_X550EM_a:
6833 mask = (queues & 0xFFFFFFFF);
6834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6835 mask = (queues >> 32);
6836 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6837 break;
6838 default:
6839 break;
6840 }
6841 } /* ixgbe_rearm_queues */
6842 #endif
6843