ixgbe.c revision 1.170 1 /* $NetBSD: ixgbe.c,v 1.170 2018/12/08 14:57:11 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84 /* XXX NetBSD: + 3.3.6 */
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static void ixgbe_recovery_mode_timer(void *);
188 static int ixgbe_setup_interface(device_t, struct adapter *);
189 static void ixgbe_config_gpie(struct adapter *);
190 static void ixgbe_config_dmac(struct adapter *);
191 static void ixgbe_config_delay_values(struct adapter *);
192 static void ixgbe_config_link(struct adapter *);
193 static void ixgbe_check_wol_support(struct adapter *);
194 static int ixgbe_setup_low_power_mode(struct adapter *);
195 #if 0
196 static void ixgbe_rearm_queues(struct adapter *, u64);
197 #endif
198
199 static void ixgbe_initialize_transmit_units(struct adapter *);
200 static void ixgbe_initialize_receive_units(struct adapter *);
201 static void ixgbe_enable_rx_drop(struct adapter *);
202 static void ixgbe_disable_rx_drop(struct adapter *);
203 static void ixgbe_initialize_rss_mapping(struct adapter *);
204
205 static void ixgbe_enable_intr(struct adapter *);
206 static void ixgbe_disable_intr(struct adapter *);
207 static void ixgbe_update_stats_counters(struct adapter *);
208 static void ixgbe_set_promisc(struct adapter *);
209 static void ixgbe_set_multi(struct adapter *);
210 static void ixgbe_update_link_status(struct adapter *);
211 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
212 static void ixgbe_configure_ivars(struct adapter *);
213 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
214 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
215
216 static void ixgbe_setup_vlan_hw_support(struct adapter *);
217 #if 0
218 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
219 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
220 #endif
221
222 static void ixgbe_add_device_sysctls(struct adapter *);
223 static void ixgbe_add_hw_stats(struct adapter *);
224 static void ixgbe_clear_evcnt(struct adapter *);
225 static int ixgbe_set_flowcntl(struct adapter *, int);
226 static int ixgbe_set_advertise(struct adapter *, int);
227 static int ixgbe_get_advertise(struct adapter *);
228
229 /* Sysctl handlers */
230 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
231 const char *, int *, int);
232 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
235 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
236 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
237 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
238 #ifdef IXGBE_DEBUG
239 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
241 #endif
242 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
251
252 /* Support for pluggable optic modules */
253 static bool ixgbe_sfp_probe(struct adapter *);
254
255 /* Legacy (single vector) interrupt handler */
256 static int ixgbe_legacy_irq(void *);
257
258 /* The MSI/MSI-X Interrupt handlers */
259 static int ixgbe_msix_que(void *);
260 static int ixgbe_msix_link(void *);
261
262 /* Software interrupts for deferred work */
263 static void ixgbe_handle_que(void *);
264 static void ixgbe_handle_link(void *);
265 static void ixgbe_handle_msf(void *);
266 static void ixgbe_handle_mod(void *);
267 static void ixgbe_handle_phy(void *);
268
269 /* Workqueue handler for deferred work */
270 static void ixgbe_handle_que_work(struct work *, void *);
271
272 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
273
274 /************************************************************************
275 * NetBSD Device Interface Entry Points
276 ************************************************************************/
277 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
278 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
279 DVF_DETACH_SHUTDOWN);
280
281 #if 0
282 devclass_t ix_devclass;
283 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
284
285 MODULE_DEPEND(ix, pci, 1, 1, 1);
286 MODULE_DEPEND(ix, ether, 1, 1, 1);
287 #ifdef DEV_NETMAP
288 MODULE_DEPEND(ix, netmap, 1, 1, 1);
289 #endif
290 #endif
291
292 /*
293 * TUNEABLE PARAMETERS:
294 */
295
296 /*
297 * AIM: Adaptive Interrupt Moderation
298 * which means that the interrupt rate
299 * is varied over time based on the
300 * traffic for that interrupt vector
301 */
302 static bool ixgbe_enable_aim = true;
303 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
304 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
305 "Enable adaptive interrupt moderation");
306
307 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
308 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
309 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
310
311 /* How many packets rxeof tries to clean at a time */
312 static int ixgbe_rx_process_limit = 256;
313 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
314 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
315
316 /* How many packets txeof tries to clean at a time */
317 static int ixgbe_tx_process_limit = 256;
318 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
319 &ixgbe_tx_process_limit, 0,
320 "Maximum number of sent packets to process at a time, -1 means unlimited");
321
322 /* Flow control setting, default to full */
323 static int ixgbe_flow_control = ixgbe_fc_full;
324 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
325 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
326
/* Whether packet processing uses workqueue or softint */
328 static bool ixgbe_txrx_workqueue = false;
329
330 /*
331 * Smart speed setting, default to on
332 * this only works as a compile option
333 * right now as its during attach, set
334 * this to 'ixgbe_smart_speed_off' to
335 * disable.
336 */
337 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
338
339 /*
340 * MSI-X should be the default for best performance,
341 * but this allows it to be forced off for testing.
342 */
343 static int ixgbe_enable_msix = 1;
344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
345 "Enable MSI-X interrupts");
346
347 /*
348 * Number of Queues, can be set to 0,
349 * it then autoconfigures based on the
350 * number of cpus with a max of 8. This
 * can be overridden manually here.
352 */
353 static int ixgbe_num_queues = 0;
354 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
355 "Number of queues to configure, 0 indicates autoconfigure");
356
357 /*
358 * Number of TX descriptors per ring,
359 * setting higher than RX as this seems
360 * the better performing choice.
361 */
362 static int ixgbe_txd = PERFORM_TXD;
363 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
364 "Number of transmit descriptors per queue");
365
366 /* Number of RX descriptors per ring */
367 static int ixgbe_rxd = PERFORM_RXD;
368 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
369 "Number of receive descriptors per queue");
370
371 /*
372 * Defining this on will allow the use
373 * of unsupported SFP+ modules, note that
374 * doing so you are on your own :)
375 */
376 static int allow_unsupported_sfp = false;
377 #define TUNABLE_INT(__x, __y)
378 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
379
380 /*
381 * Not sure if Flow Director is fully baked,
382 * so we'll default to turning it off.
383 */
384 static int ixgbe_enable_fdir = 0;
385 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
386 "Enable Flow Director");
387
388 /* Legacy Transmit (single queue) */
389 static int ixgbe_enable_legacy_tx = 0;
390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
391 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
392
393 /* Receive-Side Scaling */
394 static int ixgbe_enable_rss = 1;
395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
396 "Enable Receive-Side Scaling (RSS)");
397
398 #if 0
399 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
400 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
401 #endif
402
403 #ifdef NET_MPSAFE
404 #define IXGBE_MPSAFE 1
405 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
406 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
407 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
408 #else
409 #define IXGBE_CALLOUT_FLAGS 0
410 #define IXGBE_SOFTINFT_FLAGS 0
411 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
412 #endif
413 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
414
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program Receive Side Scaling in hardware: fill the redirection
 *   table (RETA, plus ERETA on MACs with a 512-entry table), load the
 *   ten 32-bit words of the RSS hash key (RSSRK), and select which
 *   packet/hash types participate (MRQC).  After this, received flows
 *   are spread across the adapter's RX queues by the hardware hash.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];	/* reta accumulates 4 entries per write */
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always takes the stack-wide key so toeplitz hashing
	 * agrees with the network stack's software RSS. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses a different per-entry multiplier */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family: 512 entries (128 in RETA + 384 in ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table; j cycles over the RX queues */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush one 32-bit register.
			 * Entries 0-127 go to RETA, the rest to ERETA. */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash-type flags into MRQC field-enable bits and
	 * commit; the MRQC write is what arms RSS in hardware. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
531
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs the global RX configuration (broadcast acceptance, jumbo
 *   frame enable, CRC stripping), then per-queue descriptor ring base/
 *   length, buffer sizing (SRRCTL), statistics mapping (RQSMR) and
 *   head/tail pointers, and finally RSS and RX checksum offload.
 *   Must run with receives disabled; ixgbe_disable_rx() is called first.
 ************************************************************************/
/* Round-up amount so rx_mbuf_sz converts to whole SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units (1KB granularity) */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index for this ring */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Non-82598: select header types for packet splitting */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
661
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Per TX queue: programs descriptor ring base/length (TDBAL/H,
 *   TDLEN), statistics mapping (TQSMR/TQSM), zeroes the head/tail
 *   pointers, caches the tail register for the ring, and disables
 *   descriptor write-back relaxed ordering.  On non-82598 MACs it then
 *   enables the TX DMA engine, briefly disabling the TX arbiter around
 *   the MTQC write as the datasheet requires.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware queue index for this ring */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Fresh ring: it cannot be out of descriptors yet */
		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
753
754 /************************************************************************
755 * ixgbe_attach - Device initialization routine
756 *
757 * Called when the driver is being loaded.
758 * Identifies the type of hardware, allocates all resources
759 * and initializes the hardware.
760 *
761 * return 0 on success, positive on failure
762 ************************************************************************/
763 static void
764 ixgbe_attach(device_t parent, device_t dev, void *aux)
765 {
766 struct adapter *adapter;
767 struct ixgbe_hw *hw;
768 int error = -1;
769 u32 ctrl_ext;
770 u16 high, low, nvmreg;
771 pcireg_t id, subid;
772 const ixgbe_vendor_info_t *ent;
773 struct pci_attach_args *pa = aux;
774 const char *str;
775 char buf[256];
776
777 INIT_DEBUGOUT("ixgbe_attach: begin");
778
779 /* Allocate, clear, and link in our adapter structure */
780 adapter = device_private(dev);
781 adapter->hw.back = adapter;
782 adapter->dev = dev;
783 hw = &adapter->hw;
784 adapter->osdep.pc = pa->pa_pc;
785 adapter->osdep.tag = pa->pa_tag;
786 if (pci_dma64_available(pa))
787 adapter->osdep.dmat = pa->pa_dmat64;
788 else
789 adapter->osdep.dmat = pa->pa_dmat;
790 adapter->osdep.attached = false;
791
792 ent = ixgbe_lookup(pa);
793
794 KASSERT(ent != NULL);
795
796 aprint_normal(": %s, Version - %s\n",
797 ixgbe_strings[ent->index], ixgbe_driver_version);
798
799 /* Core Lock Init*/
800 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
801
802 /* Set up the timer callout */
803 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
804
805 /* Determine hardware revision */
806 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
807 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
808
809 hw->vendor_id = PCI_VENDOR(id);
810 hw->device_id = PCI_PRODUCT(id);
811 hw->revision_id =
812 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
813 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
814 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
815
816 /*
817 * Make sure BUSMASTER is set
818 */
819 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
820
821 /* Do base PCI setup - map BAR0 */
822 if (ixgbe_allocate_pci_resources(adapter, pa)) {
823 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
824 error = ENXIO;
825 goto err_out;
826 }
827
828 /* let hardware know driver is loaded */
829 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
830 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
831 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
832
833 /*
834 * Initialize the shared code
835 */
836 if (ixgbe_init_shared_code(hw) != 0) {
837 aprint_error_dev(dev, "Unable to initialize the shared code\n");
838 error = ENXIO;
839 goto err_out;
840 }
841
842 switch (hw->mac.type) {
843 case ixgbe_mac_82598EB:
844 str = "82598EB";
845 break;
846 case ixgbe_mac_82599EB:
847 str = "82599EB";
848 break;
849 case ixgbe_mac_X540:
850 str = "X540";
851 break;
852 case ixgbe_mac_X550:
853 str = "X550";
854 break;
855 case ixgbe_mac_X550EM_x:
856 str = "X550EM";
857 break;
858 case ixgbe_mac_X550EM_a:
859 str = "X550EM A";
860 break;
861 default:
862 str = "Unknown";
863 break;
864 }
865 aprint_normal_dev(dev, "device %s\n", str);
866
867 if (hw->mbx.ops.init_params)
868 hw->mbx.ops.init_params(hw);
869
870 hw->allow_unsupported_sfp = allow_unsupported_sfp;
871
872 /* Pick up the 82599 settings */
873 if (hw->mac.type != ixgbe_mac_82598EB) {
874 hw->phy.smart_speed = ixgbe_smart_speed;
875 adapter->num_segs = IXGBE_82599_SCATTER;
876 } else
877 adapter->num_segs = IXGBE_82598_SCATTER;
878
879 hw->mac.ops.set_lan_id(hw);
880 ixgbe_init_device_features(adapter);
881
882 if (ixgbe_configure_interrupts(adapter)) {
883 error = ENXIO;
884 goto err_out;
885 }
886
887 /* Allocate multicast array memory. */
888 adapter->mta = malloc(sizeof(*adapter->mta) *
889 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
890 if (adapter->mta == NULL) {
891 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
892 error = ENOMEM;
893 goto err_out;
894 }
895
896 /* Enable WoL (if supported) */
897 ixgbe_check_wol_support(adapter);
898
899 /* Verify adapter fan is still functional (if applicable) */
900 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
901 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
902 ixgbe_check_fan_failure(adapter, esdp, FALSE);
903 }
904
905 /* Ensure SW/FW semaphore is free */
906 ixgbe_init_swfw_semaphore(hw);
907
908 /* Enable EEE power saving */
909 if (adapter->feat_en & IXGBE_FEATURE_EEE)
910 hw->mac.ops.setup_eee(hw, TRUE);
911
912 /* Set an initial default flow control value */
913 hw->fc.requested_mode = ixgbe_flow_control;
914
915 /* Sysctls for limiting the amount of work done in the taskqueues */
916 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
917 "max number of rx packets to process",
918 &adapter->rx_process_limit, ixgbe_rx_process_limit);
919
920 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
921 "max number of tx packets to process",
922 &adapter->tx_process_limit, ixgbe_tx_process_limit);
923
924 /* Do descriptor calc and sanity checks */
925 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
926 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
927 aprint_error_dev(dev, "TXD config issue, using default!\n");
928 adapter->num_tx_desc = DEFAULT_TXD;
929 } else
930 adapter->num_tx_desc = ixgbe_txd;
931
932 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
933 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
934 aprint_error_dev(dev, "RXD config issue, using default!\n");
935 adapter->num_rx_desc = DEFAULT_RXD;
936 } else
937 adapter->num_rx_desc = ixgbe_rxd;
938
939 /* Allocate our TX/RX Queues */
940 if (ixgbe_allocate_queues(adapter)) {
941 error = ENOMEM;
942 goto err_out;
943 }
944
945 hw->phy.reset_if_overtemp = TRUE;
946 error = ixgbe_reset_hw(hw);
947 hw->phy.reset_if_overtemp = FALSE;
948 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
949 /*
950 * No optics in this port, set up
951 * so the timer routine will probe
952 * for later insertion.
953 */
954 adapter->sfp_probe = TRUE;
955 error = IXGBE_SUCCESS;
956 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
957 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
958 error = EIO;
959 goto err_late;
960 } else if (error) {
961 aprint_error_dev(dev, "Hardware initialization failed\n");
962 error = EIO;
963 goto err_late;
964 }
965
966 /* Make sure we have a good EEPROM before we read from it */
967 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
968 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
969 error = EIO;
970 goto err_late;
971 }
972
973 aprint_normal("%s:", device_xname(dev));
974 /* NVM Image Version */
975 high = low = 0;
976 switch (hw->mac.type) {
977 case ixgbe_mac_X540:
978 case ixgbe_mac_X550EM_a:
979 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
980 if (nvmreg == 0xffff)
981 break;
982 high = (nvmreg >> 12) & 0x0f;
983 low = (nvmreg >> 4) & 0xff;
984 id = nvmreg & 0x0f;
985 aprint_normal(" NVM Image Version %u.", high);
986 if (hw->mac.type == ixgbe_mac_X540)
987 str = "%x";
988 else
989 str = "%02x";
990 aprint_normal(str, low);
991 aprint_normal(" ID 0x%x,", id);
992 break;
993 case ixgbe_mac_X550EM_x:
994 case ixgbe_mac_X550:
995 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
996 if (nvmreg == 0xffff)
997 break;
998 high = (nvmreg >> 12) & 0x0f;
999 low = nvmreg & 0xff;
1000 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1001 break;
1002 default:
1003 break;
1004 }
1005 hw->eeprom.nvm_image_ver_high = high;
1006 hw->eeprom.nvm_image_ver_low = low;
1007
1008 /* PHY firmware revision */
1009 switch (hw->mac.type) {
1010 case ixgbe_mac_X540:
1011 case ixgbe_mac_X550:
1012 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1013 if (nvmreg == 0xffff)
1014 break;
1015 high = (nvmreg >> 12) & 0x0f;
1016 low = (nvmreg >> 4) & 0xff;
1017 id = nvmreg & 0x000f;
1018 aprint_normal(" PHY FW Revision %u.", high);
1019 if (hw->mac.type == ixgbe_mac_X540)
1020 str = "%x";
1021 else
1022 str = "%02x";
1023 aprint_normal(str, low);
1024 aprint_normal(" ID 0x%x,", id);
1025 break;
1026 default:
1027 break;
1028 }
1029
1030 /* NVM Map version & OEM NVM Image version */
1031 switch (hw->mac.type) {
1032 case ixgbe_mac_X550:
1033 case ixgbe_mac_X550EM_x:
1034 case ixgbe_mac_X550EM_a:
1035 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1036 if (nvmreg != 0xffff) {
1037 high = (nvmreg >> 12) & 0x0f;
1038 low = nvmreg & 0x00ff;
1039 aprint_normal(" NVM Map version %u.%02x,", high, low);
1040 }
1041 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1042 if (nvmreg != 0xffff) {
1043 high = (nvmreg >> 12) & 0x0f;
1044 low = nvmreg & 0x00ff;
1045 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1046 low);
1047 }
1048 break;
1049 default:
1050 break;
1051 }
1052
1053 /* Print the ETrackID */
1054 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1055 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1056 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1057
1058 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1059 error = ixgbe_allocate_msix(adapter, pa);
1060 if (error) {
1061 /* Free allocated queue structures first */
1062 ixgbe_free_transmit_structures(adapter);
1063 ixgbe_free_receive_structures(adapter);
1064 free(adapter->queues, M_DEVBUF);
1065
1066 /* Fallback to legacy interrupt */
1067 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1068 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1069 adapter->feat_en |= IXGBE_FEATURE_MSI;
1070 adapter->num_queues = 1;
1071
1072 /* Allocate our TX/RX Queues again */
1073 if (ixgbe_allocate_queues(adapter)) {
1074 error = ENOMEM;
1075 goto err_out;
1076 }
1077 }
1078 }
1079 /* Recovery mode */
1080 switch (adapter->hw.mac.type) {
1081 case ixgbe_mac_X550:
1082 case ixgbe_mac_X550EM_x:
1083 case ixgbe_mac_X550EM_a:
1084 /* >= 2.00 */
1085 if (hw->eeprom.nvm_image_ver_high >= 2) {
1086 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1087 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1088 }
1089 break;
1090 default:
1091 break;
1092 }
1093
1094 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1095 error = ixgbe_allocate_legacy(adapter, pa);
1096 if (error)
1097 goto err_late;
1098
1099 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1100 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_handle_link, adapter);
1102 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1103 ixgbe_handle_mod, adapter);
1104 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1105 ixgbe_handle_msf, adapter);
1106 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_handle_phy, adapter);
1108 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1109 adapter->fdir_si =
1110 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1111 ixgbe_reinit_fdir, adapter);
1112 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1113 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1114 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1115 && (adapter->fdir_si == NULL))) {
1116 aprint_error_dev(dev,
1117 "could not establish software interrupts ()\n");
1118 goto err_out;
1119 }
1120
1121 error = ixgbe_start_hw(hw);
1122 switch (error) {
1123 case IXGBE_ERR_EEPROM_VERSION:
1124 aprint_error_dev(dev, "This device is a pre-production adapter/"
1125 "LOM. Please be aware there may be issues associated "
1126 "with your hardware.\nIf you are experiencing problems "
1127 "please contact your Intel or hardware representative "
1128 "who provided you with this hardware.\n");
1129 break;
1130 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1131 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1132 error = EIO;
1133 goto err_late;
1134 case IXGBE_ERR_SFP_NOT_PRESENT:
1135 aprint_error_dev(dev, "No SFP+ Module found\n");
1136 /* falls thru */
1137 default:
1138 break;
1139 }
1140
1141 /* Setup OS specific network interface */
1142 if (ixgbe_setup_interface(dev, adapter) != 0)
1143 goto err_late;
1144
1145 /*
1146 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1147 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1148 */
1149 if (hw->phy.media_type == ixgbe_media_type_copper) {
1150 uint16_t id1, id2;
1151 int oui, model, rev;
1152 const char *descr;
1153
1154 id1 = hw->phy.id >> 16;
1155 id2 = hw->phy.id & 0xffff;
1156 oui = MII_OUI(id1, id2);
1157 model = MII_MODEL(id2);
1158 rev = MII_REV(id2);
1159 if ((descr = mii_get_descr(oui, model)) != NULL)
1160 aprint_normal_dev(dev,
1161 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1162 descr, oui, model, rev);
1163 else
1164 aprint_normal_dev(dev,
1165 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1166 oui, model, rev);
1167 }
1168
1169 /* Enable the optics for 82599 SFP+ fiber */
1170 ixgbe_enable_tx_laser(hw);
1171
1172 /* Enable power to the phy. */
1173 ixgbe_set_phy_power(hw, TRUE);
1174
1175 /* Initialize statistics */
1176 ixgbe_update_stats_counters(adapter);
1177
1178 /* Check PCIE slot type/speed/width */
1179 ixgbe_get_slot_info(adapter);
1180
1181 /*
1182 * Do time init and sysctl init here, but
1183 * only on the first port of a bypass adapter.
1184 */
1185 ixgbe_bypass_init(adapter);
1186
1187 /* Set an initial dmac value */
1188 adapter->dmac = 0;
1189 /* Set initial advertised speeds (if applicable) */
1190 adapter->advertise = ixgbe_get_advertise(adapter);
1191
1192 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1193 ixgbe_define_iov_schemas(dev, &error);
1194
1195 /* Add sysctls */
1196 ixgbe_add_device_sysctls(adapter);
1197 ixgbe_add_hw_stats(adapter);
1198
1199 /* For Netmap */
1200 adapter->init_locked = ixgbe_init_locked;
1201 adapter->stop_locked = ixgbe_stop;
1202
1203 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1204 ixgbe_netmap_attach(adapter);
1205
1206 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1207 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1208 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1209 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1210
1211 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1212 pmf_class_network_register(dev, adapter->ifp);
1213 else
1214 aprint_error_dev(dev, "couldn't establish power handler\n");
1215
1216 /* Init recovery mode timer and state variable */
1217 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1218 adapter->recovery_mode = 0;
1219
1220 /* Set up the timer callout */
1221 callout_init(&adapter->recovery_mode_timer,
1222 IXGBE_CALLOUT_FLAGS);
1223
1224 /* Start the task */
1225 callout_reset(&adapter->recovery_mode_timer, hz,
1226 ixgbe_recovery_mode_timer, adapter);
1227 }
1228
1229 INIT_DEBUGOUT("ixgbe_attach: end");
1230 adapter->osdep.attached = true;
1231
1232 return;
1233
1234 err_late:
1235 ixgbe_free_transmit_structures(adapter);
1236 ixgbe_free_receive_structures(adapter);
1237 free(adapter->queues, M_DEVBUF);
1238 err_out:
1239 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1240 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1241 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1242 ixgbe_free_softint(adapter);
1243 ixgbe_free_pci_resources(adapter);
1244 if (adapter->mta != NULL)
1245 free(adapter->mta, M_DEVBUF);
1246 IXGBE_CORE_LOCK_DESTROY(adapter);
1247
1248 return;
1249 } /* ixgbe_attach */
1250
1251 /************************************************************************
1252 * ixgbe_check_wol_support
1253 *
1254 * Checks whether the adapter's ports are capable of
1255 * Wake On LAN by reading the adapter's NVM.
1256 *
1257 * Sets each port's hw->wol_enabled value depending
1258 * on the value read here.
1259 ************************************************************************/
1260 static void
1261 ixgbe_check_wol_support(struct adapter *adapter)
1262 {
1263 struct ixgbe_hw *hw = &adapter->hw;
1264 u16 dev_caps = 0;
1265
1266 /* Find out WoL support for port */
1267 adapter->wol_support = hw->wol_enabled = 0;
1268 ixgbe_get_device_caps(hw, &dev_caps);
1269 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1270 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1271 hw->bus.func == 0))
1272 adapter->wol_support = hw->wol_enabled = 1;
1273
1274 /* Save initial wake up filter configuration */
1275 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1276
1277 return;
1278 } /* ixgbe_check_wol_support */
1279
1280 /************************************************************************
1281 * ixgbe_setup_interface
1282 *
1283 * Setup networking device structure and register an interface.
1284 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in our ethercom; wire up driver callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
	/* The following block is FreeBSD-only; inert in a NetBSD build. */
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Install the multiqueue transmit entry point only when the
	 * legacy-TX feature is disabled; if_start below serves as the
	 * legacy path either way.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach order matters: if_initialize, then the percpu input
	 * queue, then the Ethernet attachment, and if_register last.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     |  IFCAP_TXCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6;
	/* Checksum/TSO offloads start disabled; enabled via ioctl. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1387
1388 /************************************************************************
1389 * ixgbe_add_media_types
1390 ************************************************************************/
1391 static void
1392 ixgbe_add_media_types(struct adapter *adapter)
1393 {
1394 struct ixgbe_hw *hw = &adapter->hw;
1395 device_t dev = adapter->dev;
1396 u64 layer;
1397
1398 layer = adapter->phy_layer;
1399
1400 #define ADD(mm, dd) \
1401 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1402
1403 ADD(IFM_NONE, 0);
1404
1405 /* Media types with matching NetBSD media defines */
1406 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1407 ADD(IFM_10G_T | IFM_FDX, 0);
1408 }
1409 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1410 ADD(IFM_1000_T | IFM_FDX, 0);
1411 }
1412 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1413 ADD(IFM_100_TX | IFM_FDX, 0);
1414 }
1415 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1416 ADD(IFM_10_T | IFM_FDX, 0);
1417 }
1418
1419 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1420 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1421 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1422 }
1423
1424 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1425 ADD(IFM_10G_LR | IFM_FDX, 0);
1426 if (hw->phy.multispeed_fiber) {
1427 ADD(IFM_1000_LX | IFM_FDX, 0);
1428 }
1429 }
1430 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1431 ADD(IFM_10G_SR | IFM_FDX, 0);
1432 if (hw->phy.multispeed_fiber) {
1433 ADD(IFM_1000_SX | IFM_FDX, 0);
1434 }
1435 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1436 ADD(IFM_1000_SX | IFM_FDX, 0);
1437 }
1438 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1439 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1440 }
1441
1442 #ifdef IFM_ETH_XTYPE
1443 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1444 ADD(IFM_10G_KR | IFM_FDX, 0);
1445 }
1446 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1447 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1448 }
1449 #else
1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1451 device_printf(dev, "Media supported: 10GbaseKR\n");
1452 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1453 ADD(IFM_10G_SR | IFM_FDX, 0);
1454 }
1455 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1456 device_printf(dev, "Media supported: 10GbaseKX4\n");
1457 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1458 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1459 }
1460 #endif
1461 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1462 ADD(IFM_1000_KX | IFM_FDX, 0);
1463 }
1464 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1465 ADD(IFM_2500_KX | IFM_FDX, 0);
1466 }
1467 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1468 ADD(IFM_2500_T | IFM_FDX, 0);
1469 }
1470 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1471 ADD(IFM_5000_T | IFM_FDX, 0);
1472 }
1473 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1474 device_printf(dev, "Media supported: 1000baseBX\n");
1475 /* XXX no ifmedia_set? */
1476
1477 ADD(IFM_AUTO, 0);
1478
1479 #undef ADD
1480 } /* ixgbe_add_media_types */
1481
1482 /************************************************************************
1483 * ixgbe_is_sfp
1484 ************************************************************************/
1485 static inline bool
1486 ixgbe_is_sfp(struct ixgbe_hw *hw)
1487 {
1488 switch (hw->mac.type) {
1489 case ixgbe_mac_82598EB:
1490 if (hw->phy.type == ixgbe_phy_nl)
1491 return (TRUE);
1492 return (FALSE);
1493 case ixgbe_mac_82599EB:
1494 switch (hw->mac.ops.get_media_type(hw)) {
1495 case ixgbe_media_type_fiber:
1496 case ixgbe_media_type_fiber_qsfp:
1497 return (TRUE);
1498 default:
1499 return (FALSE);
1500 }
1501 case ixgbe_mac_X550EM_x:
1502 case ixgbe_mac_X550EM_a:
1503 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1504 return (TRUE);
1505 return (FALSE);
1506 default:
1507 return (FALSE);
1508 }
1509 } /* ixgbe_is_sfp */
1510
1511 /************************************************************************
1512 * ixgbe_config_link
1513 ************************************************************************/
1514 static void
1515 ixgbe_config_link(struct adapter *adapter)
1516 {
1517 struct ixgbe_hw *hw = &adapter->hw;
1518 u32 autoneg, err = 0;
1519 bool sfp, negotiate = false;
1520
1521 sfp = ixgbe_is_sfp(hw);
1522
1523 if (sfp) {
1524 if (hw->phy.multispeed_fiber) {
1525 ixgbe_enable_tx_laser(hw);
1526 kpreempt_disable();
1527 softint_schedule(adapter->msf_si);
1528 kpreempt_enable();
1529 }
1530 kpreempt_disable();
1531 softint_schedule(adapter->mod_si);
1532 kpreempt_enable();
1533 } else {
1534 struct ifmedia *ifm = &adapter->media;
1535
1536 if (hw->mac.ops.check_link)
1537 err = ixgbe_check_link(hw, &adapter->link_speed,
1538 &adapter->link_up, FALSE);
1539 if (err)
1540 return;
1541
1542 /*
1543 * Check if it's the first call. If it's the first call,
1544 * get value for auto negotiation.
1545 */
1546 autoneg = hw->phy.autoneg_advertised;
1547 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1548 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1549 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1550 &negotiate);
1551 if (err)
1552 return;
1553 if (hw->mac.ops.setup_link)
1554 err = hw->mac.ops.setup_link(hw, autoneg,
1555 adapter->link_up);
1556 }
1557
1558 } /* ixgbe_config_link */
1559
1560 /************************************************************************
1561 * ixgbe_update_stats_counters - Update board statistics counters.
1562 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/*
	 * NOTE(review): missed_rx is never incremented in this version,
	 * so the GPRC workaround below subtracts 0 — presumably a
	 * leftover from per-queue miss accounting; confirm upstream.
	 */
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	int i, j;

	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers */
	for (i = 0; i < __arraycount(stats->qprc); i++) {
		/* Fold the 16 per-queue registers into the active queues. */
		j = i % adapter->num_queues;

		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		/* QPRDC (dropped packet count) exists on 82599 and later. */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers (one per traffic class) */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		/* RNBC only exists on 82598. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* Priority-pause RX counters moved registers on 82599+. */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	/*
	 * 82599 and later have 36-bit byte counters split across a
	 * low/high register pair; 82598 keeps the whole count in the
	 * "high" register.  Transmitted pause frames (total, counted at
	 * minimum frame size) are subtracted from the good-octets and
	 * good-packets TX counters — presumably to exclude flow-control
	 * traffic; confirm against the datasheet.
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Received packets by size bucket. */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, again net of the pause frames counted in total. */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1724
1725 /************************************************************************
1726 * ixgbe_add_hw_stats
1727 *
1728 * Add sysctl variables, one per statistic, to the system.
1729 ************************************************************************/
1730 static void
1731 ixgbe_add_hw_stats(struct adapter *adapter)
1732 {
1733 device_t dev = adapter->dev;
1734 const struct sysctlnode *rnode, *cnode;
1735 struct sysctllog **log = &adapter->sysctllog;
1736 struct tx_ring *txr = adapter->tx_rings;
1737 struct rx_ring *rxr = adapter->rx_rings;
1738 struct ixgbe_hw *hw = &adapter->hw;
1739 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1740 const char *xname = device_xname(dev);
1741 int i;
1742
1743 /* Driver Statistics */
1744 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1745 NULL, xname, "Driver tx dma soft fail EFBIG");
1746 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1747 NULL, xname, "m_defrag() failed");
1748 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1749 NULL, xname, "Driver tx dma hard fail EFBIG");
1750 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1751 NULL, xname, "Driver tx dma hard fail EINVAL");
1752 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1753 NULL, xname, "Driver tx dma hard fail other");
1754 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1755 NULL, xname, "Driver tx dma soft fail EAGAIN");
1756 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1757 NULL, xname, "Driver tx dma soft fail ENOMEM");
1758 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1759 NULL, xname, "Watchdog timeouts");
1760 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1761 NULL, xname, "TSO errors");
1762 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1763 NULL, xname, "Link MSI-X IRQ Handled");
1764 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1765 NULL, xname, "Link softint");
1766 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1767 NULL, xname, "module softint");
1768 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1769 NULL, xname, "multimode softint");
1770 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1771 NULL, xname, "external PHY softint");
1772
1773 /* Max number of traffic class is 8 */
1774 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1775 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1776 snprintf(adapter->tcs[i].evnamebuf,
1777 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1778 xname, i);
1779 if (i < __arraycount(stats->mpc)) {
1780 evcnt_attach_dynamic(&stats->mpc[i],
1781 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1782 "RX Missed Packet Count");
1783 if (hw->mac.type == ixgbe_mac_82598EB)
1784 evcnt_attach_dynamic(&stats->rnbc[i],
1785 EVCNT_TYPE_MISC, NULL,
1786 adapter->tcs[i].evnamebuf,
1787 "Receive No Buffers");
1788 }
1789 if (i < __arraycount(stats->pxontxc)) {
1790 evcnt_attach_dynamic(&stats->pxontxc[i],
1791 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1792 "pxontxc");
1793 evcnt_attach_dynamic(&stats->pxonrxc[i],
1794 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1795 "pxonrxc");
1796 evcnt_attach_dynamic(&stats->pxofftxc[i],
1797 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1798 "pxofftxc");
1799 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1800 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1801 "pxoffrxc");
1802 if (hw->mac.type >= ixgbe_mac_82599EB)
1803 evcnt_attach_dynamic(&stats->pxon2offc[i],
1804 EVCNT_TYPE_MISC, NULL,
1805 adapter->tcs[i].evnamebuf,
1806 "pxon2offc");
1807 }
1808 }
1809
1810 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1811 #ifdef LRO
1812 struct lro_ctrl *lro = &rxr->lro;
1813 #endif /* LRO */
1814
1815 snprintf(adapter->queues[i].evnamebuf,
1816 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1817 xname, i);
1818 snprintf(adapter->queues[i].namebuf,
1819 sizeof(adapter->queues[i].namebuf), "q%d", i);
1820
1821 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1822 aprint_error_dev(dev, "could not create sysctl root\n");
1823 break;
1824 }
1825
1826 if (sysctl_createv(log, 0, &rnode, &rnode,
1827 0, CTLTYPE_NODE,
1828 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1829 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1830 break;
1831
1832 if (sysctl_createv(log, 0, &rnode, &cnode,
1833 CTLFLAG_READWRITE, CTLTYPE_INT,
1834 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1835 ixgbe_sysctl_interrupt_rate_handler, 0,
1836 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1837 break;
1838
1839 if (sysctl_createv(log, 0, &rnode, &cnode,
1840 CTLFLAG_READONLY, CTLTYPE_INT,
1841 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1842 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1843 0, CTL_CREATE, CTL_EOL) != 0)
1844 break;
1845
1846 if (sysctl_createv(log, 0, &rnode, &cnode,
1847 CTLFLAG_READONLY, CTLTYPE_INT,
1848 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1849 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1850 0, CTL_CREATE, CTL_EOL) != 0)
1851 break;
1852
1853 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1854 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1855 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1856 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1857 "Handled queue in softint");
1858 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1859 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1860 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1861 NULL, adapter->queues[i].evnamebuf, "TSO");
1862 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1863 NULL, adapter->queues[i].evnamebuf,
1864 "Queue No Descriptor Available");
1865 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1866 NULL, adapter->queues[i].evnamebuf,
1867 "Queue Packets Transmitted");
1868 #ifndef IXGBE_LEGACY_TX
1869 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1870 NULL, adapter->queues[i].evnamebuf,
1871 "Packets dropped in pcq");
1872 #endif
1873
1874 if (sysctl_createv(log, 0, &rnode, &cnode,
1875 CTLFLAG_READONLY,
1876 CTLTYPE_INT,
1877 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1878 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1879 CTL_CREATE, CTL_EOL) != 0)
1880 break;
1881
1882 if (sysctl_createv(log, 0, &rnode, &cnode,
1883 CTLFLAG_READONLY,
1884 CTLTYPE_INT,
1885 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1886 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1887 CTL_CREATE, CTL_EOL) != 0)
1888 break;
1889
1890 if (sysctl_createv(log, 0, &rnode, &cnode,
1891 CTLFLAG_READONLY,
1892 CTLTYPE_INT,
1893 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1894 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1895 CTL_CREATE, CTL_EOL) != 0)
1896 break;
1897
1898 if (i < __arraycount(stats->qprc)) {
1899 evcnt_attach_dynamic(&stats->qprc[i],
1900 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1901 "qprc");
1902 evcnt_attach_dynamic(&stats->qptc[i],
1903 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1904 "qptc");
1905 evcnt_attach_dynamic(&stats->qbrc[i],
1906 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1907 "qbrc");
1908 evcnt_attach_dynamic(&stats->qbtc[i],
1909 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1910 "qbtc");
1911 if (hw->mac.type >= ixgbe_mac_82599EB)
1912 evcnt_attach_dynamic(&stats->qprdc[i],
1913 EVCNT_TYPE_MISC, NULL,
1914 adapter->queues[i].evnamebuf, "qprdc");
1915 }
1916
1917 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1918 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1919 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1920 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1921 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1922 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1923 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1924 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1925 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1926 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1927 #ifdef LRO
1928 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1929 CTLFLAG_RD, &lro->lro_queued, 0,
1930 "LRO Queued");
1931 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1932 CTLFLAG_RD, &lro->lro_flushed, 0,
1933 "LRO Flushed");
1934 #endif /* LRO */
1935 }
1936
1937 /* MAC stats get their own sub node */
1938
1939 snprintf(stats->namebuf,
1940 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1941
1942 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "rx csum offload - IP");
1944 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "rx csum offload - L4");
1946 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "rx csum offload - IP bad");
1948 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "rx csum offload - L4 bad");
1950 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Interrupt conditions zero");
1952 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Legacy interrupts");
1954
1955 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "CRC Errors");
1957 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "Illegal Byte Errors");
1959 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "Byte Errors");
1961 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "MAC Short Packets Discarded");
1963 if (hw->mac.type >= ixgbe_mac_X550)
1964 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "Bad SFD");
1966 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "Total Packets Missed");
1968 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "MAC Local Faults");
1970 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "MAC Remote Faults");
1972 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "Receive Length Errors");
1974 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Link XON Transmitted");
1976 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Link XON Received");
1978 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Link XOFF Transmitted");
1980 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "Link XOFF Received");
1982
1983 /* Packet Reception Stats */
1984 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Total Octets Received");
1986 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Good Octets Received");
1988 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Total Packets Received");
1990 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Good Packets Received");
1992 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Multicast Packets Received");
1994 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Broadcast Packets Received");
1996 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "64 byte frames received ");
1998 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "65-127 byte frames received");
2000 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "128-255 byte frames received");
2002 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "256-511 byte frames received");
2004 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "512-1023 byte frames received");
2006 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "1023-1522 byte frames received");
2008 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Receive Undersized");
2010 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Fragmented Packets Received ");
2012 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Oversized Packets Received");
2014 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Received Jabber");
2016 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Management Packets Received");
2018 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Management Packets Dropped");
2020 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Checksum Errors");
2022
2023 /* Packet Transmission Stats */
2024 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Good Octets Transmitted");
2026 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Total Packets Transmitted");
2028 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Good Packets Transmitted");
2030 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Broadcast Packets Transmitted");
2032 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Multicast Packets Transmitted");
2034 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Management Packets Transmitted");
2036 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "64 byte frames transmitted ");
2038 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "65-127 byte frames transmitted");
2040 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "128-255 byte frames transmitted");
2042 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "256-511 byte frames transmitted");
2044 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "512-1023 byte frames transmitted");
2046 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "1024-1522 byte frames transmitted");
2048 } /* ixgbe_add_hw_stats */
2049
2050 static void
2051 ixgbe_clear_evcnt(struct adapter *adapter)
2052 {
2053 struct tx_ring *txr = adapter->tx_rings;
2054 struct rx_ring *rxr = adapter->rx_rings;
2055 struct ixgbe_hw *hw = &adapter->hw;
2056 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2057 int i;
2058
2059 adapter->efbig_tx_dma_setup.ev_count = 0;
2060 adapter->mbuf_defrag_failed.ev_count = 0;
2061 adapter->efbig2_tx_dma_setup.ev_count = 0;
2062 adapter->einval_tx_dma_setup.ev_count = 0;
2063 adapter->other_tx_dma_setup.ev_count = 0;
2064 adapter->eagain_tx_dma_setup.ev_count = 0;
2065 adapter->enomem_tx_dma_setup.ev_count = 0;
2066 adapter->tso_err.ev_count = 0;
2067 adapter->watchdog_events.ev_count = 0;
2068 adapter->link_irq.ev_count = 0;
2069 adapter->link_sicount.ev_count = 0;
2070 adapter->mod_sicount.ev_count = 0;
2071 adapter->msf_sicount.ev_count = 0;
2072 adapter->phy_sicount.ev_count = 0;
2073
2074 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2075 if (i < __arraycount(stats->mpc)) {
2076 stats->mpc[i].ev_count = 0;
2077 if (hw->mac.type == ixgbe_mac_82598EB)
2078 stats->rnbc[i].ev_count = 0;
2079 }
2080 if (i < __arraycount(stats->pxontxc)) {
2081 stats->pxontxc[i].ev_count = 0;
2082 stats->pxonrxc[i].ev_count = 0;
2083 stats->pxofftxc[i].ev_count = 0;
2084 stats->pxoffrxc[i].ev_count = 0;
2085 if (hw->mac.type >= ixgbe_mac_82599EB)
2086 stats->pxon2offc[i].ev_count = 0;
2087 }
2088 }
2089
2090 txr = adapter->tx_rings;
2091 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2092 adapter->queues[i].irqs.ev_count = 0;
2093 adapter->queues[i].handleq.ev_count = 0;
2094 adapter->queues[i].req.ev_count = 0;
2095 txr->no_desc_avail.ev_count = 0;
2096 txr->total_packets.ev_count = 0;
2097 txr->tso_tx.ev_count = 0;
2098 #ifndef IXGBE_LEGACY_TX
2099 txr->pcq_drops.ev_count = 0;
2100 #endif
2101 txr->q_efbig_tx_dma_setup = 0;
2102 txr->q_mbuf_defrag_failed = 0;
2103 txr->q_efbig2_tx_dma_setup = 0;
2104 txr->q_einval_tx_dma_setup = 0;
2105 txr->q_other_tx_dma_setup = 0;
2106 txr->q_eagain_tx_dma_setup = 0;
2107 txr->q_enomem_tx_dma_setup = 0;
2108 txr->q_tso_err = 0;
2109
2110 if (i < __arraycount(stats->qprc)) {
2111 stats->qprc[i].ev_count = 0;
2112 stats->qptc[i].ev_count = 0;
2113 stats->qbrc[i].ev_count = 0;
2114 stats->qbtc[i].ev_count = 0;
2115 if (hw->mac.type >= ixgbe_mac_82599EB)
2116 stats->qprdc[i].ev_count = 0;
2117 }
2118
2119 rxr->rx_packets.ev_count = 0;
2120 rxr->rx_bytes.ev_count = 0;
2121 rxr->rx_copies.ev_count = 0;
2122 rxr->no_jmbuf.ev_count = 0;
2123 rxr->rx_discarded.ev_count = 0;
2124 }
2125 stats->ipcs.ev_count = 0;
2126 stats->l4cs.ev_count = 0;
2127 stats->ipcs_bad.ev_count = 0;
2128 stats->l4cs_bad.ev_count = 0;
2129 stats->intzero.ev_count = 0;
2130 stats->legint.ev_count = 0;
2131 stats->crcerrs.ev_count = 0;
2132 stats->illerrc.ev_count = 0;
2133 stats->errbc.ev_count = 0;
2134 stats->mspdc.ev_count = 0;
2135 stats->mbsdc.ev_count = 0;
2136 stats->mpctotal.ev_count = 0;
2137 stats->mlfc.ev_count = 0;
2138 stats->mrfc.ev_count = 0;
2139 stats->rlec.ev_count = 0;
2140 stats->lxontxc.ev_count = 0;
2141 stats->lxonrxc.ev_count = 0;
2142 stats->lxofftxc.ev_count = 0;
2143 stats->lxoffrxc.ev_count = 0;
2144
2145 /* Packet Reception Stats */
2146 stats->tor.ev_count = 0;
2147 stats->gorc.ev_count = 0;
2148 stats->tpr.ev_count = 0;
2149 stats->gprc.ev_count = 0;
2150 stats->mprc.ev_count = 0;
2151 stats->bprc.ev_count = 0;
2152 stats->prc64.ev_count = 0;
2153 stats->prc127.ev_count = 0;
2154 stats->prc255.ev_count = 0;
2155 stats->prc511.ev_count = 0;
2156 stats->prc1023.ev_count = 0;
2157 stats->prc1522.ev_count = 0;
2158 stats->ruc.ev_count = 0;
2159 stats->rfc.ev_count = 0;
2160 stats->roc.ev_count = 0;
2161 stats->rjc.ev_count = 0;
2162 stats->mngprc.ev_count = 0;
2163 stats->mngpdc.ev_count = 0;
2164 stats->xec.ev_count = 0;
2165
2166 /* Packet Transmission Stats */
2167 stats->gotc.ev_count = 0;
2168 stats->tpt.ev_count = 0;
2169 stats->gptc.ev_count = 0;
2170 stats->bptc.ev_count = 0;
2171 stats->mptc.ev_count = 0;
2172 stats->mngptc.ev_count = 0;
2173 stats->ptc64.ev_count = 0;
2174 stats->ptc127.ev_count = 0;
2175 stats->ptc255.ev_count = 0;
2176 stats->ptc511.ev_count = 0;
2177 stats->ptc1023.ev_count = 0;
2178 stats->ptc1522.ev_count = 0;
2179 }
2180
2181 /************************************************************************
2182 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2183 *
2184 * Retrieves the TDH value from the hardware
2185 ************************************************************************/
2186 static int
2187 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2188 {
2189 struct sysctlnode node = *rnode;
2190 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2191 struct adapter *adapter;
2192 uint32_t val;
2193
2194 if (!txr)
2195 return (0);
2196
2197 adapter = txr->adapter;
2198 if (ixgbe_fw_recovery_mode_swflag(adapter))
2199 return (EPERM);
2200
2201 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2202 node.sysctl_data = &val;
2203 return sysctl_lookup(SYSCTLFN_CALL(&node));
2204 } /* ixgbe_sysctl_tdh_handler */
2205
2206 /************************************************************************
2207 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2208 *
2209 * Retrieves the TDT value from the hardware
2210 ************************************************************************/
2211 static int
2212 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2213 {
2214 struct sysctlnode node = *rnode;
2215 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2216 struct adapter *adapter;
2217 uint32_t val;
2218
2219 if (!txr)
2220 return (0);
2221
2222 adapter = txr->adapter;
2223 if (ixgbe_fw_recovery_mode_swflag(adapter))
2224 return (EPERM);
2225
2226 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2227 node.sysctl_data = &val;
2228 return sysctl_lookup(SYSCTLFN_CALL(&node));
2229 } /* ixgbe_sysctl_tdt_handler */
2230
2231 /************************************************************************
2232 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2233 * handler function
2234 *
2235 * Retrieves the next_to_check value
2236 ************************************************************************/
2237 static int
2238 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2239 {
2240 struct sysctlnode node = *rnode;
2241 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2242 struct adapter *adapter;
2243 uint32_t val;
2244
2245 if (!rxr)
2246 return (0);
2247
2248 adapter = rxr->adapter;
2249 if (ixgbe_fw_recovery_mode_swflag(adapter))
2250 return (EPERM);
2251
2252 val = rxr->next_to_check;
2253 node.sysctl_data = &val;
2254 return sysctl_lookup(SYSCTLFN_CALL(&node));
2255 } /* ixgbe_sysctl_next_to_check_handler */
2256
2257 /************************************************************************
2258 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2259 *
2260 * Retrieves the RDH value from the hardware
2261 ************************************************************************/
2262 static int
2263 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2264 {
2265 struct sysctlnode node = *rnode;
2266 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2267 struct adapter *adapter;
2268 uint32_t val;
2269
2270 if (!rxr)
2271 return (0);
2272
2273 adapter = rxr->adapter;
2274 if (ixgbe_fw_recovery_mode_swflag(adapter))
2275 return (EPERM);
2276
2277 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2278 node.sysctl_data = &val;
2279 return sysctl_lookup(SYSCTLFN_CALL(&node));
2280 } /* ixgbe_sysctl_rdh_handler */
2281
2282 /************************************************************************
2283 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2284 *
2285 * Retrieves the RDT value from the hardware
2286 ************************************************************************/
2287 static int
2288 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2289 {
2290 struct sysctlnode node = *rnode;
2291 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2292 struct adapter *adapter;
2293 uint32_t val;
2294
2295 if (!rxr)
2296 return (0);
2297
2298 adapter = rxr->adapter;
2299 if (ixgbe_fw_recovery_mode_swflag(adapter))
2300 return (EPERM);
2301
2302 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2303 node.sysctl_data = &val;
2304 return sysctl_lookup(SYSCTLFN_CALL(&node));
2305 } /* ixgbe_sysctl_rdt_handler */
2306
2307 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2308 /************************************************************************
2309 * ixgbe_register_vlan
2310 *
2311 * Run via vlan config EVENT, it enables us to use the
2312 * HW Filter table since we can get the vlan id. This
2313 * just creates the entry in the soft version of the
2314 * VFTA, init will repopulate the real table.
2315 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * The VFTA is a bitmap: each 32-bit word covers 32 VLAN IDs.
	 * Bits 5..11 of the tag select the word, bits 0..4 the bit.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated soft VFTA into the hardware filter table. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2335
2336 /************************************************************************
2337 * ixgbe_unregister_vlan
2338 *
2339 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2340 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit layout as in ixgbe_register_vlan(). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2361 #endif
2362
2363 static void
2364 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2365 {
2366 struct ethercom *ec = &adapter->osdep.ec;
2367 struct ixgbe_hw *hw = &adapter->hw;
2368 struct rx_ring *rxr;
2369 int i;
2370 u32 ctrl;
2371
2372
2373 /*
2374 * We get here thru init_locked, meaning
2375 * a soft reset, this has already cleared
2376 * the VFTA and other state, so if there
2377 * have been no vlan's registered do nothing.
2378 */
2379 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2380 return;
2381
2382 /* Setup the queues for vlans */
2383 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2384 for (i = 0; i < adapter->num_queues; i++) {
2385 rxr = &adapter->rx_rings[i];
2386 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2387 if (hw->mac.type != ixgbe_mac_82598EB) {
2388 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2389 ctrl |= IXGBE_RXDCTL_VME;
2390 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2391 }
2392 rxr->vtag_strip = TRUE;
2393 }
2394 }
2395
2396 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2397 return;
2398 /*
2399 * A soft reset zero's out the VFTA, so
2400 * we need to repopulate it now.
2401 */
2402 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2403 if (adapter->shadow_vfta[i] != 0)
2404 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2405 adapter->shadow_vfta[i]);
2406
2407 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2408 /* Enable the Filter Table if enabled */
2409 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2410 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2411 ctrl |= IXGBE_VLNCTRL_VFE;
2412 }
2413 if (hw->mac.type == ixgbe_mac_82598EB)
2414 ctrl |= IXGBE_VLNCTRL_VME;
2415 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2416 } /* ixgbe_setup_vlan_hw_support */
2417
2418 /************************************************************************
2419 * ixgbe_get_slot_info
2420 *
2421 * Get the width and transaction speed of
2422 * the slot this adapter is plugged into.
2423 ************************************************************************/
2424 static void
2425 ixgbe_get_slot_info(struct adapter *adapter)
2426 {
2427 device_t dev = adapter->dev;
2428 struct ixgbe_hw *hw = &adapter->hw;
2429 u32 offset;
2430 u16 link;
2431 int bus_info_valid = TRUE;
2432
2433 /* Some devices are behind an internal bridge */
2434 switch (hw->device_id) {
2435 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2436 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2437 goto get_parent_info;
2438 default:
2439 break;
2440 }
2441
2442 ixgbe_get_bus_info(hw);
2443
2444 /*
2445 * Some devices don't use PCI-E, but there is no need
2446 * to display "Unknown" for bus speed and width.
2447 */
2448 switch (hw->mac.type) {
2449 case ixgbe_mac_X550EM_x:
2450 case ixgbe_mac_X550EM_a:
2451 return;
2452 default:
2453 goto display;
2454 }
2455
2456 get_parent_info:
2457 /*
2458 * For the Quad port adapter we need to parse back
2459 * up the PCI tree to find the speed of the expansion
2460 * slot into which this adapter is plugged. A bit more work.
2461 */
2462 dev = device_parent(device_parent(dev));
2463 #if 0
2464 #ifdef IXGBE_DEBUG
2465 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2466 pci_get_slot(dev), pci_get_function(dev));
2467 #endif
2468 dev = device_parent(device_parent(dev));
2469 #ifdef IXGBE_DEBUG
2470 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2471 pci_get_slot(dev), pci_get_function(dev));
2472 #endif
2473 #endif
2474 /* Now get the PCI Express Capabilities offset */
2475 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2476 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2477 /*
2478 * Hmm...can't get PCI-Express capabilities.
2479 * Falling back to default method.
2480 */
2481 bus_info_valid = FALSE;
2482 ixgbe_get_bus_info(hw);
2483 goto display;
2484 }
2485 /* ...and read the Link Status Register */
2486 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2487 offset + PCIE_LCSR) >> 16;
2488 ixgbe_set_pci_config_data_generic(hw, link);
2489
2490 display:
2491 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2492 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2493 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2494 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2495 "Unknown"),
2496 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2497 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2498 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2499 "Unknown"));
2500
2501 if (bus_info_valid) {
2502 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2503 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2504 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2505 device_printf(dev, "PCI-Express bandwidth available"
2506 " for this card\n is not sufficient for"
2507 " optimal performance.\n");
2508 device_printf(dev, "For optimal performance a x8 "
2509 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2510 }
2511 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2512 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2513 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2514 device_printf(dev, "PCI-Express bandwidth available"
2515 " for this card\n is not sufficient for"
2516 " optimal performance.\n");
2517 device_printf(dev, "For optimal performance a x8 "
2518 "PCIE Gen3 slot is required.\n");
2519 }
2520 } else
2521 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2522
2523 return;
2524 } /* ixgbe_get_slot_info */
2525
2526 /************************************************************************
2527 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2528 ************************************************************************/
2529 static inline void
2530 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2531 {
2532 struct ixgbe_hw *hw = &adapter->hw;
2533 struct ix_queue *que = &adapter->queues[vector];
2534 u64 queue = (u64)(1ULL << vector);
2535 u32 mask;
2536
2537 mutex_enter(&que->dc_mtx);
2538 if (que->disabled_count > 0 && --que->disabled_count > 0)
2539 goto out;
2540
2541 if (hw->mac.type == ixgbe_mac_82598EB) {
2542 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2543 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2544 } else {
2545 mask = (queue & 0xFFFFFFFF);
2546 if (mask)
2547 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2548 mask = (queue >> 32);
2549 if (mask)
2550 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2551 }
2552 out:
2553 mutex_exit(&que->dc_mtx);
2554 } /* ixgbe_enable_queue */
2555
2556 /************************************************************************
2557 * ixgbe_disable_queue_internal
2558 ************************************************************************/
2559 static inline void
2560 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2561 {
2562 struct ixgbe_hw *hw = &adapter->hw;
2563 struct ix_queue *que = &adapter->queues[vector];
2564 u64 queue = (u64)(1ULL << vector);
2565 u32 mask;
2566
2567 mutex_enter(&que->dc_mtx);
2568
2569 if (que->disabled_count > 0) {
2570 if (nestok)
2571 que->disabled_count++;
2572 goto out;
2573 }
2574 que->disabled_count++;
2575
2576 if (hw->mac.type == ixgbe_mac_82598EB) {
2577 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2578 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2579 } else {
2580 mask = (queue & 0xFFFFFFFF);
2581 if (mask)
2582 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2583 mask = (queue >> 32);
2584 if (mask)
2585 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2586 }
2587 out:
2588 mutex_exit(&que->dc_mtx);
2589 } /* ixgbe_disable_queue_internal */
2590
2591 /************************************************************************
2592 * ixgbe_disable_queue
2593 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/*
	 * Counted (nestable) disable; each call must be balanced by
	 * a matching ixgbe_enable_queue().
	 */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2600
2601 /************************************************************************
2602 * ixgbe_sched_handle_que - schedule deferred packet processing
2603 ************************************************************************/
2604 static inline void
2605 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2606 {
2607
2608 if(que->txrx_use_workqueue) {
2609 /*
2610 * adapter->que_wq is bound to each CPU instead of
2611 * each NIC queue to reduce workqueue kthread. As we
2612 * should consider about interrupt affinity in this
2613 * function, the workqueue kthread must be WQ_PERCPU.
2614 * If create WQ_PERCPU workqueue kthread for each NIC
2615 * queue, that number of created workqueue kthread is
2616 * (number of used NIC queue) * (number of CPUs) =
2617 * (number of CPUs) ^ 2 most often.
2618 *
2619 * The same NIC queue's interrupts are avoided by
2620 * masking the queue's interrupt. And different
2621 * NIC queue's interrupts use different struct work
2622 * (que->wq_cookie). So, "enqueued flag" to avoid
2623 * twice workqueue_enqueue() is not required .
2624 */
2625 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2626 } else {
2627 softint_schedule(que->que_si);
2628 }
2629 }
2630
2631 /************************************************************************
2632 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2633 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/*
	 * Mask this queue's interrupt; it is re-enabled either below (when
	 * there is no more work) or by the deferred handler.
	 */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/*
	 * Average bytes per packet over the last interval; take the larger
	 * of the TX and RX averages as the moderation basis.
	 */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset per-interval byte/packet accounting */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Defer remaining RX/TX work, or unmask the queue if there is none */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2732
2733 /************************************************************************
2734 * ixgbe_media_status - Media Ioctl callback
2735 *
2736 * Called whenever the user queries the status of
2737 * the interface using ifconfig.
2738 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T / BASE-TX) physical layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2888
2889 /************************************************************************
2890 * ixgbe_media_change - Media Ioctl callback
2891 *
2892 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
2894 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Media selection is not supported on backplane PHYs */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Auto: advertise everything the link supports */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the selection in adapter->advertise using the driver's
	 * bit encoding: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M,
	 * 0x10 = 2.5G, 0x20 = 5G.  0 means "autonegotiate".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
2997
2998 /************************************************************************
2999 * ixgbe_set_promisc
3000 ************************************************************************/
/*
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE) from the
 * interface flags and the current multicast filter population.  Note FCTRL
 * may be written twice: once with promiscuous bits cleared, then again with
 * UPE/MPE set if IFF_PROMISC or IFF_ALLMULTI is active.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast entries, capped at the hardware limit */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* Filter table fits: multicast promiscuous is not needed */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
3040
3041 /************************************************************************
3042 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3043 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: defer to the link softint */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* Causes below do not exist on 82598 */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via GPI SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor (TS) bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed fiber link change (82599 only) */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3165
3166 static void
3167 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3168 {
3169
3170 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3171 itr |= itr << 16;
3172 else
3173 itr |= IXGBE_EITR_CNT_WDIS;
3174
3175 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3176 }
3177
3178
3179 /************************************************************************
3180 * ixgbe_sysctl_interrupt_rate_handler
3181 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No tuning while the firmware is in recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Extract the current interval field (bits 3..11) from EITR */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	/* Convert interval to interrupts/sec; 0 means "no limit" */
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	/* Write path: translate the requested rate back to an interval */
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	/* NOTE(review): only this queue's EITR is updated, not all queues */
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3232
3233 const struct sysctlnode *
3234 ixgbe_sysctl_instance(struct adapter *adapter)
3235 {
3236 const char *dvname;
3237 struct sysctllog **log;
3238 int rc;
3239 const struct sysctlnode *rnode;
3240
3241 if (adapter->sysctltop != NULL)
3242 return adapter->sysctltop;
3243
3244 log = &adapter->sysctllog;
3245 dvname = device_xname(adapter->dev);
3246
3247 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3248 0, CTLTYPE_NODE, dvname,
3249 SYSCTL_DESCR("ixgbe information and settings"),
3250 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3251 goto err;
3252
3253 return rnode;
3254 err:
3255 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3256 return NULL;
3257 }
3258
3259 /************************************************************************
3260 * ixgbe_add_device_sysctls
3261 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Debug dump trigger */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Adaptive interrupt moderation toggle; seeded from the loader knob */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X550EM_a devices with a firmware-controlled PHY */
	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3415
3416 /************************************************************************
3417 * ixgbe_allocate_pci_resources
3418 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0, the device register window */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped for teardown */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3456
3457 static void
3458 ixgbe_free_softint(struct adapter *adapter)
3459 {
3460 struct ix_queue *que = adapter->queues;
3461 struct tx_ring *txr = adapter->tx_rings;
3462 int i;
3463
3464 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3465 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3466 if (txr->txr_si != NULL)
3467 softint_disestablish(txr->txr_si);
3468 }
3469 if (que->que_si != NULL)
3470 softint_disestablish(que->que_si);
3471 }
3472 if (adapter->txr_wq != NULL)
3473 workqueue_destroy(adapter->txr_wq);
3474 if (adapter->txr_wq_enqueued != NULL)
3475 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3476 if (adapter->que_wq != NULL)
3477 workqueue_destroy(adapter->que_wq);
3478
3479 /* Drain the Link queue */
3480 if (adapter->link_si != NULL) {
3481 softint_disestablish(adapter->link_si);
3482 adapter->link_si = NULL;
3483 }
3484 if (adapter->mod_si != NULL) {
3485 softint_disestablish(adapter->mod_si);
3486 adapter->mod_si = NULL;
3487 }
3488 if (adapter->msf_si != NULL) {
3489 softint_disestablish(adapter->msf_si);
3490 adapter->msf_si = NULL;
3491 }
3492 if (adapter->phy_si != NULL) {
3493 softint_disestablish(adapter->phy_si);
3494 adapter->phy_si = NULL;
3495 }
3496 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3497 if (adapter->fdir_si != NULL) {
3498 softint_disestablish(adapter->fdir_si);
3499 adapter->fdir_si = NULL;
3500 }
3501 }
3502 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3503 if (adapter->mbx_si != NULL) {
3504 softint_disestablish(adapter->mbx_si);
3505 adapter->mbx_si = NULL;
3506 }
3507 }
3508 } /* ixgbe_free_softint */
3509
3510 /************************************************************************
3511 * ixgbe_detach - Device removal routine
3512 *
3513 * Called when the driver is being removed.
3514 * Stops the adapter and deallocates all the resources
3515 * that were allocated for driver operation.
3516 *
3517 * return 0 on success, positive on failure
3518 ************************************************************************/
3519 static int
3520 ixgbe_detach(device_t dev, int flags)
3521 {
3522 struct adapter *adapter = device_private(dev);
3523 struct rx_ring *rxr = adapter->rx_rings;
3524 struct tx_ring *txr = adapter->tx_rings;
3525 struct ixgbe_hw *hw = &adapter->hw;
3526 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3527 u32 ctrl_ext;
3528 int i;
3529
3530 INIT_DEBUGOUT("ixgbe_detach: begin");
3531 if (adapter->osdep.attached == false)
3532 return 0;
3533
3534 if (ixgbe_pci_iov_detach(dev) != 0) {
3535 device_printf(dev, "SR-IOV in use; detach first.\n");
3536 return (EBUSY);
3537 }
3538
3539 /* Stop the interface. Callouts are stopped in it. */
3540 ixgbe_ifstop(adapter->ifp, 1);
3541 #if NVLAN > 0
3542 /* Make sure VLANs are not using driver */
3543 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3544 ; /* nothing to do: no VLANs */
3545 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
3546 vlan_ifdetach(adapter->ifp);
3547 else {
3548 aprint_error_dev(dev, "VLANs in use, detach first\n");
3549 return (EBUSY);
3550 }
3551 #endif
3552
3553 pmf_device_deregister(dev);
3554
3555 ether_ifdetach(adapter->ifp);
3556 /* Stop the adapter */
3557 IXGBE_CORE_LOCK(adapter);
3558 ixgbe_setup_low_power_mode(adapter);
3559 IXGBE_CORE_UNLOCK(adapter);
3560
3561 ixgbe_free_softint(adapter);
3562
3563 /* let hardware know driver is unloading */
3564 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3565 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3566 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3567
3568 callout_halt(&adapter->timer, NULL);
3569 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3570 callout_halt(&adapter->recovery_mode_timer, NULL);
3571
3572 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3573 netmap_detach(adapter->ifp);
3574
3575 ixgbe_free_pci_resources(adapter);
3576 #if 0 /* XXX the NetBSD port is probably missing something here */
3577 bus_generic_detach(dev);
3578 #endif
3579 if_detach(adapter->ifp);
3580 if_percpuq_destroy(adapter->ipq);
3581
3582 sysctl_teardown(&adapter->sysctllog);
3583 evcnt_detach(&adapter->efbig_tx_dma_setup);
3584 evcnt_detach(&adapter->mbuf_defrag_failed);
3585 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3586 evcnt_detach(&adapter->einval_tx_dma_setup);
3587 evcnt_detach(&adapter->other_tx_dma_setup);
3588 evcnt_detach(&adapter->eagain_tx_dma_setup);
3589 evcnt_detach(&adapter->enomem_tx_dma_setup);
3590 evcnt_detach(&adapter->watchdog_events);
3591 evcnt_detach(&adapter->tso_err);
3592 evcnt_detach(&adapter->link_irq);
3593 evcnt_detach(&adapter->link_sicount);
3594 evcnt_detach(&adapter->mod_sicount);
3595 evcnt_detach(&adapter->msf_sicount);
3596 evcnt_detach(&adapter->phy_sicount);
3597
3598 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3599 if (i < __arraycount(stats->mpc)) {
3600 evcnt_detach(&stats->mpc[i]);
3601 if (hw->mac.type == ixgbe_mac_82598EB)
3602 evcnt_detach(&stats->rnbc[i]);
3603 }
3604 if (i < __arraycount(stats->pxontxc)) {
3605 evcnt_detach(&stats->pxontxc[i]);
3606 evcnt_detach(&stats->pxonrxc[i]);
3607 evcnt_detach(&stats->pxofftxc[i]);
3608 evcnt_detach(&stats->pxoffrxc[i]);
3609 if (hw->mac.type >= ixgbe_mac_82599EB)
3610 evcnt_detach(&stats->pxon2offc[i]);
3611 }
3612 }
3613
3614 txr = adapter->tx_rings;
3615 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3616 evcnt_detach(&adapter->queues[i].irqs);
3617 evcnt_detach(&adapter->queues[i].handleq);
3618 evcnt_detach(&adapter->queues[i].req);
3619 evcnt_detach(&txr->no_desc_avail);
3620 evcnt_detach(&txr->total_packets);
3621 evcnt_detach(&txr->tso_tx);
3622 #ifndef IXGBE_LEGACY_TX
3623 evcnt_detach(&txr->pcq_drops);
3624 #endif
3625
3626 if (i < __arraycount(stats->qprc)) {
3627 evcnt_detach(&stats->qprc[i]);
3628 evcnt_detach(&stats->qptc[i]);
3629 evcnt_detach(&stats->qbrc[i]);
3630 evcnt_detach(&stats->qbtc[i]);
3631 if (hw->mac.type >= ixgbe_mac_82599EB)
3632 evcnt_detach(&stats->qprdc[i]);
3633 }
3634
3635 evcnt_detach(&rxr->rx_packets);
3636 evcnt_detach(&rxr->rx_bytes);
3637 evcnt_detach(&rxr->rx_copies);
3638 evcnt_detach(&rxr->no_jmbuf);
3639 evcnt_detach(&rxr->rx_discarded);
3640 }
3641 evcnt_detach(&stats->ipcs);
3642 evcnt_detach(&stats->l4cs);
3643 evcnt_detach(&stats->ipcs_bad);
3644 evcnt_detach(&stats->l4cs_bad);
3645 evcnt_detach(&stats->intzero);
3646 evcnt_detach(&stats->legint);
3647 evcnt_detach(&stats->crcerrs);
3648 evcnt_detach(&stats->illerrc);
3649 evcnt_detach(&stats->errbc);
3650 evcnt_detach(&stats->mspdc);
3651 if (hw->mac.type >= ixgbe_mac_X550)
3652 evcnt_detach(&stats->mbsdc);
3653 evcnt_detach(&stats->mpctotal);
3654 evcnt_detach(&stats->mlfc);
3655 evcnt_detach(&stats->mrfc);
3656 evcnt_detach(&stats->rlec);
3657 evcnt_detach(&stats->lxontxc);
3658 evcnt_detach(&stats->lxonrxc);
3659 evcnt_detach(&stats->lxofftxc);
3660 evcnt_detach(&stats->lxoffrxc);
3661
3662 /* Packet Reception Stats */
3663 evcnt_detach(&stats->tor);
3664 evcnt_detach(&stats->gorc);
3665 evcnt_detach(&stats->tpr);
3666 evcnt_detach(&stats->gprc);
3667 evcnt_detach(&stats->mprc);
3668 evcnt_detach(&stats->bprc);
3669 evcnt_detach(&stats->prc64);
3670 evcnt_detach(&stats->prc127);
3671 evcnt_detach(&stats->prc255);
3672 evcnt_detach(&stats->prc511);
3673 evcnt_detach(&stats->prc1023);
3674 evcnt_detach(&stats->prc1522);
3675 evcnt_detach(&stats->ruc);
3676 evcnt_detach(&stats->rfc);
3677 evcnt_detach(&stats->roc);
3678 evcnt_detach(&stats->rjc);
3679 evcnt_detach(&stats->mngprc);
3680 evcnt_detach(&stats->mngpdc);
3681 evcnt_detach(&stats->xec);
3682
3683 /* Packet Transmission Stats */
3684 evcnt_detach(&stats->gotc);
3685 evcnt_detach(&stats->tpt);
3686 evcnt_detach(&stats->gptc);
3687 evcnt_detach(&stats->bptc);
3688 evcnt_detach(&stats->mptc);
3689 evcnt_detach(&stats->mngptc);
3690 evcnt_detach(&stats->ptc64);
3691 evcnt_detach(&stats->ptc127);
3692 evcnt_detach(&stats->ptc255);
3693 evcnt_detach(&stats->ptc511);
3694 evcnt_detach(&stats->ptc1023);
3695 evcnt_detach(&stats->ptc1522);
3696
3697 ixgbe_free_transmit_structures(adapter);
3698 ixgbe_free_receive_structures(adapter);
3699 for (i = 0; i < adapter->num_queues; i++) {
3700 struct ix_queue * que = &adapter->queues[i];
3701 mutex_destroy(&que->dc_mtx);
3702 }
3703 free(adapter->queues, M_DEVBUF);
3704 free(adapter->mta, M_DEVBUF);
3705
3706 IXGBE_CORE_LOCK_DESTROY(adapter);
3707
3708 return (0);
3709 } /* ixgbe_detach */
3710
3711 /************************************************************************
3712 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3713 *
3714 * Prepare the adapter/port for LPLU and/or WoL
3715 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Called from the suspend/shutdown path with the core lock held. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/*
		 * Block PHY resets while entering LPLU so the low-power
		 * link-up state programmed below is not undone.
		 */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/*
		 * No Wake-on-LAN configured: power the PHY down and clear
		 * both the wake filter (WUFC) and wake control (WUC) regs.
		 */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success, otherwise the enter_lplu() error code. */
	return error;
} /* ixgbe_setup_low_power_mode */
3770
3771 /************************************************************************
3772 * ixgbe_shutdown - Shutdown entry point
3773 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * FreeBSD-style shutdown entry point, compiled out in the NetBSD port;
 * the equivalent work is done through the pmf(9) suspend hook instead.
 * Puts the port into its low-power / Wake-on-LAN state.
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3790
3791 /************************************************************************
3792 * ixgbe_suspend
3793 *
3794 * From D0 to D3
3795 ************************************************************************/
3796 static bool
3797 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3798 {
3799 struct adapter *adapter = device_private(dev);
3800 int error = 0;
3801
3802 INIT_DEBUGOUT("ixgbe_suspend: begin");
3803
3804 IXGBE_CORE_LOCK(adapter);
3805
3806 error = ixgbe_setup_low_power_mode(adapter);
3807
3808 IXGBE_CORE_UNLOCK(adapter);
3809
3810 return (error);
3811 } /* ixgbe_suspend */
3812
3813 /************************************************************************
3814 * ixgbe_resume
3815 *
3816 * From D3 to D0
3817 ************************************************************************/
3818 static bool
3819 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3820 {
3821 struct adapter *adapter = device_private(dev);
3822 struct ifnet *ifp = adapter->ifp;
3823 struct ixgbe_hw *hw = &adapter->hw;
3824 u32 wus;
3825
3826 INIT_DEBUGOUT("ixgbe_resume: begin");
3827
3828 IXGBE_CORE_LOCK(adapter);
3829
3830 /* Read & clear WUS register */
3831 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3832 if (wus)
3833 device_printf(dev, "Woken up by (WUS): %#010x\n",
3834 IXGBE_READ_REG(hw, IXGBE_WUS));
3835 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3836 /* And clear WUFC until next low-power transition */
3837 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3838
3839 /*
3840 * Required after D3->D0 transition;
3841 * will re-advertise all previous advertised speeds
3842 */
3843 if (ifp->if_flags & IFF_UP)
3844 ixgbe_init_locked(adapter);
3845
3846 IXGBE_CORE_UNLOCK(adapter);
3847
3848 return true;
3849 } /* ixgbe_resume */
3850
3851 /*
3852 * Set the various hardware offload abilities.
3853 *
3854 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3855 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3856 * mbuf offload flags the driver will understand.
3857 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX intentionally empty in the NetBSD port: there is no
	 * FreeBSD-style if_hwassist field to program here.  Presumably
	 * offload selection is driven by if_capenable elsewhere in this
	 * driver -- TODO(review): confirm and either implement or remove
	 * this stub.
	 */
}
3863
3864 /************************************************************************
3865 * ixgbe_init_locked - Init entry point
3866 *
3867 * Used in two ways: It is used by the stack as an init
3868 * entry point in network interface structure. It is also
3869 * used by the driver as a hw/sw initialization routine to
3870 * get to a consistent state.
3871 *
3872 * return 0 on success, positive on failure
3873 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the MAC and the local timer before reprogramming. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	/* Reset the per-queue interrupt-disable bookkeeping. */
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the enable bit to stick. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4112
4113 /************************************************************************
4114 * ixgbe_init
4115 ************************************************************************/
4116 static int
4117 ixgbe_init(struct ifnet *ifp)
4118 {
4119 struct adapter *adapter = ifp->if_softc;
4120
4121 IXGBE_CORE_LOCK(adapter);
4122 ixgbe_init_locked(adapter);
4123 IXGBE_CORE_UNLOCK(adapter);
4124
4125 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4126 } /* ixgbe_init */
4127
4128 /************************************************************************
4129 * ixgbe_set_ivar
4130 *
4131 * Setup the correct IVAR register for a particular MSI-X interrupt
4132 * (yes this is all very magic and confusing :)
4133 * - entry is the register array entry
4134 * - vector is the MSI-X vector for this queue
4135 * - type is RX/TX/MISC
4136 ************************************************************************/
4137 static void
4138 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4139 {
4140 struct ixgbe_hw *hw = &adapter->hw;
4141 u32 ivar, index;
4142
4143 vector |= IXGBE_IVAR_ALLOC_VAL;
4144
4145 switch (hw->mac.type) {
4146 case ixgbe_mac_82598EB:
4147 if (type == -1)
4148 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4149 else
4150 entry += (type * 64);
4151 index = (entry >> 2) & 0x1F;
4152 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4153 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4154 ivar |= (vector << (8 * (entry & 0x3)));
4155 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4156 break;
4157 case ixgbe_mac_82599EB:
4158 case ixgbe_mac_X540:
4159 case ixgbe_mac_X550:
4160 case ixgbe_mac_X550EM_x:
4161 case ixgbe_mac_X550EM_a:
4162 if (type == -1) { /* MISC IVAR */
4163 index = (entry & 1) * 8;
4164 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4165 ivar &= ~(0xFF << index);
4166 ivar |= (vector << index);
4167 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4168 } else { /* RX/TX IVARS */
4169 index = (16 * (entry & 1)) + (8 * type);
4170 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4171 ivar &= ~(0xFF << index);
4172 ivar |= (vector << index);
4173 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4174 }
4175 break;
4176 default:
4177 break;
4178 }
4179 } /* ixgbe_set_ivar */
4180
4181 /************************************************************************
4182 * ixgbe_configure_ivars
4183 ************************************************************************/
4184 static void
4185 ixgbe_configure_ivars(struct adapter *adapter)
4186 {
4187 struct ix_queue *que = adapter->queues;
4188 u32 newitr;
4189
4190 if (ixgbe_max_interrupt_rate > 0)
4191 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4192 else {
4193 /*
4194 * Disable DMA coalescing if interrupt moderation is
4195 * disabled.
4196 */
4197 adapter->dmac = 0;
4198 newitr = 0;
4199 }
4200
4201 for (int i = 0; i < adapter->num_queues; i++, que++) {
4202 struct rx_ring *rxr = &adapter->rx_rings[i];
4203 struct tx_ring *txr = &adapter->tx_rings[i];
4204 /* First the RX queue entry */
4205 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4206 /* ... and the TX */
4207 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4208 /* Set an Initial EITR value */
4209 ixgbe_eitr_write(adapter, que->msix, newitr);
4210 /*
4211 * To eliminate influence of the previous state.
4212 * At this point, Tx/Rx interrupt handler
4213 * (ixgbe_msix_que()) cannot be called, so both
4214 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4215 */
4216 que->eitr_setting = 0;
4217 }
4218
4219 /* For the Link interrupt */
4220 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4221 } /* ixgbe_configure_ivars */
4222
4223 /************************************************************************
4224 * ixgbe_config_gpie
4225 ************************************************************************/
4226 static void
4227 ixgbe_config_gpie(struct adapter *adapter)
4228 {
4229 struct ixgbe_hw *hw = &adapter->hw;
4230 u32 gpie;
4231
4232 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4233
4234 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4235 /* Enable Enhanced MSI-X mode */
4236 gpie |= IXGBE_GPIE_MSIX_MODE
4237 | IXGBE_GPIE_EIAME
4238 | IXGBE_GPIE_PBA_SUPPORT
4239 | IXGBE_GPIE_OCD;
4240 }
4241
4242 /* Fan Failure Interrupt */
4243 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4244 gpie |= IXGBE_SDP1_GPIEN;
4245
4246 /* Thermal Sensor Interrupt */
4247 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4248 gpie |= IXGBE_SDP0_GPIEN_X540;
4249
4250 /* Link detection */
4251 switch (hw->mac.type) {
4252 case ixgbe_mac_82599EB:
4253 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4254 break;
4255 case ixgbe_mac_X550EM_x:
4256 case ixgbe_mac_X550EM_a:
4257 gpie |= IXGBE_SDP0_GPIEN_X540;
4258 break;
4259 default:
4260 break;
4261 }
4262
4263 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4264
4265 } /* ixgbe_config_gpie */
4266
4267 /************************************************************************
4268 * ixgbe_config_delay_values
4269 *
4270 * Requires adapter->max_frame_size to be set.
4271 ************************************************************************/
4272 static void
4273 ixgbe_config_delay_values(struct adapter *adapter)
4274 {
4275 struct ixgbe_hw *hw = &adapter->hw;
4276 u32 rxpb, frame, size, tmp;
4277
4278 frame = adapter->max_frame_size;
4279
4280 /* Calculate High Water */
4281 switch (hw->mac.type) {
4282 case ixgbe_mac_X540:
4283 case ixgbe_mac_X550:
4284 case ixgbe_mac_X550EM_x:
4285 case ixgbe_mac_X550EM_a:
4286 tmp = IXGBE_DV_X540(frame, frame);
4287 break;
4288 default:
4289 tmp = IXGBE_DV(frame, frame);
4290 break;
4291 }
4292 size = IXGBE_BT2KB(tmp);
4293 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4294 hw->fc.high_water[0] = rxpb - size;
4295
4296 /* Now calculate Low Water */
4297 switch (hw->mac.type) {
4298 case ixgbe_mac_X540:
4299 case ixgbe_mac_X550:
4300 case ixgbe_mac_X550EM_x:
4301 case ixgbe_mac_X550EM_a:
4302 tmp = IXGBE_LOW_DV_X540(frame);
4303 break;
4304 default:
4305 tmp = IXGBE_LOW_DV(frame);
4306 break;
4307 }
4308 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4309
4310 hw->fc.pause_time = IXGBE_FC_PAUSE;
4311 hw->fc.send_xon = TRUE;
4312 } /* ixgbe_config_delay_values */
4313
4314 /************************************************************************
4315 * ixgbe_set_multi - Multicast Update
4316 *
4317 * Called whenever multicast address list is updated.
4318 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Rebuild the driver's multicast shadow table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * Walk the ethercom multicast list.  Fall back to ALLMULTI when
	 * the table would overflow or when a range (addrlo != addrhi)
	 * is present, since ranges cannot be expressed in the HW table.
	 */
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Reflect PROMISC/ALLMULTI in the filter control register. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/*
	 * Only program the exact-match table when it did not overflow;
	 * otherwise the MPE bit set above already accepts everything.
	 */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4372
4373 /************************************************************************
4374 * ixgbe_mc_array_itr
4375 *
4376 * An iterator function needed by the multicast shared code.
4377 * It feeds the shared code routine the addresses in the
4378 * array of ixgbe_set_multi() one by one.
4379 ************************************************************************/
4380 static u8 *
4381 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4382 {
4383 struct ixgbe_mc_addr *mta;
4384
4385 mta = (struct ixgbe_mc_addr *)*update_ptr;
4386 *vmdq = mta->vmdq;
4387
4388 *update_ptr = (u8*)(mta + 1);
4389
4390 return (mta->addr);
4391 } /* ixgbe_mc_array_itr */
4392
4393 /************************************************************************
4394 * ixgbe_local_timer - Timer routine
4395 *
4396 * Checks for link status, updates statistics,
4397 * and runs the watchdog check.
4398 ************************************************************************/
/*
 * callout(9) entry point for the periodic timer: take the core lock
 * and run the real timer body, ixgbe_local_timer1().
 */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4408
/*
 * Timer body, called once per second with the core lock held: probes
 * pluggable optics, refreshes link/statistics, aggregates per-ring
 * error counters into the adapter-wide evcnts, and runs the TX
 * watchdog (reset only if every queue appears hung).
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: sum the per-TX-ring DMA-setup and
	 * TSO error tallies into the adapter-wide evcnt totals.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next second. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, mark the interface down, and reinit. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4513
4514 /************************************************************************
4515 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4516 ************************************************************************/
4517 static void
4518 ixgbe_recovery_mode_timer(void *arg)
4519 {
4520 struct adapter *adapter = arg;
4521 struct ixgbe_hw *hw = &adapter->hw;
4522
4523 IXGBE_CORE_LOCK(adapter);
4524 if (ixgbe_fw_recovery_mode(hw)) {
4525 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4526 /* Firmware error detected, entering recovery mode */
4527 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4528
4529 if (hw->adapter_stopped == FALSE)
4530 ixgbe_stop(adapter);
4531 }
4532 } else
4533 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4534
4535 callout_reset(&adapter->recovery_mode_timer, hz,
4536 ixgbe_recovery_mode_timer, adapter);
4537 IXGBE_CORE_UNLOCK(adapter);
4538 } /* ixgbe_recovery_mode_timer */
4539
4540 /************************************************************************
4541 * ixgbe_sfp_probe
4542 *
4543 * Determine if a port had optics inserted.
4544 ************************************************************************/
4545 static bool
4546 ixgbe_sfp_probe(struct adapter *adapter)
4547 {
4548 struct ixgbe_hw *hw = &adapter->hw;
4549 device_t dev = adapter->dev;
4550 bool result = FALSE;
4551
4552 if ((hw->phy.type == ixgbe_phy_nl) &&
4553 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4554 s32 ret = hw->phy.ops.identify_sfp(hw);
4555 if (ret)
4556 goto out;
4557 ret = hw->phy.ops.reset(hw);
4558 adapter->sfp_probe = FALSE;
4559 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4560 device_printf(dev,"Unsupported SFP+ module detected!");
4561 device_printf(dev,
4562 "Reload driver with supported module.\n");
4563 goto out;
4564 } else
4565 device_printf(dev, "SFP+ module detected!\n");
4566 /* We now have supported optics */
4567 result = TRUE;
4568 }
4569 out:
4570
4571 return (result);
4572 } /* ixgbe_sfp_probe */
4573
4574 /************************************************************************
4575 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4576 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * With the crosstalk fix, confirm via the SDP cage-present pin
	 * that a module is really inserted before touching the I2C bus;
	 * a spurious interrupt with an empty cage is ignored.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 uses a PHY reset; newer MACs program the SFP setup. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Kick the multispeed-fiber handler to renegotiate the link. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4624
4625
/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 *   Re-reads the supported PHY layer, restarts link setup and rebuilds
 *   the ifmedia list, since a newly inserted module may support a
 *   different set of speeds.  Takes the core lock for the duration.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	/* If nothing is explicitly advertised, ask the MAC what it can do */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */
4656
4657 /************************************************************************
4658 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4659 ************************************************************************/
4660 static void
4661 ixgbe_handle_phy(void *context)
4662 {
4663 struct adapter *adapter = context;
4664 struct ixgbe_hw *hw = &adapter->hw;
4665 int error;
4666
4667 ++adapter->phy_sicount.ev_count;
4668 error = hw->phy.ops.handle_lasi(hw);
4669 if (error == IXGBE_ERR_OVERTEMP)
4670 device_printf(adapter->dev,
4671 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4672 " PHY will downshift to lower power state!\n");
4673 else if (error)
4674 device_printf(adapter->dev,
4675 "Error handling LASI interrupt: %d\n", error);
4676 } /* ixgbe_handle_phy */
4677
/*
 * if_stop handler: quiesce the interface, wrapping ixgbe_stop()
 * with the core lock as it requires.  The "disable" argument is
 * part of the ifnet callback signature and is not used here.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
4687
/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 *
 *   Must be called with the core lock held (asserted below).
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so that the following ixgbe_stop_adapter()
	 * call performs its full work instead of short-circuiting.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4729
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 *
 * Must be called with the core lock held (asserted below).
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				/* Translate the link speed to a string */
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Quiesce queued deferred work; see ixgbe_drain_all() */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4818
4819 /************************************************************************
4820 * ixgbe_config_dmac - Configure DMA Coalescing
4821 ************************************************************************/
4822 static void
4823 ixgbe_config_dmac(struct adapter *adapter)
4824 {
4825 struct ixgbe_hw *hw = &adapter->hw;
4826 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4827
4828 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4829 return;
4830
4831 if (dcfg->watchdog_timer ^ adapter->dmac ||
4832 dcfg->link_speed ^ adapter->link_speed) {
4833 dcfg->watchdog_timer = adapter->dmac;
4834 dcfg->fcoe_en = false;
4835 dcfg->link_speed = adapter->link_speed;
4836 dcfg->num_tcs = 1;
4837
4838 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4839 dcfg->watchdog_timer, dcfg->link_speed);
4840
4841 hw->mac.ops.dmac_config(hw);
4842 }
4843 } /* ixgbe_config_dmac */
4844
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Builds the EIMS mask from the MAC type and enabled features,
 *   programs MSI-X auto-clear, then enables every queue vector.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Everything except the queue bits, which are enabled per queue */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4928
4929 /************************************************************************
4930 * ixgbe_disable_intr_internal
4931 ************************************************************************/
4932 static void
4933 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
4934 {
4935 struct ix_queue *que = adapter->queues;
4936
4937 /* disable interrupts other than queues */
4938 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4939
4940 if (adapter->msix_mem)
4941 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4942
4943 for (int i = 0; i < adapter->num_queues; i++, que++)
4944 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
4945
4946 IXGBE_WRITE_FLUSH(&adapter->hw);
4947
4948 } /* ixgbe_do_disable_intr_internal */
4949
/************************************************************************
 * ixgbe_disable_intr - Mask all interrupts (nesting allowed)
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4959
/************************************************************************
 * ixgbe_ensure_disabled_intr - Mask all interrupts (no nesting)
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4969
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Handler for single-vector (INTx/MSI) operation: services the one
 *   TX/RX queue pair plus every "other" cause (link, SFP, fan, PHY).
 *   Returns 0 when the interrupt was not ours, 1 when it was handled.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Not our interrupt (shared line); re-enable and bail */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		/* Re-arm the fan-failure (SDP1) cause */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack and defer to mod softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber: ack and defer to msf softint */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Defer remaining RX/TX work, or re-enable interrupts right away */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
5063
/************************************************************************
 * ixgbe_free_pciintr_resources
 *
 *   Disestablishes every queue interrupt handler, then the link/legacy
 *   handler, and finally releases the allocated interrupt vectors.
 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Release the vectors themselves after all handlers are gone */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
5102
5103 /************************************************************************
5104 * ixgbe_free_pci_resources
5105 ************************************************************************/
5106 static void
5107 ixgbe_free_pci_resources(struct adapter *adapter)
5108 {
5109
5110 ixgbe_free_pciintr_resources(adapter);
5111
5112 if (adapter->osdep.mem_size != 0) {
5113 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5114 adapter->osdep.mem_bus_space_handle,
5115 adapter->osdep.mem_size);
5116 }
5117
5118 } /* ixgbe_free_pci_resources */
5119
5120 /************************************************************************
5121 * ixgbe_set_sysctl_value
5122 ************************************************************************/
5123 static void
5124 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5125 const char *description, int *limit, int value)
5126 {
5127 device_t dev = adapter->dev;
5128 struct sysctllog **log;
5129 const struct sysctlnode *rnode, *cnode;
5130
5131 /*
5132 * It's not required to check recovery mode because this function never
5133 * touches hardware.
5134 */
5135
5136 log = &adapter->sysctllog;
5137 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5138 aprint_error_dev(dev, "could not create sysctl root\n");
5139 return;
5140 }
5141 if (sysctl_createv(log, 0, &rnode, &cnode,
5142 CTLFLAG_READWRITE, CTLTYPE_INT,
5143 name, SYSCTL_DESCR(description),
5144 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5145 aprint_error_dev(dev, "could not create sysctl\n");
5146 *limit = value;
5147 } /* ixgbe_set_sysctl_value */
5148
5149 /************************************************************************
5150 * ixgbe_sysctl_flowcntl
5151 *
5152 * SYSCTL wrapper around setting Flow Control
5153 ************************************************************************/
5154 static int
5155 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5156 {
5157 struct sysctlnode node = *rnode;
5158 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5159 int error, fc;
5160
5161 if (ixgbe_fw_recovery_mode_swflag(adapter))
5162 return (EPERM);
5163
5164 fc = adapter->hw.fc.current_mode;
5165 node.sysctl_data = &fc;
5166 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5167 if (error != 0 || newp == NULL)
5168 return error;
5169
5170 /* Don't bother if it's not changed */
5171 if (fc == adapter->hw.fc.current_mode)
5172 return (0);
5173
5174 return ixgbe_set_flowcntl(adapter, fc);
5175 } /* ixgbe_sysctl_flowcntl */
5176
5177 /************************************************************************
5178 * ixgbe_set_flowcntl - Set flow control
5179 *
5180 * Flow control values:
5181 * 0 - off
5182 * 1 - rx pause
5183 * 2 - tx pause
5184 * 3 - full
5185 ************************************************************************/
5186 static int
5187 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5188 {
5189 switch (fc) {
5190 case ixgbe_fc_rx_pause:
5191 case ixgbe_fc_tx_pause:
5192 case ixgbe_fc_full:
5193 adapter->hw.fc.requested_mode = fc;
5194 if (adapter->num_queues > 1)
5195 ixgbe_disable_rx_drop(adapter);
5196 break;
5197 case ixgbe_fc_none:
5198 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5199 if (adapter->num_queues > 1)
5200 ixgbe_enable_rx_drop(adapter);
5201 break;
5202 default:
5203 return (EINVAL);
5204 }
5205
5206 #if 0 /* XXX NetBSD */
5207 /* Don't autoneg if forcing a value */
5208 adapter->hw.fc.disable_fc_autoneg = TRUE;
5209 #endif
5210 ixgbe_fc_enable(&adapter->hw);
5211
5212 return (0);
5213 } /* ixgbe_set_flowcntl */
5214
5215 /************************************************************************
5216 * ixgbe_enable_rx_drop
5217 *
5218 * Enable the hardware to drop packets when the buffer is
5219 * full. This is useful with multiqueue, so that no single
5220 * queue being full stalls the entire RX engine. We only
5221 * enable this when Multiqueue is enabled AND Flow Control
5222 * is disabled.
5223 ************************************************************************/
5224 static void
5225 ixgbe_enable_rx_drop(struct adapter *adapter)
5226 {
5227 struct ixgbe_hw *hw = &adapter->hw;
5228 struct rx_ring *rxr;
5229 u32 srrctl;
5230
5231 for (int i = 0; i < adapter->num_queues; i++) {
5232 rxr = &adapter->rx_rings[i];
5233 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5234 srrctl |= IXGBE_SRRCTL_DROP_EN;
5235 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5236 }
5237
5238 /* enable drop for each vf */
5239 for (int i = 0; i < adapter->num_vfs; i++) {
5240 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5241 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5242 IXGBE_QDE_ENABLE));
5243 }
5244 } /* ixgbe_enable_rx_drop */
5245
5246 /************************************************************************
5247 * ixgbe_disable_rx_drop
5248 ************************************************************************/
5249 static void
5250 ixgbe_disable_rx_drop(struct adapter *adapter)
5251 {
5252 struct ixgbe_hw *hw = &adapter->hw;
5253 struct rx_ring *rxr;
5254 u32 srrctl;
5255
5256 for (int i = 0; i < adapter->num_queues; i++) {
5257 rxr = &adapter->rx_rings[i];
5258 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5259 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5260 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5261 }
5262
5263 /* disable drop for each vf */
5264 for (int i = 0; i < adapter->num_vfs; i++) {
5265 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5266 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5267 }
5268 } /* ixgbe_disable_rx_drop */
5269
5270 /************************************************************************
5271 * ixgbe_sysctl_advertise
5272 *
5273 * SYSCTL wrapper around setting advertised speed
5274 ************************************************************************/
5275 static int
5276 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5277 {
5278 struct sysctlnode node = *rnode;
5279 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5280 int error = 0, advertise;
5281
5282 if (ixgbe_fw_recovery_mode_swflag(adapter))
5283 return (EPERM);
5284
5285 advertise = adapter->advertise;
5286 node.sysctl_data = &advertise;
5287 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5288 if (error != 0 || newp == NULL)
5289 return error;
5290
5291 return ixgbe_set_advertise(adapter, advertise);
5292 } /* ixgbe_sysctl_advertise */
5293
5294 /************************************************************************
5295 * ixgbe_set_advertise - Control advertised link speed
5296 *
5297 * Flags:
5298 * 0x00 - Default (all capable link speed)
5299 * 0x01 - advertise 100 Mb
5300 * 0x02 - advertise 1G
5301 * 0x04 - advertise 10G
5302 * 0x08 - advertise 10 Mb
5303 * 0x10 - advertise 2.5G
5304 * 0x20 - advertise 5G
5305 ************************************************************************/
5306 static int
5307 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5308 {
5309 device_t dev;
5310 struct ixgbe_hw *hw;
5311 ixgbe_link_speed speed = 0;
5312 ixgbe_link_speed link_caps = 0;
5313 s32 err = IXGBE_NOT_IMPLEMENTED;
5314 bool negotiate = FALSE;
5315
5316 /* Checks to validate new value */
5317 if (adapter->advertise == advertise) /* no change */
5318 return (0);
5319
5320 dev = adapter->dev;
5321 hw = &adapter->hw;
5322
5323 /* No speed changes for backplane media */
5324 if (hw->phy.media_type == ixgbe_media_type_backplane)
5325 return (ENODEV);
5326
5327 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5328 (hw->phy.multispeed_fiber))) {
5329 device_printf(dev,
5330 "Advertised speed can only be set on copper or "
5331 "multispeed fiber media types.\n");
5332 return (EINVAL);
5333 }
5334
5335 if (advertise < 0x0 || advertise > 0x2f) {
5336 device_printf(dev,
5337 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5338 return (EINVAL);
5339 }
5340
5341 if (hw->mac.ops.get_link_capabilities) {
5342 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5343 &negotiate);
5344 if (err != IXGBE_SUCCESS) {
5345 device_printf(dev, "Unable to determine supported advertise speeds\n");
5346 return (ENODEV);
5347 }
5348 }
5349
5350 /* Set new value and report new advertised mode */
5351 if (advertise & 0x1) {
5352 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5353 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5354 return (EINVAL);
5355 }
5356 speed |= IXGBE_LINK_SPEED_100_FULL;
5357 }
5358 if (advertise & 0x2) {
5359 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5360 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5361 return (EINVAL);
5362 }
5363 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5364 }
5365 if (advertise & 0x4) {
5366 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5367 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5368 return (EINVAL);
5369 }
5370 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5371 }
5372 if (advertise & 0x8) {
5373 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5374 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5375 return (EINVAL);
5376 }
5377 speed |= IXGBE_LINK_SPEED_10_FULL;
5378 }
5379 if (advertise & 0x10) {
5380 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5381 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5382 return (EINVAL);
5383 }
5384 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5385 }
5386 if (advertise & 0x20) {
5387 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5388 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5389 return (EINVAL);
5390 }
5391 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5392 }
5393 if (advertise == 0)
5394 speed = link_caps; /* All capable link speed */
5395
5396 hw->mac.autotry_restart = TRUE;
5397 hw->mac.ops.setup_link(hw, speed, TRUE);
5398 adapter->advertise = advertise;
5399
5400 return (0);
5401 } /* ixgbe_set_advertise */
5402
5403 /************************************************************************
5404 * ixgbe_get_advertise - Get current advertised speed settings
5405 *
5406 * Formatted for sysctl usage.
5407 * Flags:
5408 * 0x01 - advertise 100 Mb
5409 * 0x02 - advertise 1G
5410 * 0x04 - advertise 10G
5411 * 0x08 - advertise 10 Mb (yes, Mb)
5412 * 0x10 - advertise 2.5G
5413 * 0x20 - advertise 5G
5414 ************************************************************************/
5415 static int
5416 ixgbe_get_advertise(struct adapter *adapter)
5417 {
5418 struct ixgbe_hw *hw = &adapter->hw;
5419 int speed;
5420 ixgbe_link_speed link_caps = 0;
5421 s32 err;
5422 bool negotiate = FALSE;
5423
5424 /*
5425 * Advertised speed means nothing unless it's copper or
5426 * multi-speed fiber
5427 */
5428 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5429 !(hw->phy.multispeed_fiber))
5430 return (0);
5431
5432 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5433 if (err != IXGBE_SUCCESS)
5434 return (0);
5435
5436 speed =
5437 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5438 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5439 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5440 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5441 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5442 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5443
5444 return speed;
5445 } /* ixgbe_get_advertise */
5446
5447 /************************************************************************
5448 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5449 *
5450 * Control values:
5451 * 0/1 - off / on (use default value of 1000)
5452 *
5453 * Legal timer values are:
5454 * 50,100,250,500,1000,2000,5000,10000
5455 *
5456 * Turning off interrupt moderation will also turn this off.
5457 ************************************************************************/
5458 static int
5459 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5460 {
5461 struct sysctlnode node = *rnode;
5462 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5463 struct ifnet *ifp = adapter->ifp;
5464 int error;
5465 int newval;
5466
5467 if (ixgbe_fw_recovery_mode_swflag(adapter))
5468 return (EPERM);
5469
5470 newval = adapter->dmac;
5471 node.sysctl_data = &newval;
5472 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5473 if ((error) || (newp == NULL))
5474 return (error);
5475
5476 switch (newval) {
5477 case 0:
5478 /* Disabled */
5479 adapter->dmac = 0;
5480 break;
5481 case 1:
5482 /* Enable and use default */
5483 adapter->dmac = 1000;
5484 break;
5485 case 50:
5486 case 100:
5487 case 250:
5488 case 500:
5489 case 1000:
5490 case 2000:
5491 case 5000:
5492 case 10000:
5493 /* Legal values - allow */
5494 adapter->dmac = newval;
5495 break;
5496 default:
5497 /* Do nothing, illegal value */
5498 return (EINVAL);
5499 }
5500
5501 /* Re-initialize hardware if it's already running */
5502 if (ifp->if_flags & IFF_RUNNING)
5503 ifp->if_init(ifp);
5504
5505 return (0);
5506 }
5507
5508 #ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 *
 * NOTE(review): the real body below is under "#ifdef notyet" and still
 * uses FreeBSD idioms (req->newp, DEVICE_SUSPEND/DEVICE_RESUME), so it
 * needs porting before it can be enabled on NetBSD.  The compiled
 * version is a no-op that returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5553 #endif
5554
/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 * Sysctl to enable/disable the WoL capability,
 * if supported by the adapter.
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool new_wol_enabled;
	int error = 0;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	/*
	 * NOTE(review): sysctl_lookup() copies according to the node's
	 * declared type/size; storing through a bool here assumes the node
	 * was created bool-sized — verify against its sysctl_createv() call.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	/* No change requested */
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware that lacks support for it */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
5593
5594 /************************************************************************
5595 * ixgbe_sysctl_wufc - Wake Up Filter Control
5596 *
5597 * Sysctl to enable/disable the types of packets that the
5598 * adapter will wake up on upon receipt.
5599 * Flags:
5600 * 0x1 - Link Status Change
5601 * 0x2 - Magic Packet
5602 * 0x4 - Direct Exact
5603 * 0x8 - Directed Multicast
5604 * 0x10 - Broadcast
5605 * 0x20 - ARP/IPv4 Request Packet
5606 * 0x40 - Direct IPv4 Packet
5607 * 0x80 - Direct IPv6 Packet
5608 *
5609 * Settings not listed above will cause the sysctl to return an error.
5610 ************************************************************************/
5611 static int
5612 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5613 {
5614 struct sysctlnode node = *rnode;
5615 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5616 int error = 0;
5617 u32 new_wufc;
5618
5619 /*
5620 * It's not required to check recovery mode because this function never
5621 * touches hardware.
5622 */
5623 new_wufc = adapter->wufc;
5624 node.sysctl_data = &new_wufc;
5625 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5626 if ((error) || (newp == NULL))
5627 return (error);
5628 if (new_wufc == adapter->wufc)
5629 return (0);
5630
5631 if (new_wufc & 0xffffff00)
5632 return (EINVAL);
5633
5634 new_wufc &= 0xff;
5635 new_wufc |= (0xffffff & adapter->wufc);
5636 adapter->wufc = new_wufc;
5637
5638 return (0);
5639 } /* ixgbe_sysctl_wufc */
5640
5641 #ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config - Dump the RSS redirection table
 *
 * NOTE(review): the body below is under "#ifdef notyet" and relies on
 * FreeBSD's sbuf/req sysctl interfaces, so it needs porting before it
 * can be enabled on NetBSD.  The compiled version is a no-op that
 * returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5701 #endif /* IXGBE_DEBUG */
5702
5703 /************************************************************************
5704 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5705 *
5706 * For X552/X557-AT devices using an external PHY
5707 ************************************************************************/
5708 static int
5709 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5710 {
5711 struct sysctlnode node = *rnode;
5712 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5713 struct ixgbe_hw *hw = &adapter->hw;
5714 int val;
5715 u16 reg;
5716 int error;
5717
5718 if (ixgbe_fw_recovery_mode_swflag(adapter))
5719 return (EPERM);
5720
5721 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5722 device_printf(adapter->dev,
5723 "Device has no supported external thermal sensor.\n");
5724 return (ENODEV);
5725 }
5726
5727 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5728 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5729 device_printf(adapter->dev,
5730 "Error reading from PHY's current temperature register\n");
5731 return (EAGAIN);
5732 }
5733
5734 node.sysctl_data = &val;
5735
5736 /* Shift temp for output */
5737 val = reg >> 8;
5738
5739 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5740 if ((error) || (newp == NULL))
5741 return (error);
5742
5743 return (0);
5744 } /* ixgbe_sysctl_phy_temp */
5745
5746 /************************************************************************
5747 * ixgbe_sysctl_phy_overtemp_occurred
5748 *
5749 * Reports (directly from the PHY) whether the current PHY
5750 * temperature is over the overtemp threshold.
5751 ************************************************************************/
5752 static int
5753 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5754 {
5755 struct sysctlnode node = *rnode;
5756 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5757 struct ixgbe_hw *hw = &adapter->hw;
5758 int val, error;
5759 u16 reg;
5760
5761 if (ixgbe_fw_recovery_mode_swflag(adapter))
5762 return (EPERM);
5763
5764 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5765 device_printf(adapter->dev,
5766 "Device has no supported external thermal sensor.\n");
5767 return (ENODEV);
5768 }
5769
5770 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5771 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5772 device_printf(adapter->dev,
5773 "Error reading from PHY's temperature status register\n");
5774 return (EAGAIN);
5775 }
5776
5777 node.sysctl_data = &val;
5778
5779 /* Get occurrence bit */
5780 val = !!(reg & 0x4000);
5781
5782 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5783 if ((error) || (newp == NULL))
5784 return (error);
5785
5786 return (0);
5787 } /* ixgbe_sysctl_phy_overtemp_occurred */
5788
5789 /************************************************************************
5790 * ixgbe_sysctl_eee_state
5791 *
5792 * Sysctl to set EEE power saving feature
5793 * Values:
5794 * 0 - disable EEE
5795 * 1 - enable EEE
5796 * (none) - get current device EEE state
5797 ************************************************************************/
5798 static int
5799 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5800 {
5801 struct sysctlnode node = *rnode;
5802 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5803 struct ifnet *ifp = adapter->ifp;
5804 device_t dev = adapter->dev;
5805 int curr_eee, new_eee, error = 0;
5806 s32 retval;
5807
5808 if (ixgbe_fw_recovery_mode_swflag(adapter))
5809 return (EPERM);
5810
5811 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5812 node.sysctl_data = &new_eee;
5813 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5814 if ((error) || (newp == NULL))
5815 return (error);
5816
5817 /* Nothing to do */
5818 if (new_eee == curr_eee)
5819 return (0);
5820
5821 /* Not supported */
5822 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5823 return (EINVAL);
5824
5825 /* Bounds checking */
5826 if ((new_eee < 0) || (new_eee > 1))
5827 return (EINVAL);
5828
5829 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5830 if (retval) {
5831 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5832 return (EINVAL);
5833 }
5834
5835 /* Restart auto-neg */
5836 ifp->if_init(ifp);
5837
5838 device_printf(dev, "New EEE state: %d\n", new_eee);
5839
5840 /* Cache new value */
5841 if (new_eee)
5842 adapter->feat_en |= IXGBE_FEATURE_EEE;
5843 else
5844 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5845
5846 return (error);
5847 } /* ixgbe_sysctl_eee_state */
5848
/*
 * PRINTQS(adapter, regname): dump the per-queue register "regname"
 * for every queue on a single line, prefixed with the device name.
 * This must be a macro (not a function) because the register name is
 * pasted into the IXGBE_<regname>(queue) register-offset macro.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5862
5863 /************************************************************************
5864 * ixgbe_print_debug_info
5865 *
5866 * Called only when em_display_debug_stats is enabled.
5867 * Provides a way to take a look at important statistics
5868 * maintained by the driver and hardware.
5869 ************************************************************************/
5870 static void
5871 ixgbe_print_debug_info(struct adapter *adapter)
5872 {
5873 device_t dev = adapter->dev;
5874 struct ixgbe_hw *hw = &adapter->hw;
5875 int table_size;
5876 int i;
5877
5878 switch (adapter->hw.mac.type) {
5879 case ixgbe_mac_X550:
5880 case ixgbe_mac_X550EM_x:
5881 case ixgbe_mac_X550EM_a:
5882 table_size = 128;
5883 break;
5884 default:
5885 table_size = 32;
5886 break;
5887 }
5888
5889 device_printf(dev, "[E]RETA:\n");
5890 for (i = 0; i < table_size; i++) {
5891 if (i < 32)
5892 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5893 IXGBE_RETA(i)));
5894 else
5895 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5896 IXGBE_ERETA(i - 32)));
5897 }
5898
5899 device_printf(dev, "queue:");
5900 for (i = 0; i < adapter->num_queues; i++) {
5901 printf((i == 0) ? "\t" : " ");
5902 printf("%8d", i);
5903 }
5904 printf("\n");
5905 PRINTQS(adapter, RDBAL);
5906 PRINTQS(adapter, RDBAH);
5907 PRINTQS(adapter, RDLEN);
5908 PRINTQS(adapter, SRRCTL);
5909 PRINTQS(adapter, RDH);
5910 PRINTQS(adapter, RDT);
5911 PRINTQS(adapter, RXDCTL);
5912
5913 device_printf(dev, "RQSMR:");
5914 for (i = 0; i < adapter->num_queues / 4; i++) {
5915 printf((i == 0) ? "\t" : " ");
5916 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
5917 }
5918 printf("\n");
5919
5920 device_printf(dev, "disabled_count:");
5921 for (i = 0; i < adapter->num_queues; i++) {
5922 printf((i == 0) ? "\t" : " ");
5923 printf("%8d", adapter->queues[i].disabled_count);
5924 }
5925 printf("\n");
5926
5927 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
5928 if (hw->mac.type != ixgbe_mac_82598EB) {
5929 device_printf(dev, "EIMS_EX(0):\t%08x\n",
5930 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
5931 device_printf(dev, "EIMS_EX(1):\t%08x\n",
5932 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
5933 }
5934 } /* ixgbe_print_debug_info */
5935
5936 /************************************************************************
5937 * ixgbe_sysctl_debug
5938 ************************************************************************/
5939 static int
5940 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5941 {
5942 struct sysctlnode node = *rnode;
5943 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5944 int error, result = 0;
5945
5946 if (ixgbe_fw_recovery_mode_swflag(adapter))
5947 return (EPERM);
5948
5949 node.sysctl_data = &result;
5950 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5951
5952 if (error || newp == NULL)
5953 return error;
5954
5955 if (result == 1)
5956 ixgbe_print_debug_info(adapter);
5957
5958 return 0;
5959 } /* ixgbe_sysctl_debug */
5960
5961 /************************************************************************
5962 * ixgbe_init_device_features
5963 ************************************************************************/
5964 static void
5965 ixgbe_init_device_features(struct adapter *adapter)
5966 {
5967 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5968 | IXGBE_FEATURE_RSS
5969 | IXGBE_FEATURE_MSI
5970 | IXGBE_FEATURE_MSIX
5971 | IXGBE_FEATURE_LEGACY_IRQ
5972 | IXGBE_FEATURE_LEGACY_TX;
5973
5974 /* Set capabilities first... */
5975 switch (adapter->hw.mac.type) {
5976 case ixgbe_mac_82598EB:
5977 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5978 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5979 break;
5980 case ixgbe_mac_X540:
5981 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5982 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5983 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5984 (adapter->hw.bus.func == 0))
5985 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5986 break;
5987 case ixgbe_mac_X550:
5988 /*
5989 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
5990 * NVM Image version.
5991 */
5992 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5993 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5994 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5995 break;
5996 case ixgbe_mac_X550EM_x:
5997 /*
5998 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
5999 * NVM Image version.
6000 */
6001 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6002 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6003 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
6004 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6005 break;
6006 case ixgbe_mac_X550EM_a:
6007 /*
6008 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6009 * NVM Image version.
6010 */
6011 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6012 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6013 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6014 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6015 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6016 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6017 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6018 }
6019 break;
6020 case ixgbe_mac_82599EB:
6021 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6022 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6023 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6024 (adapter->hw.bus.func == 0))
6025 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6026 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6027 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6028 break;
6029 default:
6030 break;
6031 }
6032
6033 /* Enabled by default... */
6034 /* Fan failure detection */
6035 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6036 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6037 /* Netmap */
6038 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6039 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6040 /* EEE */
6041 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6042 adapter->feat_en |= IXGBE_FEATURE_EEE;
6043 /* Thermal Sensor */
6044 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6045 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6046 /*
6047 * Recovery mode:
6048 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6049 * NVM Image version.
6050 */
6051
6052 /* Enabled via global sysctl... */
6053 /* Flow Director */
6054 if (ixgbe_enable_fdir) {
6055 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6056 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6057 else
6058 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6059 }
6060 /* Legacy (single queue) transmit */
6061 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6062 ixgbe_enable_legacy_tx)
6063 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6064 /*
6065 * Message Signal Interrupts - Extended (MSI-X)
6066 * Normal MSI is only enabled if MSI-X calls fail.
6067 */
6068 if (!ixgbe_enable_msix)
6069 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6070 /* Receive-Side Scaling (RSS) */
6071 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6072 adapter->feat_en |= IXGBE_FEATURE_RSS;
6073
6074 /* Disable features with unmet dependencies... */
6075 /* No MSI-X */
6076 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6077 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6078 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6079 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6080 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6081 }
6082 } /* ixgbe_init_device_features */
6083
6084 /************************************************************************
6085 * ixgbe_probe - Device identification routine
6086 *
6087 * Determines if the driver should be loaded on
6088 * adapter based on its PCI vendor/device ID.
6089 *
6090 * return BUS_PROBE_DEFAULT on success, positive on failure
6091 ************************************************************************/
6092 static int
6093 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6094 {
6095 const struct pci_attach_args *pa = aux;
6096
6097 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6098 }
6099
6100 static const ixgbe_vendor_info_t *
6101 ixgbe_lookup(const struct pci_attach_args *pa)
6102 {
6103 const ixgbe_vendor_info_t *ent;
6104 pcireg_t subid;
6105
6106 INIT_DEBUGOUT("ixgbe_lookup: begin");
6107
6108 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6109 return NULL;
6110
6111 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6112
6113 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6114 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6115 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6116 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6117 (ent->subvendor_id == 0)) &&
6118 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6119 (ent->subdevice_id == 0))) {
6120 return ent;
6121 }
6122 }
6123 return NULL;
6124 }
6125
6126 static int
6127 ixgbe_ifflags_cb(struct ethercom *ec)
6128 {
6129 struct ifnet *ifp = &ec->ec_if;
6130 struct adapter *adapter = ifp->if_softc;
6131 int change, rc = 0;
6132
6133 IXGBE_CORE_LOCK(adapter);
6134
6135 change = ifp->if_flags ^ adapter->if_flags;
6136 if (change != 0)
6137 adapter->if_flags = ifp->if_flags;
6138
6139 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
6140 rc = ENETRESET;
6141 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
6142 ixgbe_set_promisc(adapter);
6143
6144 /* Set up VLAN support and filter */
6145 ixgbe_setup_vlan_hw_support(adapter);
6146
6147 IXGBE_CORE_UNLOCK(adapter);
6148
6149 return rc;
6150 }
6151
/************************************************************************
 * ixgbe_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 * Structured as two switches: the first only emits debug logging
 * for the command, the second actually dispatches it (media and
 * I2C commands handled directly, everything else delegated to
 * ether_ioctl() with post-processing on ENETRESET).
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* L4 Rx checksum offloads that must be toggled as one unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* Refuse all configuration while firmware is in recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* First switch: debug logging only (no dispatch here) */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Zeroing counters also clears HW stats and evcnts here */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual command dispatch */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		/* Read SFP module EEPROM/diagnostics over I2C */
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP EEPROM/DDM addresses are valid */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* interface not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU change: full reinit required */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6296
6297 /************************************************************************
6298 * ixgbe_check_fan_failure
6299 ************************************************************************/
6300 static void
6301 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6302 {
6303 u32 mask;
6304
6305 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6306 IXGBE_ESDP_SDP1;
6307
6308 if (reg & mask)
6309 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6310 } /* ixgbe_check_fan_failure */
6311
/************************************************************************
 * ixgbe_handle_que
 *
 * Deferred (softint/workqueue) service routine for one TX/RX queue
 * pair: drains completed descriptors under the TX lock, then either
 * reschedules itself (work remaining) or re-enables the interrupt.
 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;	/* true while RX/TX work remains */

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue mode: restart TX if packets are waiting */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: run again instead of re-enabling intr */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* MSI-X (que->res set): re-enable just this queue */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6352
6353 /************************************************************************
6354 * ixgbe_handle_que_work
6355 ************************************************************************/
6356 static void
6357 ixgbe_handle_que_work(struct work *wk, void *context)
6358 {
6359 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6360
6361 /*
6362 * "enqueued flag" is not required here.
6363 * See ixgbe_msix_que().
6364 */
6365 ixgbe_handle_que(que);
6366 }
6367
6368 /************************************************************************
6369 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6370 ************************************************************************/
6371 static int
6372 ixgbe_allocate_legacy(struct adapter *adapter,
6373 const struct pci_attach_args *pa)
6374 {
6375 device_t dev = adapter->dev;
6376 struct ix_queue *que = adapter->queues;
6377 struct tx_ring *txr = adapter->tx_rings;
6378 int counts[PCI_INTR_TYPE_SIZE];
6379 pci_intr_type_t intr_type, max_type;
6380 char intrbuf[PCI_INTRSTR_LEN];
6381 const char *intrstr = NULL;
6382
6383 /* We allocate a single interrupt resource */
6384 max_type = PCI_INTR_TYPE_MSI;
6385 counts[PCI_INTR_TYPE_MSIX] = 0;
6386 counts[PCI_INTR_TYPE_MSI] =
6387 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6388 /* Check not feat_en but feat_cap to fallback to INTx */
6389 counts[PCI_INTR_TYPE_INTX] =
6390 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6391
6392 alloc_retry:
6393 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6394 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6395 return ENXIO;
6396 }
6397 adapter->osdep.nintrs = 1;
6398 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6399 intrbuf, sizeof(intrbuf));
6400 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6401 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6402 device_xname(dev));
6403 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6404 if (adapter->osdep.ihs[0] == NULL) {
6405 aprint_error_dev(dev,"unable to establish %s\n",
6406 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6407 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6408 adapter->osdep.intrs = NULL;
6409 switch (intr_type) {
6410 case PCI_INTR_TYPE_MSI:
6411 /* The next try is for INTx: Disable MSI */
6412 max_type = PCI_INTR_TYPE_INTX;
6413 counts[PCI_INTR_TYPE_INTX] = 1;
6414 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6415 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6416 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6417 goto alloc_retry;
6418 } else
6419 break;
6420 case PCI_INTR_TYPE_INTX:
6421 default:
6422 /* See below */
6423 break;
6424 }
6425 }
6426 if (intr_type == PCI_INTR_TYPE_INTX) {
6427 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6428 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6429 }
6430 if (adapter->osdep.ihs[0] == NULL) {
6431 aprint_error_dev(dev,
6432 "couldn't establish interrupt%s%s\n",
6433 intrstr ? " at " : "", intrstr ? intrstr : "");
6434 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6435 adapter->osdep.intrs = NULL;
6436 return ENXIO;
6437 }
6438 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6439 /*
6440 * Try allocating a fast interrupt and the associated deferred
6441 * processing contexts.
6442 */
6443 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6444 txr->txr_si =
6445 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6446 ixgbe_deferred_mq_start, txr);
6447 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6448 ixgbe_handle_que, que);
6449
6450 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6451 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6452 aprint_error_dev(dev,
6453 "could not establish software interrupts\n");
6454
6455 return ENXIO;
6456 }
6457 /* For simplicity in the handlers */
6458 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6459
6460 return (0);
6461 } /* ixgbe_allocate_legacy */
6462
6463 /************************************************************************
6464 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6465 ************************************************************************/
6466 static int
6467 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6468 {
6469 device_t dev = adapter->dev;
6470 struct ix_queue *que = adapter->queues;
6471 struct tx_ring *txr = adapter->tx_rings;
6472 pci_chipset_tag_t pc;
6473 char intrbuf[PCI_INTRSTR_LEN];
6474 char intr_xname[32];
6475 char wqname[MAXCOMLEN];
6476 const char *intrstr = NULL;
6477 int error, vector = 0;
6478 int cpu_id = 0;
6479 kcpuset_t *affinity;
6480 #ifdef RSS
6481 unsigned int rss_buckets = 0;
6482 kcpuset_t cpu_mask;
6483 #endif
6484
6485 pc = adapter->osdep.pc;
6486 #ifdef RSS
6487 /*
6488 * If we're doing RSS, the number of queues needs to
6489 * match the number of RSS buckets that are configured.
6490 *
6491 * + If there's more queues than RSS buckets, we'll end
6492 * up with queues that get no traffic.
6493 *
6494 * + If there's more RSS buckets than queues, we'll end
6495 * up having multiple RSS buckets map to the same queue,
6496 * so there'll be some contention.
6497 */
6498 rss_buckets = rss_getnumbuckets();
6499 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6500 (adapter->num_queues != rss_buckets)) {
6501 device_printf(dev,
6502 "%s: number of queues (%d) != number of RSS buckets (%d)"
6503 "; performance will be impacted.\n",
6504 __func__, adapter->num_queues, rss_buckets);
6505 }
6506 #endif
6507
6508 adapter->osdep.nintrs = adapter->num_queues + 1;
6509 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6510 adapter->osdep.nintrs) != 0) {
6511 aprint_error_dev(dev,
6512 "failed to allocate MSI-X interrupt\n");
6513 return (ENXIO);
6514 }
6515
6516 kcpuset_create(&affinity, false);
6517 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6518 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6519 device_xname(dev), i);
6520 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6521 sizeof(intrbuf));
6522 #ifdef IXGBE_MPSAFE
6523 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6524 true);
6525 #endif
6526 /* Set the handler function */
6527 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6528 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6529 intr_xname);
6530 if (que->res == NULL) {
6531 aprint_error_dev(dev,
6532 "Failed to register QUE handler\n");
6533 error = ENXIO;
6534 goto err_out;
6535 }
6536 que->msix = vector;
6537 adapter->active_queues |= (u64)(1 << que->msix);
6538
6539 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6540 #ifdef RSS
6541 /*
6542 * The queue ID is used as the RSS layer bucket ID.
6543 * We look up the queue ID -> RSS CPU ID and select
6544 * that.
6545 */
6546 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6547 CPU_SETOF(cpu_id, &cpu_mask);
6548 #endif
6549 } else {
6550 /*
6551 * Bind the MSI-X vector, and thus the
6552 * rings to the corresponding CPU.
6553 *
6554 * This just happens to match the default RSS
6555 * round-robin bucket -> queue -> CPU allocation.
6556 */
6557 if (adapter->num_queues > 1)
6558 cpu_id = i;
6559 }
6560 /* Round-robin affinity */
6561 kcpuset_zero(affinity);
6562 kcpuset_set(affinity, cpu_id % ncpu);
6563 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6564 NULL);
6565 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6566 intrstr);
6567 if (error == 0) {
6568 #if 1 /* def IXGBE_DEBUG */
6569 #ifdef RSS
6570 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6571 cpu_id % ncpu);
6572 #else
6573 aprint_normal(", bound queue %d to cpu %d", i,
6574 cpu_id % ncpu);
6575 #endif
6576 #endif /* IXGBE_DEBUG */
6577 }
6578 aprint_normal("\n");
6579
6580 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6581 txr->txr_si = softint_establish(
6582 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6583 ixgbe_deferred_mq_start, txr);
6584 if (txr->txr_si == NULL) {
6585 aprint_error_dev(dev,
6586 "couldn't establish software interrupt\n");
6587 error = ENXIO;
6588 goto err_out;
6589 }
6590 }
6591 que->que_si
6592 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6593 ixgbe_handle_que, que);
6594 if (que->que_si == NULL) {
6595 aprint_error_dev(dev,
6596 "couldn't establish software interrupt\n");
6597 error = ENXIO;
6598 goto err_out;
6599 }
6600 }
6601 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6602 error = workqueue_create(&adapter->txr_wq, wqname,
6603 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6604 IXGBE_WORKQUEUE_FLAGS);
6605 if (error) {
6606 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6607 goto err_out;
6608 }
6609 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6610
6611 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6612 error = workqueue_create(&adapter->que_wq, wqname,
6613 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6614 IXGBE_WORKQUEUE_FLAGS);
6615 if (error) {
6616 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6617 goto err_out;
6618 }
6619
6620 /* and Link */
6621 cpu_id++;
6622 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6623 adapter->vector = vector;
6624 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6625 sizeof(intrbuf));
6626 #ifdef IXGBE_MPSAFE
6627 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6628 true);
6629 #endif
6630 /* Set the link handler function */
6631 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6632 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6633 intr_xname);
6634 if (adapter->osdep.ihs[vector] == NULL) {
6635 aprint_error_dev(dev, "Failed to register LINK handler\n");
6636 error = ENXIO;
6637 goto err_out;
6638 }
6639 /* Round-robin affinity */
6640 kcpuset_zero(affinity);
6641 kcpuset_set(affinity, cpu_id % ncpu);
6642 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6643 NULL);
6644
6645 aprint_normal_dev(dev,
6646 "for link, interrupting at %s", intrstr);
6647 if (error == 0)
6648 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6649 else
6650 aprint_normal("\n");
6651
6652 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6653 adapter->mbx_si =
6654 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6655 ixgbe_handle_mbx, adapter);
6656 if (adapter->mbx_si == NULL) {
6657 aprint_error_dev(dev,
6658 "could not establish software interrupts\n");
6659
6660 error = ENXIO;
6661 goto err_out;
6662 }
6663 }
6664
6665 kcpuset_destroy(affinity);
6666 aprint_normal_dev(dev,
6667 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6668
6669 return (0);
6670
6671 err_out:
6672 kcpuset_destroy(affinity);
6673 ixgbe_free_softint(adapter);
6674 ixgbe_free_pciintr_resources(adapter);
6675 return (error);
6676 } /* ixgbe_allocate_msix */
6677
6678 /************************************************************************
6679 * ixgbe_configure_interrupts
6680 *
6681 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6682 * This will also depend on user settings.
6683 ************************************************************************/
6684 static int
6685 ixgbe_configure_interrupts(struct adapter *adapter)
6686 {
6687 device_t dev = adapter->dev;
6688 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6689 int want, queues, msgs;
6690
6691 /* Default to 1 queue if MSI-X setup fails */
6692 adapter->num_queues = 1;
6693
6694 /* Override by tuneable */
6695 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6696 goto msi;
6697
6698 /*
6699 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6700 * interrupt slot.
6701 */
6702 if (ncpu == 1)
6703 goto msi;
6704
6705 /* First try MSI-X */
6706 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6707 msgs = MIN(msgs, IXG_MAX_NINTR);
6708 if (msgs < 2)
6709 goto msi;
6710
6711 adapter->msix_mem = (void *)1; /* XXX */
6712
6713 /* Figure out a reasonable auto config value */
6714 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6715
6716 #ifdef RSS
6717 /* If we're doing RSS, clamp at the number of RSS buckets */
6718 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6719 queues = uimin(queues, rss_getnumbuckets());
6720 #endif
6721 if (ixgbe_num_queues > queues) {
6722 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6723 ixgbe_num_queues = queues;
6724 }
6725
6726 if (ixgbe_num_queues != 0)
6727 queues = ixgbe_num_queues;
6728 else
6729 queues = uimin(queues,
6730 uimin(mac->max_tx_queues, mac->max_rx_queues));
6731
6732 /* reflect correct sysctl value */
6733 ixgbe_num_queues = queues;
6734
6735 /*
6736 * Want one vector (RX/TX pair) per queue
6737 * plus an additional for Link.
6738 */
6739 want = queues + 1;
6740 if (msgs >= want)
6741 msgs = want;
6742 else {
6743 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6744 "%d vectors but %d queues wanted!\n",
6745 msgs, want);
6746 goto msi;
6747 }
6748 adapter->num_queues = queues;
6749 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6750 return (0);
6751
6752 /*
6753 * MSI-X allocation failed or provided us with
6754 * less vectors than needed. Free MSI-X resources
6755 * and we'll try enabling MSI.
6756 */
6757 msi:
6758 /* Without MSI-X, some features are no longer supported */
6759 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6760 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6761 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6762 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6763
6764 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6765 adapter->msix_mem = NULL; /* XXX */
6766 if (msgs > 1)
6767 msgs = 1;
6768 if (msgs != 0) {
6769 msgs = 1;
6770 adapter->feat_en |= IXGBE_FEATURE_MSI;
6771 return (0);
6772 }
6773
6774 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6775 aprint_error_dev(dev,
6776 "Device does not support legacy interrupts.\n");
6777 return 1;
6778 }
6779
6780 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6781
6782 return (0);
6783 } /* ixgbe_configure_interrupts */
6784
6785
6786 /************************************************************************
6787 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6788 *
6789 * Done outside of interrupt context since the driver might sleep
6790 ************************************************************************/
6791 static void
6792 ixgbe_handle_link(void *context)
6793 {
6794 struct adapter *adapter = context;
6795 struct ixgbe_hw *hw = &adapter->hw;
6796
6797 IXGBE_CORE_LOCK(adapter);
6798 ++adapter->link_sicount.ev_count;
6799 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6800 ixgbe_update_link_status(adapter);
6801
6802 /* Re-enable link interrupts */
6803 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6804
6805 IXGBE_CORE_UNLOCK(adapter);
6806 } /* ixgbe_handle_link */
6807
#if 0	/* compiled out: not referenced by the visible code */
/************************************************************************
 * ixgbe_rearm_queues
 *
 * Write the 64-bit queue bitmap into the EICS register(s), presumably
 * to re-trigger the corresponding queue interrupts in software —
 * confirm against the EICS description in the Intel datasheet.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single 32-bit EICS; keep only the bits
		 * belonging to RX/TX queues. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Later MACs split the 64 queue bits across two extended
		 * registers: low word into EICS_EX(0), high word into
		 * EICS_EX(1). */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		/* Unknown MAC type: nothing to write. */
		break;
	}
} /* ixgbe_rearm_queues */
#endif
6837