ixgbe.c revision 1.155 1 /* $NetBSD: ixgbe.c,v 1.155 2018/05/23 10:11:07 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
/************************************************************************
 * Driver version
 ************************************************************************/
/* Printed in the attach banner together with the branding string. */
char ixgbe_driver_version[] = "4.0.1-k";
85
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
144
/************************************************************************
 * Table of branding strings
 ************************************************************************/
/* Indexed by the String Index field of ixgbe_vendor_info_array entries. */
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
151
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* Autoconf / pmf entry points */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
/* ifnet / ethercom entry points */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static void	ixgbe_ifstop(struct ifnet *, int);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_stop(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
/* PCI / interrupt resource management */
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_softint(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_local_timer1(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

/* TX/RX ring initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt and link management */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_promisc(struct adapter *);
static void	ixgbe_set_multi(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Sysctl / statistics setup */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_link(void *);

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
269
/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/*
 * FreeBSD module glue, disabled on NetBSD.  Retained only to ease
 * merging changes from the upstream FreeBSD if_ix.c.
 */
#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
287
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * SYSCTL_INT is stubbed out to nothing here: the FreeBSD-style sysctl
 * declarations below are kept only to ease merging from upstream.
 * The NetBSD-visible sysctls are created in ixgbe_add_device_sysctls().
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing is deferred to a workqueue or a softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
/* int (not bool): the value is copied into hw->allow_unsupported_sfp. */
static int allow_unsupported_sfp = false;
/* TUNABLE_INT is likewise stubbed out; see SYSCTL_INT above. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
/* NOTE(review): "SOFTINFT" spelling looks like a historical typo; the
 * macro name is presumably referenced elsewhere, so it is left as-is. */
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
413
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the 40-byte hash
 *   key (RSSRK) and the hash-type selection bits (MRQC) so received
 *   packets are spread across the adapter's RX queues.  Caller is
 *   expected to hold the adapter in a quiescent state (invoked from
 *   ixgbe_initialize_receive_units() while RX is disabled).
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* On NetBSD the system-wide key from rss_getkey() is always used. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has an extended 512-entry table (ERETA). */
		table_size = 512;
		break;
	default:
		break;
	}

	/*
	 * Set up the redirection table: j cycles 0..num_queues-1 so the
	 * table round-robins over the active queues.
	 */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then flush one 32-bit
		 * register every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				/* Entries 128..511 live in ERETA (X550). */
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash-config bits into MRQC field-select bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in the multi-queue mode required by the current SR-IOV mode. */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
530
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round-up constant for converting rx_mbuf_sz into SRRCTL BSIZEPKT units. */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Convert the mbuf buffer size into SRRCTL BSIZEPKT units. */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Program the per-queue RX registers. */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split replication types (not used on 82598). */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
660
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware queue index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the TX DMA engine. */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
752
753 /************************************************************************
754 * ixgbe_attach - Device initialization routine
755 *
756 * Called when the driver is being loaded.
757 * Identifies the type of hardware, allocates all resources
758 * and initializes the hardware.
759 *
760 * return 0 on success, positive on failure
761 ************************************************************************/
762 static void
763 ixgbe_attach(device_t parent, device_t dev, void *aux)
764 {
765 struct adapter *adapter;
766 struct ixgbe_hw *hw;
767 int error = -1;
768 u32 ctrl_ext;
769 u16 high, low, nvmreg;
770 pcireg_t id, subid;
771 ixgbe_vendor_info_t *ent;
772 struct pci_attach_args *pa = aux;
773 const char *str;
774 char buf[256];
775
776 INIT_DEBUGOUT("ixgbe_attach: begin");
777
778 /* Allocate, clear, and link in our adapter structure */
779 adapter = device_private(dev);
780 adapter->hw.back = adapter;
781 adapter->dev = dev;
782 hw = &adapter->hw;
783 adapter->osdep.pc = pa->pa_pc;
784 adapter->osdep.tag = pa->pa_tag;
785 if (pci_dma64_available(pa))
786 adapter->osdep.dmat = pa->pa_dmat64;
787 else
788 adapter->osdep.dmat = pa->pa_dmat;
789 adapter->osdep.attached = false;
790
791 ent = ixgbe_lookup(pa);
792
793 KASSERT(ent != NULL);
794
795 aprint_normal(": %s, Version - %s\n",
796 ixgbe_strings[ent->index], ixgbe_driver_version);
797
798 /* Core Lock Init*/
799 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
800
801 /* Set up the timer callout */
802 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
803
804 /* Determine hardware revision */
805 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
806 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
807
808 hw->vendor_id = PCI_VENDOR(id);
809 hw->device_id = PCI_PRODUCT(id);
810 hw->revision_id =
811 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
812 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
813 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
814
815 /*
816 * Make sure BUSMASTER is set
817 */
818 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
819
820 /* Do base PCI setup - map BAR0 */
821 if (ixgbe_allocate_pci_resources(adapter, pa)) {
822 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
823 error = ENXIO;
824 goto err_out;
825 }
826
827 /* let hardware know driver is loaded */
828 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
829 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
830 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
831
832 /*
833 * Initialize the shared code
834 */
835 if (ixgbe_init_shared_code(hw) != 0) {
836 aprint_error_dev(dev, "Unable to initialize the shared code\n");
837 error = ENXIO;
838 goto err_out;
839 }
840
841 switch (hw->mac.type) {
842 case ixgbe_mac_82598EB:
843 str = "82598EB";
844 break;
845 case ixgbe_mac_82599EB:
846 str = "82599EB";
847 break;
848 case ixgbe_mac_X540:
849 str = "X540";
850 break;
851 case ixgbe_mac_X550:
852 str = "X550";
853 break;
854 case ixgbe_mac_X550EM_x:
855 str = "X550EM";
856 break;
857 case ixgbe_mac_X550EM_a:
858 str = "X550EM A";
859 break;
860 default:
861 str = "Unknown";
862 break;
863 }
864 aprint_normal_dev(dev, "device %s\n", str);
865
866 if (hw->mbx.ops.init_params)
867 hw->mbx.ops.init_params(hw);
868
869 hw->allow_unsupported_sfp = allow_unsupported_sfp;
870
871 /* Pick up the 82599 settings */
872 if (hw->mac.type != ixgbe_mac_82598EB) {
873 hw->phy.smart_speed = ixgbe_smart_speed;
874 adapter->num_segs = IXGBE_82599_SCATTER;
875 } else
876 adapter->num_segs = IXGBE_82598_SCATTER;
877
878 hw->mac.ops.set_lan_id(hw);
879 ixgbe_init_device_features(adapter);
880
881 if (ixgbe_configure_interrupts(adapter)) {
882 error = ENXIO;
883 goto err_out;
884 }
885
886 /* Allocate multicast array memory. */
887 adapter->mta = malloc(sizeof(*adapter->mta) *
888 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
889 if (adapter->mta == NULL) {
890 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
891 error = ENOMEM;
892 goto err_out;
893 }
894
895 /* Enable WoL (if supported) */
896 ixgbe_check_wol_support(adapter);
897
898 /* Verify adapter fan is still functional (if applicable) */
899 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
900 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
901 ixgbe_check_fan_failure(adapter, esdp, FALSE);
902 }
903
904 /* Ensure SW/FW semaphore is free */
905 ixgbe_init_swfw_semaphore(hw);
906
907 /* Enable EEE power saving */
908 if (adapter->feat_en & IXGBE_FEATURE_EEE)
909 hw->mac.ops.setup_eee(hw, TRUE);
910
911 /* Set an initial default flow control value */
912 hw->fc.requested_mode = ixgbe_flow_control;
913
914 /* Sysctls for limiting the amount of work done in the taskqueues */
915 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
916 "max number of rx packets to process",
917 &adapter->rx_process_limit, ixgbe_rx_process_limit);
918
919 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
920 "max number of tx packets to process",
921 &adapter->tx_process_limit, ixgbe_tx_process_limit);
922
923 /* Do descriptor calc and sanity checks */
924 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
925 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
926 aprint_error_dev(dev, "TXD config issue, using default!\n");
927 adapter->num_tx_desc = DEFAULT_TXD;
928 } else
929 adapter->num_tx_desc = ixgbe_txd;
930
931 /*
932 * With many RX rings it is easy to exceed the
933 * system mbuf allocation. Tuning nmbclusters
934 * can alleviate this.
935 */
936 if (nmbclusters > 0) {
937 int s;
938 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
939 if (s > nmbclusters) {
940 aprint_error_dev(dev, "RX Descriptors exceed "
941 "system mbuf max, using default instead!\n");
942 ixgbe_rxd = DEFAULT_RXD;
943 }
944 }
945
946 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
947 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
948 aprint_error_dev(dev, "RXD config issue, using default!\n");
949 adapter->num_rx_desc = DEFAULT_RXD;
950 } else
951 adapter->num_rx_desc = ixgbe_rxd;
952
953 /* Allocate our TX/RX Queues */
954 if (ixgbe_allocate_queues(adapter)) {
955 error = ENOMEM;
956 goto err_out;
957 }
958
959 hw->phy.reset_if_overtemp = TRUE;
960 error = ixgbe_reset_hw(hw);
961 hw->phy.reset_if_overtemp = FALSE;
962 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
963 /*
964 * No optics in this port, set up
965 * so the timer routine will probe
966 * for later insertion.
967 */
968 adapter->sfp_probe = TRUE;
969 error = IXGBE_SUCCESS;
970 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
971 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
972 error = EIO;
973 goto err_late;
974 } else if (error) {
975 aprint_error_dev(dev, "Hardware initialization failed\n");
976 error = EIO;
977 goto err_late;
978 }
979
980 /* Make sure we have a good EEPROM before we read from it */
981 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
982 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
983 error = EIO;
984 goto err_late;
985 }
986
987 aprint_normal("%s:", device_xname(dev));
988 /* NVM Image Version */
989 switch (hw->mac.type) {
990 case ixgbe_mac_X540:
991 case ixgbe_mac_X550EM_a:
992 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
993 if (nvmreg == 0xffff)
994 break;
995 high = (nvmreg >> 12) & 0x0f;
996 low = (nvmreg >> 4) & 0xff;
997 id = nvmreg & 0x0f;
998 aprint_normal(" NVM Image Version %u.", high);
999 if (hw->mac.type == ixgbe_mac_X540)
1000 str = "%x";
1001 else
1002 str = "%02x";
1003 aprint_normal(str, low);
1004 aprint_normal(" ID 0x%x,", id);
1005 break;
1006 case ixgbe_mac_X550EM_x:
1007 case ixgbe_mac_X550:
1008 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1009 if (nvmreg == 0xffff)
1010 break;
1011 high = (nvmreg >> 12) & 0x0f;
1012 low = nvmreg & 0xff;
1013 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1014 break;
1015 default:
1016 break;
1017 }
1018
1019 /* PHY firmware revision */
1020 switch (hw->mac.type) {
1021 case ixgbe_mac_X540:
1022 case ixgbe_mac_X550:
1023 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1024 if (nvmreg == 0xffff)
1025 break;
1026 high = (nvmreg >> 12) & 0x0f;
1027 low = (nvmreg >> 4) & 0xff;
1028 id = nvmreg & 0x000f;
1029 aprint_normal(" PHY FW Revision %u.", high);
1030 if (hw->mac.type == ixgbe_mac_X540)
1031 str = "%x";
1032 else
1033 str = "%02x";
1034 aprint_normal(str, low);
1035 aprint_normal(" ID 0x%x,", id);
1036 break;
1037 default:
1038 break;
1039 }
1040
1041 /* NVM Map version & OEM NVM Image version */
1042 switch (hw->mac.type) {
1043 case ixgbe_mac_X550:
1044 case ixgbe_mac_X550EM_x:
1045 case ixgbe_mac_X550EM_a:
1046 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1047 if (nvmreg != 0xffff) {
1048 high = (nvmreg >> 12) & 0x0f;
1049 low = nvmreg & 0x00ff;
1050 aprint_normal(" NVM Map version %u.%02x,", high, low);
1051 }
1052 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1053 if (nvmreg != 0xffff) {
1054 high = (nvmreg >> 12) & 0x0f;
1055 low = nvmreg & 0x00ff;
1056 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1057 low);
1058 }
1059 break;
1060 default:
1061 break;
1062 }
1063
1064 /* Print the ETrackID */
1065 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1066 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1067 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1068
1069 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1070 error = ixgbe_allocate_msix(adapter, pa);
1071 if (error) {
1072 /* Free allocated queue structures first */
1073 ixgbe_free_transmit_structures(adapter);
1074 ixgbe_free_receive_structures(adapter);
1075 free(adapter->queues, M_DEVBUF);
1076
1077 /* Fallback to legacy interrupt */
1078 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1079 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1080 adapter->feat_en |= IXGBE_FEATURE_MSI;
1081 adapter->num_queues = 1;
1082
1083 /* Allocate our TX/RX Queues again */
1084 if (ixgbe_allocate_queues(adapter)) {
1085 error = ENOMEM;
1086 goto err_out;
1087 }
1088 }
1089 }
1090 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1091 error = ixgbe_allocate_legacy(adapter, pa);
1092 if (error)
1093 goto err_late;
1094
1095 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1096 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1097 ixgbe_handle_link, adapter);
1098 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1099 ixgbe_handle_mod, adapter);
1100 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_handle_msf, adapter);
1102 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1103 ixgbe_handle_phy, adapter);
1104 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1105 adapter->fdir_si =
1106 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_reinit_fdir, adapter);
1108 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1109 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1110 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1111 && (adapter->fdir_si == NULL))) {
1112 aprint_error_dev(dev,
1113 "could not establish software interrupts ()\n");
1114 goto err_out;
1115 }
1116
1117 error = ixgbe_start_hw(hw);
1118 switch (error) {
1119 case IXGBE_ERR_EEPROM_VERSION:
1120 aprint_error_dev(dev, "This device is a pre-production adapter/"
1121 "LOM. Please be aware there may be issues associated "
1122 "with your hardware.\nIf you are experiencing problems "
1123 "please contact your Intel or hardware representative "
1124 "who provided you with this hardware.\n");
1125 break;
1126 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1127 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1128 error = EIO;
1129 goto err_late;
1130 case IXGBE_ERR_SFP_NOT_PRESENT:
1131 aprint_error_dev(dev, "No SFP+ Module found\n");
1132 /* falls thru */
1133 default:
1134 break;
1135 }
1136
1137 /* Setup OS specific network interface */
1138 if (ixgbe_setup_interface(dev, adapter) != 0)
1139 goto err_late;
1140
1141 /*
1142 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1143 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1144 */
1145 if (hw->phy.media_type == ixgbe_media_type_copper) {
1146 uint16_t id1, id2;
1147 int oui, model, rev;
1148 const char *descr;
1149
1150 id1 = hw->phy.id >> 16;
1151 id2 = hw->phy.id & 0xffff;
1152 oui = MII_OUI(id1, id2);
1153 model = MII_MODEL(id2);
1154 rev = MII_REV(id2);
1155 if ((descr = mii_get_descr(oui, model)) != NULL)
1156 aprint_normal_dev(dev,
1157 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1158 descr, oui, model, rev);
1159 else
1160 aprint_normal_dev(dev,
1161 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1162 oui, model, rev);
1163 }
1164
1165 /* Enable the optics for 82599 SFP+ fiber */
1166 ixgbe_enable_tx_laser(hw);
1167
1168 /* Enable power to the phy. */
1169 ixgbe_set_phy_power(hw, TRUE);
1170
1171 /* Initialize statistics */
1172 ixgbe_update_stats_counters(adapter);
1173
1174 /* Check PCIE slot type/speed/width */
1175 ixgbe_get_slot_info(adapter);
1176
1177 /*
1178 * Do time init and sysctl init here, but
1179 * only on the first port of a bypass adapter.
1180 */
1181 ixgbe_bypass_init(adapter);
1182
1183 /* Set an initial dmac value */
1184 adapter->dmac = 0;
1185 /* Set initial advertised speeds (if applicable) */
1186 adapter->advertise = ixgbe_get_advertise(adapter);
1187
1188 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1189 ixgbe_define_iov_schemas(dev, &error);
1190
1191 /* Add sysctls */
1192 ixgbe_add_device_sysctls(adapter);
1193 ixgbe_add_hw_stats(adapter);
1194
1195 /* For Netmap */
1196 adapter->init_locked = ixgbe_init_locked;
1197 adapter->stop_locked = ixgbe_stop;
1198
1199 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1200 ixgbe_netmap_attach(adapter);
1201
1202 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1203 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1204 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1205 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1206
1207 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1208 pmf_class_network_register(dev, adapter->ifp);
1209 else
1210 aprint_error_dev(dev, "couldn't establish power handler\n");
1211
1212 INIT_DEBUGOUT("ixgbe_attach: end");
1213 adapter->osdep.attached = true;
1214
1215 return;
1216
1217 err_late:
1218 ixgbe_free_transmit_structures(adapter);
1219 ixgbe_free_receive_structures(adapter);
1220 free(adapter->queues, M_DEVBUF);
1221 err_out:
1222 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1223 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1224 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1225 ixgbe_free_softint(adapter);
1226 ixgbe_free_pci_resources(adapter);
1227 if (adapter->mta != NULL)
1228 free(adapter->mta, M_DEVBUF);
1229 IXGBE_CORE_LOCK_DESTROY(adapter);
1230
1231 return;
1232 } /* ixgbe_attach */
1233
1234 /************************************************************************
1235 * ixgbe_check_wol_support
1236 *
1237 * Checks whether the adapter's ports are capable of
1238 * Wake On LAN by reading the adapter's NVM.
1239 *
1240 * Sets each port's hw->wol_enabled value depending
1241 * on the value read here.
1242 ************************************************************************/
1243 static void
1244 ixgbe_check_wol_support(struct adapter *adapter)
1245 {
1246 struct ixgbe_hw *hw = &adapter->hw;
1247 u16 dev_caps = 0;
1248
1249 /* Find out WoL support for port */
1250 adapter->wol_support = hw->wol_enabled = 0;
1251 ixgbe_get_device_caps(hw, &dev_caps);
1252 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1253 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1254 hw->bus.func == 0))
1255 adapter->wol_support = hw->wol_enabled = 1;
1256
1257 /* Save initial wake up filter configuration */
1258 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1259
1260 return;
1261 } /* ixgbe_check_wol_support */
1262
1263 /************************************************************************
1264 * ixgbe_setup_interface
1265 *
1266 * Setup networking device structure and register an interface.
1267 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire up driver entry points */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* Keep the send queue slightly smaller than the TX ring */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach order matters: if_initialize, then percpuq/ether attach,
	 * then if_register to publish the interface.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
	    | IFCAP_TXCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6
	    | IFCAP_LRO;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1370
1371 /************************************************************************
1372 * ixgbe_add_media_types
1373 ************************************************************************/
1374 static void
1375 ixgbe_add_media_types(struct adapter *adapter)
1376 {
1377 struct ixgbe_hw *hw = &adapter->hw;
1378 device_t dev = adapter->dev;
1379 u64 layer;
1380
1381 layer = adapter->phy_layer;
1382
1383 #define ADD(mm, dd) \
1384 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1385
1386 ADD(IFM_NONE, 0);
1387
1388 /* Media types with matching NetBSD media defines */
1389 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1390 ADD(IFM_10G_T | IFM_FDX, 0);
1391 }
1392 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1393 ADD(IFM_1000_T | IFM_FDX, 0);
1394 }
1395 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1396 ADD(IFM_100_TX | IFM_FDX, 0);
1397 }
1398 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1399 ADD(IFM_10_T | IFM_FDX, 0);
1400 }
1401
1402 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1403 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1404 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1405 }
1406
1407 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1408 ADD(IFM_10G_LR | IFM_FDX, 0);
1409 if (hw->phy.multispeed_fiber) {
1410 ADD(IFM_1000_LX | IFM_FDX, 0);
1411 }
1412 }
1413 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1414 ADD(IFM_10G_SR | IFM_FDX, 0);
1415 if (hw->phy.multispeed_fiber) {
1416 ADD(IFM_1000_SX | IFM_FDX, 0);
1417 }
1418 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1419 ADD(IFM_1000_SX | IFM_FDX, 0);
1420 }
1421 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1422 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1423 }
1424
1425 #ifdef IFM_ETH_XTYPE
1426 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1427 ADD(IFM_10G_KR | IFM_FDX, 0);
1428 }
1429 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1430 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1431 }
1432 #else
1433 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1434 device_printf(dev, "Media supported: 10GbaseKR\n");
1435 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1436 ADD(IFM_10G_SR | IFM_FDX, 0);
1437 }
1438 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1439 device_printf(dev, "Media supported: 10GbaseKX4\n");
1440 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1441 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1442 }
1443 #endif
1444 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1445 ADD(IFM_1000_KX | IFM_FDX, 0);
1446 }
1447 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1448 ADD(IFM_2500_KX | IFM_FDX, 0);
1449 }
1450 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1451 ADD(IFM_2500_T | IFM_FDX, 0);
1452 }
1453 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1454 ADD(IFM_5000_T | IFM_FDX, 0);
1455 }
1456 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1457 device_printf(dev, "Media supported: 1000baseBX\n");
1458 /* XXX no ifmedia_set? */
1459
1460 ADD(IFM_AUTO, 0);
1461
1462 #undef ADD
1463 } /* ixgbe_add_media_types */
1464
1465 /************************************************************************
1466 * ixgbe_is_sfp
1467 ************************************************************************/
1468 static inline bool
1469 ixgbe_is_sfp(struct ixgbe_hw *hw)
1470 {
1471 switch (hw->mac.type) {
1472 case ixgbe_mac_82598EB:
1473 if (hw->phy.type == ixgbe_phy_nl)
1474 return (TRUE);
1475 return (FALSE);
1476 case ixgbe_mac_82599EB:
1477 switch (hw->mac.ops.get_media_type(hw)) {
1478 case ixgbe_media_type_fiber:
1479 case ixgbe_media_type_fiber_qsfp:
1480 return (TRUE);
1481 default:
1482 return (FALSE);
1483 }
1484 case ixgbe_mac_X550EM_x:
1485 case ixgbe_mac_X550EM_a:
1486 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1487 return (TRUE);
1488 return (FALSE);
1489 default:
1490 return (FALSE);
1491 }
1492 } /* ixgbe_is_sfp */
1493
1494 /************************************************************************
1495 * ixgbe_config_link
1496 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP path: defer module/multispeed handling to the
		 * softints; kpreempt_disable brackets softint_schedule
		 * to keep us bound to the current CPU for the call.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		/* Non-SFP path: query link state and program the MAC */
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1542
1543 /************************************************************************
1544 * ixgbe_update_stats_counters - Update board statistics counters.
1545 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* Error counters; crcerrs/rlec are kept for if_ierrors below */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		/* Fold the per-register counts onto the active queues */
		int j = i % adapter->num_queues;

		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers */
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* 82599 and newer moved the PFC RX counters */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	/*
	 * NOTE(review): missed_rx is never incremented in this function
	 * (it stays 0), so nothing is actually subtracted here.  The
	 * intent appears to be to subtract the per-queue misses summed
	 * above (total_missed_rx) — verify against upstream.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level flow control frames sent; used to adjust TX totals */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit byte counters split across low/high registers */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Per-size RX histogram counters */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, adjusted for flow control frames counted above */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1707
1708 /************************************************************************
1709 * ixgbe_add_hw_stats
1710 *
1711 * Add sysctl variables, one per statistic, to the system.
1712 ************************************************************************/
1713 static void
1714 ixgbe_add_hw_stats(struct adapter *adapter)
1715 {
1716 device_t dev = adapter->dev;
1717 const struct sysctlnode *rnode, *cnode;
1718 struct sysctllog **log = &adapter->sysctllog;
1719 struct tx_ring *txr = adapter->tx_rings;
1720 struct rx_ring *rxr = adapter->rx_rings;
1721 struct ixgbe_hw *hw = &adapter->hw;
1722 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1723 const char *xname = device_xname(dev);
1724 int i;
1725
1726 /* Driver Statistics */
1727 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1728 NULL, xname, "Driver tx dma soft fail EFBIG");
1729 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1730 NULL, xname, "m_defrag() failed");
1731 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1732 NULL, xname, "Driver tx dma hard fail EFBIG");
1733 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1734 NULL, xname, "Driver tx dma hard fail EINVAL");
1735 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1736 NULL, xname, "Driver tx dma hard fail other");
1737 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1738 NULL, xname, "Driver tx dma soft fail EAGAIN");
1739 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1740 NULL, xname, "Driver tx dma soft fail ENOMEM");
1741 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1742 NULL, xname, "Watchdog timeouts");
1743 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1744 NULL, xname, "TSO errors");
1745 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1746 NULL, xname, "Link MSI-X IRQ Handled");
1747 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1748 NULL, xname, "Link softint");
1749 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1750 NULL, xname, "module softint");
1751 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1752 NULL, xname, "multimode softint");
1753 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1754 NULL, xname, "external PHY softint");
1755
1756 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1757 #ifdef LRO
1758 struct lro_ctrl *lro = &rxr->lro;
1759 #endif /* LRO */
1760
1761 snprintf(adapter->queues[i].evnamebuf,
1762 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1763 xname, i);
1764 snprintf(adapter->queues[i].namebuf,
1765 sizeof(adapter->queues[i].namebuf), "q%d", i);
1766
1767 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1768 aprint_error_dev(dev, "could not create sysctl root\n");
1769 break;
1770 }
1771
1772 if (sysctl_createv(log, 0, &rnode, &rnode,
1773 0, CTLTYPE_NODE,
1774 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1775 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1776 break;
1777
1778 if (sysctl_createv(log, 0, &rnode, &cnode,
1779 CTLFLAG_READWRITE, CTLTYPE_INT,
1780 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1781 ixgbe_sysctl_interrupt_rate_handler, 0,
1782 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1783 break;
1784
1785 if (sysctl_createv(log, 0, &rnode, &cnode,
1786 CTLFLAG_READONLY, CTLTYPE_INT,
1787 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1788 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1789 0, CTL_CREATE, CTL_EOL) != 0)
1790 break;
1791
1792 if (sysctl_createv(log, 0, &rnode, &cnode,
1793 CTLFLAG_READONLY, CTLTYPE_INT,
1794 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1795 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1796 0, CTL_CREATE, CTL_EOL) != 0)
1797 break;
1798
1799 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1800 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1801 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1802 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1803 "Handled queue in softint");
1804 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1805 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1806 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1807 NULL, adapter->queues[i].evnamebuf, "TSO");
1808 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1809 NULL, adapter->queues[i].evnamebuf,
1810 "Queue No Descriptor Available");
1811 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1812 NULL, adapter->queues[i].evnamebuf,
1813 "Queue Packets Transmitted");
1814 #ifndef IXGBE_LEGACY_TX
1815 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1816 NULL, adapter->queues[i].evnamebuf,
1817 "Packets dropped in pcq");
1818 #endif
1819
1820 if (sysctl_createv(log, 0, &rnode, &cnode,
1821 CTLFLAG_READONLY,
1822 CTLTYPE_INT,
1823 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1824 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1825 CTL_CREATE, CTL_EOL) != 0)
1826 break;
1827
1828 if (sysctl_createv(log, 0, &rnode, &cnode,
1829 CTLFLAG_READONLY,
1830 CTLTYPE_INT,
1831 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1832 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1833 CTL_CREATE, CTL_EOL) != 0)
1834 break;
1835
1836 if (sysctl_createv(log, 0, &rnode, &cnode,
1837 CTLFLAG_READONLY,
1838 CTLTYPE_INT,
1839 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1840 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1841 CTL_CREATE, CTL_EOL) != 0)
1842 break;
1843
1844 if (i < __arraycount(stats->mpc)) {
1845 evcnt_attach_dynamic(&stats->mpc[i],
1846 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1847 "RX Missed Packet Count");
1848 if (hw->mac.type == ixgbe_mac_82598EB)
1849 evcnt_attach_dynamic(&stats->rnbc[i],
1850 EVCNT_TYPE_MISC, NULL,
1851 adapter->queues[i].evnamebuf,
1852 "Receive No Buffers");
1853 }
1854 if (i < __arraycount(stats->pxontxc)) {
1855 evcnt_attach_dynamic(&stats->pxontxc[i],
1856 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1857 "pxontxc");
1858 evcnt_attach_dynamic(&stats->pxonrxc[i],
1859 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1860 "pxonrxc");
1861 evcnt_attach_dynamic(&stats->pxofftxc[i],
1862 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1863 "pxofftxc");
1864 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1865 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1866 "pxoffrxc");
1867 if (hw->mac.type >= ixgbe_mac_82599EB)
1868 evcnt_attach_dynamic(&stats->pxon2offc[i],
1869 EVCNT_TYPE_MISC, NULL,
1870 adapter->queues[i].evnamebuf,
1871 "pxon2offc");
1872 }
1873 if (i < __arraycount(stats->qprc)) {
1874 evcnt_attach_dynamic(&stats->qprc[i],
1875 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1876 "qprc");
1877 evcnt_attach_dynamic(&stats->qptc[i],
1878 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1879 "qptc");
1880 evcnt_attach_dynamic(&stats->qbrc[i],
1881 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1882 "qbrc");
1883 evcnt_attach_dynamic(&stats->qbtc[i],
1884 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1885 "qbtc");
1886 if (hw->mac.type >= ixgbe_mac_82599EB)
1887 evcnt_attach_dynamic(&stats->qprdc[i],
1888 EVCNT_TYPE_MISC, NULL,
1889 adapter->queues[i].evnamebuf, "qprdc");
1890 }
1891
1892 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1893 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1894 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1895 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1896 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1897 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1898 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1899 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1900 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1901 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1902 #ifdef LRO
1903 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1904 CTLFLAG_RD, &lro->lro_queued, 0,
1905 "LRO Queued");
1906 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1907 CTLFLAG_RD, &lro->lro_flushed, 0,
1908 "LRO Flushed");
1909 #endif /* LRO */
1910 }
1911
1912 /* MAC stats get their own sub node */
1913
1914 snprintf(stats->namebuf,
1915 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1916
1917 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1918 stats->namebuf, "rx csum offload - IP");
1919 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1920 stats->namebuf, "rx csum offload - L4");
1921 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1922 stats->namebuf, "rx csum offload - IP bad");
1923 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1924 stats->namebuf, "rx csum offload - L4 bad");
1925 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1926 stats->namebuf, "Interrupt conditions zero");
1927 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1928 stats->namebuf, "Legacy interrupts");
1929
1930 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1931 stats->namebuf, "CRC Errors");
1932 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1933 stats->namebuf, "Illegal Byte Errors");
1934 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1935 stats->namebuf, "Byte Errors");
1936 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1937 stats->namebuf, "MAC Short Packets Discarded");
1938 if (hw->mac.type >= ixgbe_mac_X550)
1939 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "Bad SFD");
1941 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "Total Packets Missed");
1943 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "MAC Local Faults");
1945 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "MAC Remote Faults");
1947 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Receive Length Errors");
1949 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "Link XON Transmitted");
1951 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "Link XON Received");
1953 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Link XOFF Transmitted");
1955 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "Link XOFF Received");
1957
1958 /* Packet Reception Stats */
1959 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "Total Octets Received");
1961 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "Good Octets Received");
1963 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "Total Packets Received");
1965 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "Good Packets Received");
1967 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Multicast Packets Received");
1969 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Broadcast Packets Received");
1971 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "64 byte frames received ");
1973 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "65-127 byte frames received");
1975 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "128-255 byte frames received");
1977 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "256-511 byte frames received");
1979 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "512-1023 byte frames received");
1981 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "1023-1522 byte frames received");
1983 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Receive Undersized");
1985 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Fragmented Packets Received ");
1987 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Oversized Packets Received");
1989 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Received Jabber");
1991 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Management Packets Received");
1993 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Management Packets Dropped");
1995 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Checksum Errors");
1997
1998 /* Packet Transmission Stats */
1999 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "Good Octets Transmitted");
2001 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "Total Packets Transmitted");
2003 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "Good Packets Transmitted");
2005 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "Broadcast Packets Transmitted");
2007 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "Multicast Packets Transmitted");
2009 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Management Packets Transmitted");
2011 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "64 byte frames transmitted ");
2013 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "65-127 byte frames transmitted");
2015 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "128-255 byte frames transmitted");
2017 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "256-511 byte frames transmitted");
2019 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "512-1023 byte frames transmitted");
2021 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "1024-1522 byte frames transmitted");
2023 } /* ixgbe_add_hw_stats */
2024
2025 static void
2026 ixgbe_clear_evcnt(struct adapter *adapter)
2027 {
2028 struct tx_ring *txr = adapter->tx_rings;
2029 struct rx_ring *rxr = adapter->rx_rings;
2030 struct ixgbe_hw *hw = &adapter->hw;
2031 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2032
2033 adapter->efbig_tx_dma_setup.ev_count = 0;
2034 adapter->mbuf_defrag_failed.ev_count = 0;
2035 adapter->efbig2_tx_dma_setup.ev_count = 0;
2036 adapter->einval_tx_dma_setup.ev_count = 0;
2037 adapter->other_tx_dma_setup.ev_count = 0;
2038 adapter->eagain_tx_dma_setup.ev_count = 0;
2039 adapter->enomem_tx_dma_setup.ev_count = 0;
2040 adapter->tso_err.ev_count = 0;
2041 adapter->watchdog_events.ev_count = 0;
2042 adapter->link_irq.ev_count = 0;
2043 adapter->link_sicount.ev_count = 0;
2044 adapter->mod_sicount.ev_count = 0;
2045 adapter->msf_sicount.ev_count = 0;
2046 adapter->phy_sicount.ev_count = 0;
2047
2048 txr = adapter->tx_rings;
2049 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2050 adapter->queues[i].irqs.ev_count = 0;
2051 adapter->queues[i].handleq.ev_count = 0;
2052 adapter->queues[i].req.ev_count = 0;
2053 txr->no_desc_avail.ev_count = 0;
2054 txr->total_packets.ev_count = 0;
2055 txr->tso_tx.ev_count = 0;
2056 #ifndef IXGBE_LEGACY_TX
2057 txr->pcq_drops.ev_count = 0;
2058 #endif
2059 txr->q_efbig_tx_dma_setup = 0;
2060 txr->q_mbuf_defrag_failed = 0;
2061 txr->q_efbig2_tx_dma_setup = 0;
2062 txr->q_einval_tx_dma_setup = 0;
2063 txr->q_other_tx_dma_setup = 0;
2064 txr->q_eagain_tx_dma_setup = 0;
2065 txr->q_enomem_tx_dma_setup = 0;
2066 txr->q_tso_err = 0;
2067
2068 if (i < __arraycount(stats->mpc)) {
2069 stats->mpc[i].ev_count = 0;
2070 if (hw->mac.type == ixgbe_mac_82598EB)
2071 stats->rnbc[i].ev_count = 0;
2072 }
2073 if (i < __arraycount(stats->pxontxc)) {
2074 stats->pxontxc[i].ev_count = 0;
2075 stats->pxonrxc[i].ev_count = 0;
2076 stats->pxofftxc[i].ev_count = 0;
2077 stats->pxoffrxc[i].ev_count = 0;
2078 if (hw->mac.type >= ixgbe_mac_82599EB)
2079 stats->pxon2offc[i].ev_count = 0;
2080 }
2081 if (i < __arraycount(stats->qprc)) {
2082 stats->qprc[i].ev_count = 0;
2083 stats->qptc[i].ev_count = 0;
2084 stats->qbrc[i].ev_count = 0;
2085 stats->qbtc[i].ev_count = 0;
2086 if (hw->mac.type >= ixgbe_mac_82599EB)
2087 stats->qprdc[i].ev_count = 0;
2088 }
2089
2090 rxr->rx_packets.ev_count = 0;
2091 rxr->rx_bytes.ev_count = 0;
2092 rxr->rx_copies.ev_count = 0;
2093 rxr->no_jmbuf.ev_count = 0;
2094 rxr->rx_discarded.ev_count = 0;
2095 }
2096 stats->ipcs.ev_count = 0;
2097 stats->l4cs.ev_count = 0;
2098 stats->ipcs_bad.ev_count = 0;
2099 stats->l4cs_bad.ev_count = 0;
2100 stats->intzero.ev_count = 0;
2101 stats->legint.ev_count = 0;
2102 stats->crcerrs.ev_count = 0;
2103 stats->illerrc.ev_count = 0;
2104 stats->errbc.ev_count = 0;
2105 stats->mspdc.ev_count = 0;
2106 stats->mbsdc.ev_count = 0;
2107 stats->mpctotal.ev_count = 0;
2108 stats->mlfc.ev_count = 0;
2109 stats->mrfc.ev_count = 0;
2110 stats->rlec.ev_count = 0;
2111 stats->lxontxc.ev_count = 0;
2112 stats->lxonrxc.ev_count = 0;
2113 stats->lxofftxc.ev_count = 0;
2114 stats->lxoffrxc.ev_count = 0;
2115
2116 /* Packet Reception Stats */
2117 stats->tor.ev_count = 0;
2118 stats->gorc.ev_count = 0;
2119 stats->tpr.ev_count = 0;
2120 stats->gprc.ev_count = 0;
2121 stats->mprc.ev_count = 0;
2122 stats->bprc.ev_count = 0;
2123 stats->prc64.ev_count = 0;
2124 stats->prc127.ev_count = 0;
2125 stats->prc255.ev_count = 0;
2126 stats->prc511.ev_count = 0;
2127 stats->prc1023.ev_count = 0;
2128 stats->prc1522.ev_count = 0;
2129 stats->ruc.ev_count = 0;
2130 stats->rfc.ev_count = 0;
2131 stats->roc.ev_count = 0;
2132 stats->rjc.ev_count = 0;
2133 stats->mngprc.ev_count = 0;
2134 stats->mngpdc.ev_count = 0;
2135 stats->xec.ev_count = 0;
2136
2137 /* Packet Transmission Stats */
2138 stats->gotc.ev_count = 0;
2139 stats->tpt.ev_count = 0;
2140 stats->gptc.ev_count = 0;
2141 stats->bptc.ev_count = 0;
2142 stats->mptc.ev_count = 0;
2143 stats->mngptc.ev_count = 0;
2144 stats->ptc64.ev_count = 0;
2145 stats->ptc127.ev_count = 0;
2146 stats->ptc255.ev_count = 0;
2147 stats->ptc511.ev_count = 0;
2148 stats->ptc1023.ev_count = 0;
2149 stats->ptc1522.ev_count = 0;
2150 }
2151
2152 /************************************************************************
2153 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2154 *
2155 * Retrieves the TDH value from the hardware
2156 ************************************************************************/
2157 static int
2158 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2159 {
2160 struct sysctlnode node = *rnode;
2161 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2162 uint32_t val;
2163
2164 if (!txr)
2165 return (0);
2166
2167 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2168 node.sysctl_data = &val;
2169 return sysctl_lookup(SYSCTLFN_CALL(&node));
2170 } /* ixgbe_sysctl_tdh_handler */
2171
2172 /************************************************************************
2173 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2174 *
2175 * Retrieves the TDT value from the hardware
2176 ************************************************************************/
2177 static int
2178 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2179 {
2180 struct sysctlnode node = *rnode;
2181 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2182 uint32_t val;
2183
2184 if (!txr)
2185 return (0);
2186
2187 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2188 node.sysctl_data = &val;
2189 return sysctl_lookup(SYSCTLFN_CALL(&node));
2190 } /* ixgbe_sysctl_tdt_handler */
2191
2192 /************************************************************************
2193 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2194 * handler function
2195 *
2196 * Retrieves the next_to_check value
2197 ************************************************************************/
2198 static int
2199 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2200 {
2201 struct sysctlnode node = *rnode;
2202 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2203 uint32_t val;
2204
2205 if (!rxr)
2206 return (0);
2207
2208 val = rxr->next_to_check;
2209 node.sysctl_data = &val;
2210 return sysctl_lookup(SYSCTLFN_CALL(&node));
2211 } /* ixgbe_sysctl_next_to_check_handler */
2212
2213 /************************************************************************
2214 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2215 *
2216 * Retrieves the RDH value from the hardware
2217 ************************************************************************/
2218 static int
2219 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2220 {
2221 struct sysctlnode node = *rnode;
2222 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2223 uint32_t val;
2224
2225 if (!rxr)
2226 return (0);
2227
2228 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2229 node.sysctl_data = &val;
2230 return sysctl_lookup(SYSCTLFN_CALL(&node));
2231 } /* ixgbe_sysctl_rdh_handler */
2232
2233 /************************************************************************
2234 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2235 *
2236 * Retrieves the RDT value from the hardware
2237 ************************************************************************/
2238 static int
2239 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2240 {
2241 struct sysctlnode node = *rnode;
2242 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2243 uint32_t val;
2244
2245 if (!rxr)
2246 return (0);
2247
2248 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2249 node.sysctl_data = &val;
2250 return sysctl_lookup(SYSCTLFN_CALL(&node));
2251 } /* ixgbe_sysctl_rdt_handler */
2252
2253 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixgbe_register_vlan
 *
 *   Run via vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This
 *   just creates the entry in the soft version of the
 *   VFTA, init will repopulate the real table.
 *
 *   NOTE(review): this code is currently compiled out (#if 0) pending
 *   a vlan(4) overhaul on NetBSD; kept for reference only.
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each 32-bit VFTA word covers 32 vlan ids: word index + bit */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated shadow table into the hardware */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2281
/************************************************************************
 * ixgbe_unregister_vlan
 *
 *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 *
 *   NOTE(review): this code is currently compiled out (#if 0) pending
 *   a vlan(4) overhaul on NetBSD; kept for reference only.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each 32-bit VFTA word covers 32 vlan ids: word index + bit */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2307 #endif
2308
2309 static void
2310 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2311 {
2312 struct ethercom *ec = &adapter->osdep.ec;
2313 struct ixgbe_hw *hw = &adapter->hw;
2314 struct rx_ring *rxr;
2315 int i;
2316 u32 ctrl;
2317
2318
2319 /*
2320 * We get here thru init_locked, meaning
2321 * a soft reset, this has already cleared
2322 * the VFTA and other state, so if there
2323 * have been no vlan's registered do nothing.
2324 */
2325 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2326 return;
2327
2328 /* Setup the queues for vlans */
2329 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2330 for (i = 0; i < adapter->num_queues; i++) {
2331 rxr = &adapter->rx_rings[i];
2332 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2333 if (hw->mac.type != ixgbe_mac_82598EB) {
2334 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2335 ctrl |= IXGBE_RXDCTL_VME;
2336 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2337 }
2338 rxr->vtag_strip = TRUE;
2339 }
2340 }
2341
2342 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2343 return;
2344 /*
2345 * A soft reset zero's out the VFTA, so
2346 * we need to repopulate it now.
2347 */
2348 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2349 if (adapter->shadow_vfta[i] != 0)
2350 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2351 adapter->shadow_vfta[i]);
2352
2353 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2354 /* Enable the Filter Table if enabled */
2355 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2356 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2357 ctrl |= IXGBE_VLNCTRL_VFE;
2358 }
2359 if (hw->mac.type == ixgbe_mac_82598EB)
2360 ctrl |= IXGBE_VLNCTRL_VME;
2361 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2362 } /* ixgbe_setup_vlan_hw_support */
2363
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 *
 *   For quad-port parts that sit behind an internal PCIe bridge, the
 *   slot speed is read from the parent bridge's PCIe capability; for
 *   everything else ixgbe_get_bus_info() queries the device directly.
 *   Prints the result and a warning if the slot bandwidth is too low.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Quad-port parts: walk up the PCI tree instead */
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/*
	 * Now get the PCI Express Capabilities offset.
	 * NOTE(review): pci_get_capability(9) returns nonzero on success,
	 * yet the true-branch below is the failure path according to its
	 * comment — confirm this condition is not inverted.
	 */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper half of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot cannot supply full line-rate bandwidth */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2471
/************************************************************************
 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
 *
 *   Decrement the queue's nested-disable count (protected by dc_mtx)
 *   and unmask the queue's interrupt vector in EIMS once the count
 *   reaches zero.
 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	mutex_enter(&que->dc_mtx);
	/*
	 * If an outer caller still holds the queue disabled after our
	 * decrement, leave the interrupt masked.  A count that is
	 * already zero means "enable unconditionally".
	 */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit EIMS register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Newer MACs: 64 vectors split across two EIMS_EX registers */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2501
/************************************************************************
 * ixgbe_disable_queue_internal
 *
 *   Mask the queue's interrupt vector in EIMC and bump the queue's
 *   nested-disable count (protected by dc_mtx).  The hardware write
 *   only happens on the 0 -> 1 transition; further calls just adjust
 *   the count.
 *
 *   nestok: when true, a call on an already-disabled queue increments
 *   the count (so a matching number of enables is required); when
 *   false, such a call is a no-op and does not stack.
 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	mutex_enter(&que->dc_mtx);

	if (que->disabled_count > 0) {
		/* Already masked; only track nesting when allowed */
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit EIMC register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Newer MACs: 64 vectors split across two EIMC_EX registers */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2536
/************************************************************************
 * ixgbe_disable_queue
 *
 *   Mask a queue's interrupt with nesting allowed: repeated disables
 *   stack and require a matching number of ixgbe_enable_queue() calls
 *   before the vector is unmasked again.
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2546
2547 /************************************************************************
2548 * ixgbe_sched_handle_que - schedule deferred packet processing
2549 ************************************************************************/
2550 static inline void
2551 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2552 {
2553
2554 if(que->txrx_use_workqueue) {
2555 /*
2556 * adapter->que_wq is bound to each CPU instead of
2557 * each NIC queue to reduce workqueue kthread. As we
2558 * should consider about interrupt affinity in this
2559 * function, the workqueue kthread must be WQ_PERCPU.
2560 * If create WQ_PERCPU workqueue kthread for each NIC
2561 * queue, that number of created workqueue kthread is
2562 * (number of used NIC queue) * (number of CPUs) =
2563 * (number of CPUs) ^ 2 most often.
2564 *
2565 * The same NIC queue's interrupts are avoided by
2566 * masking the queue's interrupt. And different
2567 * NIC queue's interrupts use different struct work
2568 * (que->wq_cookie). So, "enqueued flag" to avoid
2569 * twice workqueue_enqueue() is not required .
2570 */
2571 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2572 } else {
2573 softint_schedule(que->que_si);
2574 }
2575 }
2576
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Masks the queue's interrupt, reaps completed TX descriptors, and
 *   defers RX processing to softint/workqueue context.  When AIM is
 *   enabled, recalculates the interrupt throttle rate (EITR) from the
 *   average packet size seen over the last interval.
 *
 *   Returns 1 when the interrupt was handled, 0 when spurious.
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until deferred processing re-enables it */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* New ITR estimate = average bytes per packet this interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Either schedule deferred RX work or re-enable the vector now */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2678
2679 /************************************************************************
2680 * ixgbe_media_status - Media Ioctl callback
2681 *
2682 * Called whenever the user queries the status of
2683 * the interface using ifconfig.
2684 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Translate (PHY layer capability bits, negotiated link speed)
	 * into an ifmedia subtype.  All links below are reported
	 * full-duplex.  Copper BASE-T/TX first:
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper / active DA cables */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Optics: LR/LRM/SR; 1G fallback subtypes where applicable */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Without IFM_ETH_XTYPE, backplane KR/KX4
	 * links are approximated by SR/CX4 subtypes.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	/* Derive if_baudrate from the media word just built. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2834
2835 /************************************************************************
2836 * ixgbe_media_change - Media Ioctl callback
2837 *
2838 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
2840 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media selection is not user-settable. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the MAC reports. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds in the same bit encoding used
	 * by the "advertise_speed" sysctl:
	 * 0x1=100M, 0x2=1G, 0x4=10G, 0x8=10M, 0x10=2.5G, 0x20=5G.
	 * IFM_AUTO leaves advertise at 0 (= use defaults).
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2939
2940 /************************************************************************
2941 * ixgbe_set_promisc
2942 ************************************************************************/
/*
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE)
 * from the interface flags and the current multicast list length.
 * Must be called with the core lock held.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast entries, capped at the filter limit. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/*
	 * Only clear multicast-promiscuous when the whole list fits
	 * in the hardware filter.
	 */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Then re-enable promiscuous modes as requested by if_flags. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2982
2983 /************************************************************************
2984 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2985 ************************************************************************/
2986 static int
2987 ixgbe_msix_link(void *arg)
2988 {
2989 struct adapter *adapter = arg;
2990 struct ixgbe_hw *hw = &adapter->hw;
2991 u32 eicr, eicr_mask;
2992 s32 retval;
2993
2994 ++adapter->link_irq.ev_count;
2995
2996 /* Pause other interrupts */
2997 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2998
2999 /* First get the cause */
3000 /*
3001 * The specifications of 82598, 82599, X540 and X550 say EICS register
3002 * is write only. However, Linux says it is a workaround for silicon
3003 * errata to read EICS instead of EICR to get interrupt cause. It seems
3004 * there is a problem about read clear mechanism for EICR register.
3005 */
3006 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3007 /* Be sure the queue bits are not cleared */
3008 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3009 /* Clear interrupt with write */
3010 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3011
3012 /* Link status change */
3013 if (eicr & IXGBE_EICR_LSC) {
3014 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3015 softint_schedule(adapter->link_si);
3016 }
3017
3018 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3019 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3020 (eicr & IXGBE_EICR_FLOW_DIR)) {
3021 /* This is probably overkill :) */
3022 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3023 return 1;
3024 /* Disable the interrupt */
3025 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3026 softint_schedule(adapter->fdir_si);
3027 }
3028
3029 if (eicr & IXGBE_EICR_ECC) {
3030 device_printf(adapter->dev,
3031 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3032 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3033 }
3034
3035 /* Check for over temp condition */
3036 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3037 switch (adapter->hw.mac.type) {
3038 case ixgbe_mac_X550EM_a:
3039 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3040 break;
3041 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3042 IXGBE_EICR_GPI_SDP0_X550EM_a);
3043 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3044 IXGBE_EICR_GPI_SDP0_X550EM_a);
3045 retval = hw->phy.ops.check_overtemp(hw);
3046 if (retval != IXGBE_ERR_OVERTEMP)
3047 break;
3048 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3049 device_printf(adapter->dev, "System shutdown required!\n");
3050 break;
3051 default:
3052 if (!(eicr & IXGBE_EICR_TS))
3053 break;
3054 retval = hw->phy.ops.check_overtemp(hw);
3055 if (retval != IXGBE_ERR_OVERTEMP)
3056 break;
3057 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3058 device_printf(adapter->dev, "System shutdown required!\n");
3059 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3060 break;
3061 }
3062 }
3063
3064 /* Check for VF message */
3065 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3066 (eicr & IXGBE_EICR_MAILBOX))
3067 softint_schedule(adapter->mbx_si);
3068 }
3069
3070 if (ixgbe_is_sfp(hw)) {
3071 /* Pluggable optics-related interrupt */
3072 if (hw->mac.type >= ixgbe_mac_X540)
3073 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3074 else
3075 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3076
3077 if (eicr & eicr_mask) {
3078 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3079 softint_schedule(adapter->mod_si);
3080 }
3081
3082 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3083 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3084 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3085 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3086 softint_schedule(adapter->msf_si);
3087 }
3088 }
3089
3090 /* Check for fan failure */
3091 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3092 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3093 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3094 }
3095
3096 /* External PHY interrupt */
3097 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3098 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3099 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3100 softint_schedule(adapter->phy_si);
3101 }
3102
3103 /* Re-enable other interrupts */
3104 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3105 return 1;
3106 } /* ixgbe_msix_link */
3107
3108 static void
3109 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3110 {
3111
3112 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3113 itr |= itr << 16;
3114 else
3115 itr |= IXGBE_EITR_CNT_WDIS;
3116
3117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3118 }
3119
3120
3121 /************************************************************************
3122 * ixgbe_sysctl_interrupt_rate_handler
3123 ************************************************************************/
3124 static int
3125 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3126 {
3127 struct sysctlnode node = *rnode;
3128 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3129 struct adapter *adapter = que->adapter;
3130 uint32_t reg, usec, rate;
3131 int error;
3132
3133 if (que == NULL)
3134 return 0;
3135 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3136 usec = ((reg & 0x0FF8) >> 3);
3137 if (usec > 0)
3138 rate = 500000 / usec;
3139 else
3140 rate = 0;
3141 node.sysctl_data = &rate;
3142 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3143 if (error || newp == NULL)
3144 return error;
3145 reg &= ~0xfff; /* default, no limitation */
3146 if (rate > 0 && rate < 500000) {
3147 if (rate < 1000)
3148 rate = 1000;
3149 reg |= ((4000000/rate) & 0xff8);
3150 /*
3151 * When RSC is used, ITR interval must be larger than
3152 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3153 * The minimum value is always greater than 2us on 100M
3154 * (and 10M?(not documented)), but it's not on 1G and higher.
3155 */
3156 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3157 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3158 if ((adapter->num_queues > 1)
3159 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3160 return EINVAL;
3161 }
3162 ixgbe_max_interrupt_rate = rate;
3163 } else
3164 ixgbe_max_interrupt_rate = 0;
3165 ixgbe_eitr_write(adapter, que->msix, reg);
3166
3167 return (0);
3168 } /* ixgbe_sysctl_interrupt_rate_handler */
3169
3170 const struct sysctlnode *
3171 ixgbe_sysctl_instance(struct adapter *adapter)
3172 {
3173 const char *dvname;
3174 struct sysctllog **log;
3175 int rc;
3176 const struct sysctlnode *rnode;
3177
3178 if (adapter->sysctltop != NULL)
3179 return adapter->sysctltop;
3180
3181 log = &adapter->sysctllog;
3182 dvname = device_xname(adapter->dev);
3183
3184 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3185 0, CTLTYPE_NODE, dvname,
3186 SYSCTL_DESCR("ixgbe information and settings"),
3187 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3188 goto err;
3189
3190 return rnode;
3191 err:
3192 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3193 return NULL;
3194 }
3195
3196 /************************************************************************
3197 * ixgbe_add_device_sysctls
3198 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-device root created here. */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only ring/queue geometry */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Adaptive interrupt moderation, seeded from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* EEE control, only where the hardware is capable. */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3337
3338 /************************************************************************
3339 * ixgbe_allocate_pci_resources
3340 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0 (device registers); only memory-space BARs accepted. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register accesses must not be prefetched/combined,
		 * so strip the prefetchable mapping flag if set.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing mapped" for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3378
3379 static void
3380 ixgbe_free_softint(struct adapter *adapter)
3381 {
3382 struct ix_queue *que = adapter->queues;
3383 struct tx_ring *txr = adapter->tx_rings;
3384 int i;
3385
3386 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3387 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3388 if (txr->txr_si != NULL)
3389 softint_disestablish(txr->txr_si);
3390 }
3391 if (que->que_si != NULL)
3392 softint_disestablish(que->que_si);
3393 }
3394 if (adapter->txr_wq != NULL)
3395 workqueue_destroy(adapter->txr_wq);
3396 if (adapter->txr_wq_enqueued != NULL)
3397 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3398 if (adapter->que_wq != NULL)
3399 workqueue_destroy(adapter->que_wq);
3400
3401 /* Drain the Link queue */
3402 if (adapter->link_si != NULL) {
3403 softint_disestablish(adapter->link_si);
3404 adapter->link_si = NULL;
3405 }
3406 if (adapter->mod_si != NULL) {
3407 softint_disestablish(adapter->mod_si);
3408 adapter->mod_si = NULL;
3409 }
3410 if (adapter->msf_si != NULL) {
3411 softint_disestablish(adapter->msf_si);
3412 adapter->msf_si = NULL;
3413 }
3414 if (adapter->phy_si != NULL) {
3415 softint_disestablish(adapter->phy_si);
3416 adapter->phy_si = NULL;
3417 }
3418 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3419 if (adapter->fdir_si != NULL) {
3420 softint_disestablish(adapter->fdir_si);
3421 adapter->fdir_si = NULL;
3422 }
3423 }
3424 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3425 if (adapter->mbx_si != NULL) {
3426 softint_disestablish(adapter->mbx_si);
3427 adapter->mbx_si = NULL;
3428 }
3429 }
3430 } /* ixgbe_free_softint */
3431
3432 /************************************************************************
3433 * ixgbe_detach - Device removal routine
3434 *
3435 * Called when the driver is being removed.
3436 * Stops the adapter and deallocates all the resources
3437 * that were allocated for driver operation.
3438 *
3439 * return 0 on success, positive on failure
3440 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Attach never completed: nothing to tear down. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Release softints/workqueues used for deferred processing. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any in-flight timer callout to finish. */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Driver-global event counters */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue counters (redundant reload: txr still points at ring 0). */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		/*
		 * Hardware stats arrays may be shorter than num_queues;
		 * only detach counters that were actually attached.
		 */
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* Checksum-offload and interrupt accounting */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release descriptor rings, queue state and locks. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3626
3627 /************************************************************************
3628 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3629 *
3630 * Prepare the adapter/port for LPLU and/or WoL
3631 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Callers (suspend/shutdown paths) must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		/* Re-enable PHY resets once LPLU entry has been attempted. */
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power down the PHY and disable all wake sources. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success, otherwise the error from enter_lplu(). */
	return error;
} /* ixgbe_setup_low_power_mode */
3686
3687 /************************************************************************
3688 * ixgbe_shutdown - Shutdown entry point
3689 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Enter LPLU/WoL state under the core lock; see ixgbe_suspend(). */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3706
3707 /************************************************************************
3708 * ixgbe_suspend
3709 *
3710 * From D0 to D3
3711 ************************************************************************/
3712 static bool
3713 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3714 {
3715 struct adapter *adapter = device_private(dev);
3716 int error = 0;
3717
3718 INIT_DEBUGOUT("ixgbe_suspend: begin");
3719
3720 IXGBE_CORE_LOCK(adapter);
3721
3722 error = ixgbe_setup_low_power_mode(adapter);
3723
3724 IXGBE_CORE_UNLOCK(adapter);
3725
3726 return (error);
3727 } /* ixgbe_suspend */
3728
3729 /************************************************************************
3730 * ixgbe_resume
3731 *
3732 * From D3 to D0
3733 ************************************************************************/
3734 static bool
3735 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3736 {
3737 struct adapter *adapter = device_private(dev);
3738 struct ifnet *ifp = adapter->ifp;
3739 struct ixgbe_hw *hw = &adapter->hw;
3740 u32 wus;
3741
3742 INIT_DEBUGOUT("ixgbe_resume: begin");
3743
3744 IXGBE_CORE_LOCK(adapter);
3745
3746 /* Read & clear WUS register */
3747 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3748 if (wus)
3749 device_printf(dev, "Woken up by (WUS): %#010x\n",
3750 IXGBE_READ_REG(hw, IXGBE_WUS));
3751 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3752 /* And clear WUFC until next low-power transition */
3753 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3754
3755 /*
3756 * Required after D3->D0 transition;
3757 * will re-advertise all previous advertised speeds
3758 */
3759 if (ifp->if_flags & IFF_UP)
3760 ixgbe_init_locked(adapter);
3761
3762 IXGBE_CORE_UNLOCK(adapter);
3763
3764 return true;
3765 } /* ixgbe_resume */
3766
3767 /*
3768 * Set the various hardware offload abilities.
3769 *
3770 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3771 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3772 * mbuf offload flags the driver will understand.
3773 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * NOTE(review): deliberately empty on NetBSD — if_hwassist is a
	 * FreeBSD ifnet concept with no direct NetBSD equivalent.  Kept as
	 * a stub so the FreeBSD-derived init path stays structurally intact.
	 */
	/* XXX */
}
3779
3780 /************************************************************************
3781 * ixgbe_init_locked - Init entry point
3782 *
3783 * Used in two ways: It is used by the stack as an init
3784 * entry point in network interface structure. It is also
3785 * used by the driver as a hw/sw initialization routine to
3786 * get to a consistent state.
3787 *
3788 * return 0 on success, positive on failure
3789 ************************************************************************/
3790 static void
3791 ixgbe_init_locked(struct adapter *adapter)
3792 {
3793 struct ifnet *ifp = adapter->ifp;
3794 device_t dev = adapter->dev;
3795 struct ixgbe_hw *hw = &adapter->hw;
3796 struct tx_ring *txr;
3797 struct rx_ring *rxr;
3798 u32 txdctl, mhadd;
3799 u32 rxdctl, rxctrl;
3800 u32 ctrl_ext;
3801 int i, j, err;
3802
3803 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3804
3805 KASSERT(mutex_owned(&adapter->core_mtx));
3806 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3807
3808 hw->adapter_stopped = FALSE;
3809 ixgbe_stop_adapter(hw);
3810 callout_stop(&adapter->timer);
3811
3812 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3813 adapter->max_frame_size =
3814 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3815
3816 /* Queue indices may change with IOV mode */
3817 ixgbe_align_all_queue_indices(adapter);
3818
3819 /* reprogram the RAR[0] in case user changed it. */
3820 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3821
3822 /* Get the latest mac address, User can use a LAA */
3823 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3824 IXGBE_ETH_LENGTH_OF_ADDRESS);
3825 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3826 hw->addr_ctrl.rar_used_count = 1;
3827
3828 /* Set hardware offload abilities from ifnet flags */
3829 ixgbe_set_if_hwassist(adapter);
3830
3831 /* Prepare transmit descriptors and buffers */
3832 if (ixgbe_setup_transmit_structures(adapter)) {
3833 device_printf(dev, "Could not setup transmit structures\n");
3834 ixgbe_stop(adapter);
3835 return;
3836 }
3837
3838 ixgbe_init_hw(hw);
3839
3840 ixgbe_initialize_iov(adapter);
3841
3842 ixgbe_initialize_transmit_units(adapter);
3843
3844 /* Setup Multicast table */
3845 ixgbe_set_multi(adapter);
3846
3847 /* Determine the correct mbuf pool, based on frame size */
3848 if (adapter->max_frame_size <= MCLBYTES)
3849 adapter->rx_mbuf_sz = MCLBYTES;
3850 else
3851 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3852
3853 /* Prepare receive descriptors and buffers */
3854 if (ixgbe_setup_receive_structures(adapter)) {
3855 device_printf(dev, "Could not setup receive structures\n");
3856 ixgbe_stop(adapter);
3857 return;
3858 }
3859
3860 /* Configure RX settings */
3861 ixgbe_initialize_receive_units(adapter);
3862
3863 /* Enable SDP & MSI-X interrupts based on adapter */
3864 ixgbe_config_gpie(adapter);
3865
3866 /* Set MTU size */
3867 if (ifp->if_mtu > ETHERMTU) {
3868 /* aka IXGBE_MAXFRS on 82599 and newer */
3869 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3870 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3871 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3872 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3873 }
3874
3875 /* Now enable all the queues */
3876 for (i = 0; i < adapter->num_queues; i++) {
3877 txr = &adapter->tx_rings[i];
3878 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3879 txdctl |= IXGBE_TXDCTL_ENABLE;
3880 /* Set WTHRESH to 8, burst writeback */
3881 txdctl |= (8 << 16);
3882 /*
3883 * When the internal queue falls below PTHRESH (32),
3884 * start prefetching as long as there are at least
3885 * HTHRESH (1) buffers ready. The values are taken
3886 * from the Intel linux driver 3.8.21.
3887 * Prefetching enables tx line rate even with 1 queue.
3888 */
3889 txdctl |= (32 << 0) | (1 << 8);
3890 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3891 }
3892
3893 for (i = 0; i < adapter->num_queues; i++) {
3894 rxr = &adapter->rx_rings[i];
3895 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3896 if (hw->mac.type == ixgbe_mac_82598EB) {
3897 /*
3898 * PTHRESH = 21
3899 * HTHRESH = 4
3900 * WTHRESH = 8
3901 */
3902 rxdctl &= ~0x3FFFFF;
3903 rxdctl |= 0x080420;
3904 }
3905 rxdctl |= IXGBE_RXDCTL_ENABLE;
3906 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3907 for (j = 0; j < 10; j++) {
3908 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3909 IXGBE_RXDCTL_ENABLE)
3910 break;
3911 else
3912 msec_delay(1);
3913 }
3914 wmb();
3915
3916 /*
3917 * In netmap mode, we must preserve the buffers made
3918 * available to userspace before the if_init()
3919 * (this is true by default on the TX side, because
3920 * init makes all buffers available to userspace).
3921 *
3922 * netmap_reset() and the device specific routines
3923 * (e.g. ixgbe_setup_receive_rings()) map these
3924 * buffers at the end of the NIC ring, so here we
3925 * must set the RDT (tail) register to make sure
3926 * they are not overwritten.
3927 *
3928 * In this driver the NIC ring starts at RDH = 0,
3929 * RDT points to the last slot available for reception (?),
3930 * so RDT = num_rx_desc - 1 means the whole ring is available.
3931 */
3932 #ifdef DEV_NETMAP
3933 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3934 (ifp->if_capenable & IFCAP_NETMAP)) {
3935 struct netmap_adapter *na = NA(adapter->ifp);
3936 struct netmap_kring *kring = &na->rx_rings[i];
3937 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3938
3939 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
3940 } else
3941 #endif /* DEV_NETMAP */
3942 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
3943 adapter->num_rx_desc - 1);
3944 }
3945
3946 /* Enable Receive engine */
3947 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3948 if (hw->mac.type == ixgbe_mac_82598EB)
3949 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3950 rxctrl |= IXGBE_RXCTRL_RXEN;
3951 ixgbe_enable_rx_dma(hw, rxctrl);
3952
3953 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3954
3955 /* Set up MSI/MSI-X routing */
3956 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3957 ixgbe_configure_ivars(adapter);
3958 /* Set up auto-mask */
3959 if (hw->mac.type == ixgbe_mac_82598EB)
3960 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3961 else {
3962 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3963 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3964 }
3965 } else { /* Simple settings for Legacy/MSI */
3966 ixgbe_set_ivar(adapter, 0, 0, 0);
3967 ixgbe_set_ivar(adapter, 0, 0, 1);
3968 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3969 }
3970
3971 ixgbe_init_fdir(adapter);
3972
3973 /*
3974 * Check on any SFP devices that
3975 * need to be kick-started
3976 */
3977 if (hw->phy.type == ixgbe_phy_none) {
3978 err = hw->phy.ops.identify(hw);
3979 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3980 device_printf(dev,
3981 "Unsupported SFP+ module type was detected.\n");
3982 return;
3983 }
3984 }
3985
3986 /* Set moderation on the Link interrupt */
3987 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
3988
3989 /* Enable power to the phy. */
3990 ixgbe_set_phy_power(hw, TRUE);
3991
3992 /* Config/Enable Link */
3993 ixgbe_config_link(adapter);
3994
3995 /* Hardware Packet Buffer & Flow Control setup */
3996 ixgbe_config_delay_values(adapter);
3997
3998 /* Initialize the FC settings */
3999 ixgbe_start_hw(hw);
4000
4001 /* Set up VLAN support and filter */
4002 ixgbe_setup_vlan_hw_support(adapter);
4003
4004 /* Setup DMA Coalescing */
4005 ixgbe_config_dmac(adapter);
4006
4007 /* And now turn on interrupts */
4008 ixgbe_enable_intr(adapter);
4009
4010 /* Enable the use of the MBX by the VF's */
4011 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4012 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4013 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4014 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4015 }
4016
4017 /* Update saved flags. See ixgbe_ifflags_cb() */
4018 adapter->if_flags = ifp->if_flags;
4019
4020 /* Now inform the stack we're ready */
4021 ifp->if_flags |= IFF_RUNNING;
4022
4023 return;
4024 } /* ixgbe_init_locked */
4025
4026 /************************************************************************
4027 * ixgbe_init
4028 ************************************************************************/
4029 static int
4030 ixgbe_init(struct ifnet *ifp)
4031 {
4032 struct adapter *adapter = ifp->if_softc;
4033
4034 IXGBE_CORE_LOCK(adapter);
4035 ixgbe_init_locked(adapter);
4036 IXGBE_CORE_UNLOCK(adapter);
4037
4038 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4039 } /* ixgbe_init */
4040
4041 /************************************************************************
4042 * ixgbe_set_ivar
4043 *
4044 * Setup the correct IVAR register for a particular MSI-X interrupt
4045 * (yes this is all very magic and confusing :)
4046 * - entry is the register array entry
4047 * - vector is the MSI-X vector for this queue
4048 * - type is RX/TX/MISC
4049 ************************************************************************/
4050 static void
4051 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4052 {
4053 struct ixgbe_hw *hw = &adapter->hw;
4054 u32 ivar, index;
4055
4056 vector |= IXGBE_IVAR_ALLOC_VAL;
4057
4058 switch (hw->mac.type) {
4059 case ixgbe_mac_82598EB:
4060 if (type == -1)
4061 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4062 else
4063 entry += (type * 64);
4064 index = (entry >> 2) & 0x1F;
4065 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4066 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4067 ivar |= (vector << (8 * (entry & 0x3)));
4068 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4069 break;
4070 case ixgbe_mac_82599EB:
4071 case ixgbe_mac_X540:
4072 case ixgbe_mac_X550:
4073 case ixgbe_mac_X550EM_x:
4074 case ixgbe_mac_X550EM_a:
4075 if (type == -1) { /* MISC IVAR */
4076 index = (entry & 1) * 8;
4077 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4078 ivar &= ~(0xFF << index);
4079 ivar |= (vector << index);
4080 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4081 } else { /* RX/TX IVARS */
4082 index = (16 * (entry & 1)) + (8 * type);
4083 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4084 ivar &= ~(0xFF << index);
4085 ivar |= (vector << index);
4086 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4087 }
4088 break;
4089 default:
4090 break;
4091 }
4092 } /* ixgbe_set_ivar */
4093
4094 /************************************************************************
4095 * ixgbe_configure_ivars
4096 ************************************************************************/
4097 static void
4098 ixgbe_configure_ivars(struct adapter *adapter)
4099 {
4100 struct ix_queue *que = adapter->queues;
4101 u32 newitr;
4102
4103 if (ixgbe_max_interrupt_rate > 0)
4104 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4105 else {
4106 /*
4107 * Disable DMA coalescing if interrupt moderation is
4108 * disabled.
4109 */
4110 adapter->dmac = 0;
4111 newitr = 0;
4112 }
4113
4114 for (int i = 0; i < adapter->num_queues; i++, que++) {
4115 struct rx_ring *rxr = &adapter->rx_rings[i];
4116 struct tx_ring *txr = &adapter->tx_rings[i];
4117 /* First the RX queue entry */
4118 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4119 /* ... and the TX */
4120 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4121 /* Set an Initial EITR value */
4122 ixgbe_eitr_write(adapter, que->msix, newitr);
4123 /*
4124 * To eliminate influence of the previous state.
4125 * At this point, Tx/Rx interrupt handler
4126 * (ixgbe_msix_que()) cannot be called, so both
4127 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4128 */
4129 que->eitr_setting = 0;
4130 }
4131
4132 /* For the Link interrupt */
4133 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4134 } /* ixgbe_configure_ivars */
4135
4136 /************************************************************************
4137 * ixgbe_config_gpie
4138 ************************************************************************/
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	/* Read-modify-write: preserve bits we do not manage here. */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     | IXGBE_GPIE_EIAME
		     | IXGBE_GPIE_PBA_SUPPORT
		     | IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection: which SDP pin signals it depends on the MAC. */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

} /* ixgbe_config_gpie */
4179
4180 /************************************************************************
4181 * ixgbe_config_delay_values
4182 *
4183 * Requires adapter->max_frame_size to be set.
4184 ************************************************************************/
4185 static void
4186 ixgbe_config_delay_values(struct adapter *adapter)
4187 {
4188 struct ixgbe_hw *hw = &adapter->hw;
4189 u32 rxpb, frame, size, tmp;
4190
4191 frame = adapter->max_frame_size;
4192
4193 /* Calculate High Water */
4194 switch (hw->mac.type) {
4195 case ixgbe_mac_X540:
4196 case ixgbe_mac_X550:
4197 case ixgbe_mac_X550EM_x:
4198 case ixgbe_mac_X550EM_a:
4199 tmp = IXGBE_DV_X540(frame, frame);
4200 break;
4201 default:
4202 tmp = IXGBE_DV(frame, frame);
4203 break;
4204 }
4205 size = IXGBE_BT2KB(tmp);
4206 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4207 hw->fc.high_water[0] = rxpb - size;
4208
4209 /* Now calculate Low Water */
4210 switch (hw->mac.type) {
4211 case ixgbe_mac_X540:
4212 case ixgbe_mac_X550:
4213 case ixgbe_mac_X550EM_x:
4214 case ixgbe_mac_X550EM_a:
4215 tmp = IXGBE_LOW_DV_X540(frame);
4216 break;
4217 default:
4218 tmp = IXGBE_LOW_DV(frame);
4219 break;
4220 }
4221 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4222
4223 hw->fc.pause_time = IXGBE_FC_PAUSE;
4224 hw->fc.send_xon = TRUE;
4225 } /* ixgbe_config_delay_values */
4226
4227 /************************************************************************
4228 * ixgbe_set_multi - Multicast Update
4229 *
4230 * Called whenever multicast address list is updated.
4231 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Rebuild the MC shadow table from scratch on every update. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI when the table would overflow or
		 * when a range of addresses (addrlo != addrhi) is joined,
		 * since exact filtering cannot express a range.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Translate IFF_PROMISC/IFF_ALLMULTI into FCTRL filter bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Only program exact filters when the list fit in the table. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4285
4286 /************************************************************************
4287 * ixgbe_mc_array_itr
4288 *
4289 * An iterator function needed by the multicast shared code.
4290 * It feeds the shared code routine the addresses in the
4291 * array of ixgbe_set_multi() one by one.
4292 ************************************************************************/
4293 static u8 *
4294 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4295 {
4296 struct ixgbe_mc_addr *mta;
4297
4298 mta = (struct ixgbe_mc_addr *)*update_ptr;
4299 *vmdq = mta->vmdq;
4300
4301 *update_ptr = (u8*)(mta + 1);
4302
4303 return (mta->addr);
4304 } /* ixgbe_mc_array_itr */
4305
4306 /************************************************************************
4307 * ixgbe_local_timer - Timer routine
4308 *
4309 * Checks for link status, updates statistics,
4310 * and runs the watchdog check.
4311 ************************************************************************/
/* callout(9) entry: take the core lock and run the real timer body. */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4321
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: sum the per-queue TX DMA setup
	 * error counts into the adapter-wide evcnt totals.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			/* Only rearm queues that are not manually disabled */
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}

out:
	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reinitialize the whole adapter. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4424
4425 /************************************************************************
4426 * ixgbe_sfp_probe
4427 *
4428 * Determine if a port had optics inserted.
4429 ************************************************************************/
static bool
ixgbe_sfp_probe(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	bool result = FALSE;

	/* Only probe when the PHY is NL type and no module was present. */
	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		/* Probe attempt made: stop re-probing from the timer. */
		adapter->sfp_probe = FALSE;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			/* NOTE(review): first message lacks a '\n'. */
			device_printf(dev,"Unsupported SFP+ module detected!");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = TRUE;
	}
out:

	return (result);
} /* ixgbe_sfp_probe */
4458
4459 /************************************************************************
4460 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4461 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * On parts needing the crosstalk fix, confirm via the SDP pin
	 * that a module is actually seated before identifying it; the
	 * interrupt can fire spuriously on an empty cage.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 has no setup_sfp op; a PHY reset serves the same purpose. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Kick the MSF tasklet to (re)negotiate link for the new module. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4509
4510
4511 /************************************************************************
4512 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4513 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Fall back to hardware capabilities when nothing is advertised. */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4539
4540 /************************************************************************
4541 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4542 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	++adapter->phy_sicount.ev_count;
	/* Service the Link Alarm Status Interrupt from the external PHY. */
	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */
4560
4561 static void
4562 ixgbe_ifstop(struct ifnet *ifp, int disable)
4563 {
4564 struct adapter *adapter = ifp->if_softc;
4565
4566 IXGBE_CORE_LOCK(adapter);
4567 ixgbe_stop(adapter);
4568 IXGBE_CORE_UNLOCK(adapter);
4569 }
4570
4571 /************************************************************************
4572 * ixgbe_stop - Stop the hardware
4573 *
4574 * Disables all traffic on the adapter by issuing a
4575 * global reset on the MAC and deallocates TX/RX buffers.
4576 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so ixgbe_stop_adapter() actually runs. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4612
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 *
 * Must be called with the core lock held (asserted below).
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition. */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.  (Reading clears the counters.)
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition. */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4701
4702 /************************************************************************
4703 * ixgbe_config_dmac - Configure DMA Coalescing
4704 ************************************************************************/
4705 static void
4706 ixgbe_config_dmac(struct adapter *adapter)
4707 {
4708 struct ixgbe_hw *hw = &adapter->hw;
4709 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4710
4711 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4712 return;
4713
4714 if (dcfg->watchdog_timer ^ adapter->dmac ||
4715 dcfg->link_speed ^ adapter->link_speed) {
4716 dcfg->watchdog_timer = adapter->dmac;
4717 dcfg->fcoe_en = false;
4718 dcfg->link_speed = adapter->link_speed;
4719 dcfg->num_tcs = 1;
4720
4721 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4722 dcfg->watchdog_timer, dcfg->link_speed);
4723
4724 hw->mac.ops.dmac_config(hw);
4725 }
4726 } /* ixgbe_config_dmac */
4727
/************************************************************************
 * ixgbe_enable_intr
 *
 * Builds and programs the EIMS (interrupt enable) mask for the current
 * MAC type and enabled features, sets EIAC auto-clear for MSI-X, and
 * then enables every queue interrupt individually.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from everything-enabled minus the per-queue bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4811
/************************************************************************
 * ixgbe_disable_intr_internal
 *
 * Masks all non-queue interrupts, clears MSI-X auto-clear, then
 * disables each queue interrupt.  "nestok" is forwarded to
 * ixgbe_disable_queue_internal(); presumably it controls whether
 * nested disable calls are tolerated — TODO confirm at that function.
 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4832
/************************************************************************
 * ixgbe_disable_intr
 *
 * Disable all interrupts, allowing nested queue-disable calls.
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4842
/************************************************************************
 * ixgbe_ensure_disabled_intr
 *
 * Like ixgbe_disable_intr() but with nestok=false; used where the
 * caller requires interrupts to end up disabled unconditionally.
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4852
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 * Single-vector (INTx/MSI) interrupt handler: reads EICR once and
 * dispatches RX/TX work, fan-failure, link, SFP and external-PHY
 * events.  Returns 0 if the interrupt was not ours, 1 otherwise.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR acknowledges the pending causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Not our interrupt (shared line); count and re-enable. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			/* Module insertion/removal: defer to softint. */
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			/* Multispeed fiber event: defer to softint. */
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Deferred RX/TX work pending: schedule it; else re-arm now. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4946
4947 /************************************************************************
4948 * ixgbe_free_pciintr_resources
4949 ************************************************************************/
4950 static void
4951 ixgbe_free_pciintr_resources(struct adapter *adapter)
4952 {
4953 struct ix_queue *que = adapter->queues;
4954 int rid;
4955
4956 /*
4957 * Release all msix queue resources:
4958 */
4959 for (int i = 0; i < adapter->num_queues; i++, que++) {
4960 if (que->res != NULL) {
4961 pci_intr_disestablish(adapter->osdep.pc,
4962 adapter->osdep.ihs[i]);
4963 adapter->osdep.ihs[i] = NULL;
4964 }
4965 }
4966
4967 /* Clean the Legacy or Link interrupt last */
4968 if (adapter->vector) /* we are doing MSIX */
4969 rid = adapter->vector;
4970 else
4971 rid = 0;
4972
4973 if (adapter->osdep.ihs[rid] != NULL) {
4974 pci_intr_disestablish(adapter->osdep.pc,
4975 adapter->osdep.ihs[rid]);
4976 adapter->osdep.ihs[rid] = NULL;
4977 }
4978
4979 if (adapter->osdep.intrs != NULL) {
4980 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
4981 adapter->osdep.nintrs);
4982 adapter->osdep.intrs = NULL;
4983 }
4984 } /* ixgbe_free_pciintr_resources */
4985
4986 /************************************************************************
4987 * ixgbe_free_pci_resources
4988 ************************************************************************/
4989 static void
4990 ixgbe_free_pci_resources(struct adapter *adapter)
4991 {
4992
4993 ixgbe_free_pciintr_resources(adapter);
4994
4995 if (adapter->osdep.mem_size != 0) {
4996 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
4997 adapter->osdep.mem_bus_space_handle,
4998 adapter->osdep.mem_size);
4999 }
5000
5001 } /* ixgbe_free_pci_resources */
5002
5003 /************************************************************************
5004 * ixgbe_set_sysctl_value
5005 ************************************************************************/
5006 static void
5007 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5008 const char *description, int *limit, int value)
5009 {
5010 device_t dev = adapter->dev;
5011 struct sysctllog **log;
5012 const struct sysctlnode *rnode, *cnode;
5013
5014 log = &adapter->sysctllog;
5015 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5016 aprint_error_dev(dev, "could not create sysctl root\n");
5017 return;
5018 }
5019 if (sysctl_createv(log, 0, &rnode, &cnode,
5020 CTLFLAG_READWRITE, CTLTYPE_INT,
5021 name, SYSCTL_DESCR(description),
5022 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5023 aprint_error_dev(dev, "could not create sysctl\n");
5024 *limit = value;
5025 } /* ixgbe_set_sysctl_value */
5026
5027 /************************************************************************
5028 * ixgbe_sysctl_flowcntl
5029 *
5030 * SYSCTL wrapper around setting Flow Control
5031 ************************************************************************/
5032 static int
5033 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5034 {
5035 struct sysctlnode node = *rnode;
5036 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5037 int error, fc;
5038
5039 fc = adapter->hw.fc.current_mode;
5040 node.sysctl_data = &fc;
5041 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5042 if (error != 0 || newp == NULL)
5043 return error;
5044
5045 /* Don't bother if it's not changed */
5046 if (fc == adapter->hw.fc.current_mode)
5047 return (0);
5048
5049 return ixgbe_set_flowcntl(adapter, fc);
5050 } /* ixgbe_sysctl_flowcntl */
5051
5052 /************************************************************************
5053 * ixgbe_set_flowcntl - Set flow control
5054 *
5055 * Flow control values:
5056 * 0 - off
5057 * 1 - rx pause
5058 * 2 - tx pause
5059 * 3 - full
5060 ************************************************************************/
5061 static int
5062 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5063 {
5064 switch (fc) {
5065 case ixgbe_fc_rx_pause:
5066 case ixgbe_fc_tx_pause:
5067 case ixgbe_fc_full:
5068 adapter->hw.fc.requested_mode = fc;
5069 if (adapter->num_queues > 1)
5070 ixgbe_disable_rx_drop(adapter);
5071 break;
5072 case ixgbe_fc_none:
5073 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5074 if (adapter->num_queues > 1)
5075 ixgbe_enable_rx_drop(adapter);
5076 break;
5077 default:
5078 return (EINVAL);
5079 }
5080
5081 #if 0 /* XXX NetBSD */
5082 /* Don't autoneg if forcing a value */
5083 adapter->hw.fc.disable_fc_autoneg = TRUE;
5084 #endif
5085 ixgbe_fc_enable(&adapter->hw);
5086
5087 return (0);
5088 } /* ixgbe_set_flowcntl */
5089
5090 /************************************************************************
5091 * ixgbe_enable_rx_drop
5092 *
5093 * Enable the hardware to drop packets when the buffer is
5094 * full. This is useful with multiqueue, so that no single
5095 * queue being full stalls the entire RX engine. We only
5096 * enable this when Multiqueue is enabled AND Flow Control
5097 * is disabled.
5098 ************************************************************************/
5099 static void
5100 ixgbe_enable_rx_drop(struct adapter *adapter)
5101 {
5102 struct ixgbe_hw *hw = &adapter->hw;
5103 struct rx_ring *rxr;
5104 u32 srrctl;
5105
5106 for (int i = 0; i < adapter->num_queues; i++) {
5107 rxr = &adapter->rx_rings[i];
5108 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5109 srrctl |= IXGBE_SRRCTL_DROP_EN;
5110 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5111 }
5112
5113 /* enable drop for each vf */
5114 for (int i = 0; i < adapter->num_vfs; i++) {
5115 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5116 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5117 IXGBE_QDE_ENABLE));
5118 }
5119 } /* ixgbe_enable_rx_drop */
5120
5121 /************************************************************************
5122 * ixgbe_disable_rx_drop
5123 ************************************************************************/
5124 static void
5125 ixgbe_disable_rx_drop(struct adapter *adapter)
5126 {
5127 struct ixgbe_hw *hw = &adapter->hw;
5128 struct rx_ring *rxr;
5129 u32 srrctl;
5130
5131 for (int i = 0; i < adapter->num_queues; i++) {
5132 rxr = &adapter->rx_rings[i];
5133 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5134 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5135 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5136 }
5137
5138 /* disable drop for each vf */
5139 for (int i = 0; i < adapter->num_vfs; i++) {
5140 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5141 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5142 }
5143 } /* ixgbe_disable_rx_drop */
5144
5145 /************************************************************************
5146 * ixgbe_sysctl_advertise
5147 *
5148 * SYSCTL wrapper around setting advertised speed
5149 ************************************************************************/
5150 static int
5151 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5152 {
5153 struct sysctlnode node = *rnode;
5154 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5155 int error = 0, advertise;
5156
5157 advertise = adapter->advertise;
5158 node.sysctl_data = &advertise;
5159 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5160 if (error != 0 || newp == NULL)
5161 return error;
5162
5163 return ixgbe_set_advertise(adapter, advertise);
5164 } /* ixgbe_sysctl_advertise */
5165
5166 /************************************************************************
5167 * ixgbe_set_advertise - Control advertised link speed
5168 *
5169 * Flags:
5170 * 0x00 - Default (all capable link speed)
5171 * 0x01 - advertise 100 Mb
5172 * 0x02 - advertise 1G
5173 * 0x04 - advertise 10G
5174 * 0x08 - advertise 10 Mb
5175 * 0x10 - advertise 2.5G
5176 * 0x20 - advertise 5G
5177 ************************************************************************/
5178 static int
5179 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5180 {
5181 device_t dev;
5182 struct ixgbe_hw *hw;
5183 ixgbe_link_speed speed = 0;
5184 ixgbe_link_speed link_caps = 0;
5185 s32 err = IXGBE_NOT_IMPLEMENTED;
5186 bool negotiate = FALSE;
5187
5188 /* Checks to validate new value */
5189 if (adapter->advertise == advertise) /* no change */
5190 return (0);
5191
5192 dev = adapter->dev;
5193 hw = &adapter->hw;
5194
5195 /* No speed changes for backplane media */
5196 if (hw->phy.media_type == ixgbe_media_type_backplane)
5197 return (ENODEV);
5198
5199 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5200 (hw->phy.multispeed_fiber))) {
5201 device_printf(dev,
5202 "Advertised speed can only be set on copper or "
5203 "multispeed fiber media types.\n");
5204 return (EINVAL);
5205 }
5206
5207 if (advertise < 0x0 || advertise > 0x2f) {
5208 device_printf(dev,
5209 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5210 return (EINVAL);
5211 }
5212
5213 if (hw->mac.ops.get_link_capabilities) {
5214 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5215 &negotiate);
5216 if (err != IXGBE_SUCCESS) {
5217 device_printf(dev, "Unable to determine supported advertise speeds\n");
5218 return (ENODEV);
5219 }
5220 }
5221
5222 /* Set new value and report new advertised mode */
5223 if (advertise & 0x1) {
5224 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5225 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5226 return (EINVAL);
5227 }
5228 speed |= IXGBE_LINK_SPEED_100_FULL;
5229 }
5230 if (advertise & 0x2) {
5231 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5232 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5233 return (EINVAL);
5234 }
5235 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5236 }
5237 if (advertise & 0x4) {
5238 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5239 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5240 return (EINVAL);
5241 }
5242 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5243 }
5244 if (advertise & 0x8) {
5245 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5246 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5247 return (EINVAL);
5248 }
5249 speed |= IXGBE_LINK_SPEED_10_FULL;
5250 }
5251 if (advertise & 0x10) {
5252 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5253 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5254 return (EINVAL);
5255 }
5256 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5257 }
5258 if (advertise & 0x20) {
5259 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5260 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5261 return (EINVAL);
5262 }
5263 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5264 }
5265 if (advertise == 0)
5266 speed = link_caps; /* All capable link speed */
5267
5268 hw->mac.autotry_restart = TRUE;
5269 hw->mac.ops.setup_link(hw, speed, TRUE);
5270 adapter->advertise = advertise;
5271
5272 return (0);
5273 } /* ixgbe_set_advertise */
5274
5275 /************************************************************************
5276 * ixgbe_get_advertise - Get current advertised speed settings
5277 *
5278 * Formatted for sysctl usage.
5279 * Flags:
5280 * 0x01 - advertise 100 Mb
5281 * 0x02 - advertise 1G
5282 * 0x04 - advertise 10G
5283 * 0x08 - advertise 10 Mb (yes, Mb)
5284 * 0x10 - advertise 2.5G
5285 * 0x20 - advertise 5G
5286 ************************************************************************/
5287 static int
5288 ixgbe_get_advertise(struct adapter *adapter)
5289 {
5290 struct ixgbe_hw *hw = &adapter->hw;
5291 int speed;
5292 ixgbe_link_speed link_caps = 0;
5293 s32 err;
5294 bool negotiate = FALSE;
5295
5296 /*
5297 * Advertised speed means nothing unless it's copper or
5298 * multi-speed fiber
5299 */
5300 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5301 !(hw->phy.multispeed_fiber))
5302 return (0);
5303
5304 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5305 if (err != IXGBE_SUCCESS)
5306 return (0);
5307
5308 speed =
5309 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5310 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5311 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5312 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5313 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5314 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5315
5316 return speed;
5317 } /* ixgbe_get_advertise */
5318
5319 /************************************************************************
5320 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5321 *
5322 * Control values:
5323 * 0/1 - off / on (use default value of 1000)
5324 *
5325 * Legal timer values are:
5326 * 50,100,250,500,1000,2000,5000,10000
5327 *
5328 * Turning off interrupt moderation will also turn this off.
5329 ************************************************************************/
5330 static int
5331 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5332 {
5333 struct sysctlnode node = *rnode;
5334 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5335 struct ifnet *ifp = adapter->ifp;
5336 int error;
5337 int newval;
5338
5339 newval = adapter->dmac;
5340 node.sysctl_data = &newval;
5341 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5342 if ((error) || (newp == NULL))
5343 return (error);
5344
5345 switch (newval) {
5346 case 0:
5347 /* Disabled */
5348 adapter->dmac = 0;
5349 break;
5350 case 1:
5351 /* Enable and use default */
5352 adapter->dmac = 1000;
5353 break;
5354 case 50:
5355 case 100:
5356 case 250:
5357 case 500:
5358 case 1000:
5359 case 2000:
5360 case 5000:
5361 case 10000:
5362 /* Legal values - allow */
5363 adapter->dmac = newval;
5364 break;
5365 default:
5366 /* Do nothing, illegal value */
5367 return (EINVAL);
5368 }
5369
5370 /* Re-initialize hardware if it's already running */
5371 if (ifp->if_flags & IFF_RUNNING)
5372 ifp->if_init(ifp);
5373
5374 return (0);
5375 }
5376
5377 #ifdef IXGBE_DEBUG
5378 /************************************************************************
5379 * ixgbe_sysctl_power_state
5380 *
5381 * Sysctl to test power states
5382 * Values:
5383 * 0 - set device to D0
5384 * 3 - set device to D3
5385 * (none) - get current device power state
5386 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this "notyet" body is unported FreeBSD code — it
	 * uses req->newp, pci_get_powerstate() and DEVICE_SUSPEND/RESUME,
	 * which do not exist in this form on NetBSD.  It will need a real
	 * port before the #ifdef can be dropped.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Unimplemented on NetBSD: reads return success with no data. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5419 #endif
5420
5421 /************************************************************************
5422 * ixgbe_sysctl_wol_enable
5423 *
5424 * Sysctl to enable/disable the WoL capability,
5425 * if supported by the adapter.
5426 *
5427 * Values:
5428 * 0 - disabled
5429 * 1 - enabled
5430 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	/*
	 * NOTE(review): a bool is handed to sysctl_lookup() here; this is
	 * only safe if the node was created with a bool-sized type —
	 * verify against the sysctl_createv() call for "wol_enable".
	 */
	bool new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	/* No change requested: nothing to do. */
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware that doesn't support it. */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
5455
/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 * Sysctl to enable/disable the types of packets that the
 * adapter will wake up on upon receipt.
 * Flags:
 *   0x1  - Link Status Change
 *   0x2  - Magic Packet
 *   0x4  - Direct Exact
 *   0x8  - Directed Multicast
 *   0x10 - Broadcast
 *   0x20 - ARP/IPv4 Request Packet
 *   0x40 - Direct IPv4 Packet
 *   0x80 - Direct IPv6 Packet
 *
 * Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the low byte carries user-settable flags. */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * Replace the low byte with the user's flags while keeping
	 * bits 8-23 of the existing value intact (matches upstream
	 * FreeBSD behaviour).
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5498
5499 #ifdef IXGBE_DEBUG
5500 /************************************************************************
5501 * ixgbe_sysctl_print_rss_config
5502 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): unported FreeBSD code — relies on sbuf(9) and the
	 * FreeBSD sysctl "req", neither available here.  Kept for a future
	 * port; currently the function always returns 0.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5556 #endif /* IXGBE_DEBUG */
5557
5558 /************************************************************************
5559 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5560 *
5561 * For X552/X557-AT devices using an external PHY
5562 ************************************************************************/
5563 static int
5564 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5565 {
5566 struct sysctlnode node = *rnode;
5567 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5568 struct ixgbe_hw *hw = &adapter->hw;
5569 int val;
5570 u16 reg;
5571 int error;
5572
5573 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5574 device_printf(adapter->dev,
5575 "Device has no supported external thermal sensor.\n");
5576 return (ENODEV);
5577 }
5578
5579 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5580 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5581 device_printf(adapter->dev,
5582 "Error reading from PHY's current temperature register\n");
5583 return (EAGAIN);
5584 }
5585
5586 node.sysctl_data = &val;
5587
5588 /* Shift temp for output */
5589 val = reg >> 8;
5590
5591 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5592 if ((error) || (newp == NULL))
5593 return (error);
5594
5595 return (0);
5596 } /* ixgbe_sysctl_phy_temp */
5597
5598 /************************************************************************
5599 * ixgbe_sysctl_phy_overtemp_occurred
5600 *
5601 * Reports (directly from the PHY) whether the current PHY
5602 * temperature is over the overtemp threshold.
5603 ************************************************************************/
5604 static int
5605 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5606 {
5607 struct sysctlnode node = *rnode;
5608 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5609 struct ixgbe_hw *hw = &adapter->hw;
5610 int val, error;
5611 u16 reg;
5612
5613 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5614 device_printf(adapter->dev,
5615 "Device has no supported external thermal sensor.\n");
5616 return (ENODEV);
5617 }
5618
5619 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5620 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5621 device_printf(adapter->dev,
5622 "Error reading from PHY's temperature status register\n");
5623 return (EAGAIN);
5624 }
5625
5626 node.sysctl_data = &val;
5627
5628 /* Get occurrence bit */
5629 val = !!(reg & 0x4000);
5630
5631 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5632 if ((error) || (newp == NULL))
5633 return (error);
5634
5635 return (0);
5636 } /* ixgbe_sysctl_phy_overtemp_occurred */
5637
5638 /************************************************************************
5639 * ixgbe_sysctl_eee_state
5640 *
5641 * Sysctl to set EEE power saving feature
5642 * Values:
5643 * 0 - disable EEE
5644 * 1 - enable EEE
5645 * (none) - get current device EEE state
5646 ************************************************************************/
5647 static int
5648 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5649 {
5650 struct sysctlnode node = *rnode;
5651 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5652 struct ifnet *ifp = adapter->ifp;
5653 device_t dev = adapter->dev;
5654 int curr_eee, new_eee, error = 0;
5655 s32 retval;
5656
5657 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5658 node.sysctl_data = &new_eee;
5659 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5660 if ((error) || (newp == NULL))
5661 return (error);
5662
5663 /* Nothing to do */
5664 if (new_eee == curr_eee)
5665 return (0);
5666
5667 /* Not supported */
5668 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5669 return (EINVAL);
5670
5671 /* Bounds checking */
5672 if ((new_eee < 0) || (new_eee > 1))
5673 return (EINVAL);
5674
5675 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5676 if (retval) {
5677 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5678 return (EINVAL);
5679 }
5680
5681 /* Restart auto-neg */
5682 ifp->if_init(ifp);
5683
5684 device_printf(dev, "New EEE state: %d\n", new_eee);
5685
5686 /* Cache new value */
5687 if (new_eee)
5688 adapter->feat_en |= IXGBE_FEATURE_EEE;
5689 else
5690 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5691
5692 return (error);
5693 } /* ixgbe_sysctl_eee_state */
5694
5695 /************************************************************************
5696 * ixgbe_init_device_features
5697 ************************************************************************/
5698 static void
5699 ixgbe_init_device_features(struct adapter *adapter)
5700 {
5701 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5702 | IXGBE_FEATURE_RSS
5703 | IXGBE_FEATURE_MSI
5704 | IXGBE_FEATURE_MSIX
5705 | IXGBE_FEATURE_LEGACY_IRQ
5706 | IXGBE_FEATURE_LEGACY_TX;
5707
5708 /* Set capabilities first... */
5709 switch (adapter->hw.mac.type) {
5710 case ixgbe_mac_82598EB:
5711 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5712 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5713 break;
5714 case ixgbe_mac_X540:
5715 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5716 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5717 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5718 (adapter->hw.bus.func == 0))
5719 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5720 break;
5721 case ixgbe_mac_X550:
5722 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5723 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5724 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5725 break;
5726 case ixgbe_mac_X550EM_x:
5727 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5728 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5729 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5730 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5731 break;
5732 case ixgbe_mac_X550EM_a:
5733 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5734 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5735 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5736 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5737 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5738 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5739 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5740 }
5741 break;
5742 case ixgbe_mac_82599EB:
5743 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5744 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5745 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5746 (adapter->hw.bus.func == 0))
5747 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5748 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5749 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5750 break;
5751 default:
5752 break;
5753 }
5754
5755 /* Enabled by default... */
5756 /* Fan failure detection */
5757 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5758 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5759 /* Netmap */
5760 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
5761 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
5762 /* EEE */
5763 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
5764 adapter->feat_en |= IXGBE_FEATURE_EEE;
5765 /* Thermal Sensor */
5766 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5767 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5768
5769 /* Enabled via global sysctl... */
5770 /* Flow Director */
5771 if (ixgbe_enable_fdir) {
5772 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
5773 adapter->feat_en |= IXGBE_FEATURE_FDIR;
5774 else
5775 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5776 }
5777 /* Legacy (single queue) transmit */
5778 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
5779 ixgbe_enable_legacy_tx)
5780 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
5781 /*
5782 * Message Signal Interrupts - Extended (MSI-X)
5783 * Normal MSI is only enabled if MSI-X calls fail.
5784 */
5785 if (!ixgbe_enable_msix)
5786 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
5787 /* Receive-Side Scaling (RSS) */
5788 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5789 adapter->feat_en |= IXGBE_FEATURE_RSS;
5790
5791 /* Disable features with unmet dependencies... */
5792 /* No MSI-X */
5793 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
5794 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5795 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5796 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5797 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5798 }
5799 } /* ixgbe_init_device_features */
5800
5801 /************************************************************************
5802 * ixgbe_probe - Device identification routine
5803 *
5804 * Determines if the driver should be loaded on
5805 * adapter based on its PCI vendor/device ID.
5806 *
5807 * return BUS_PROBE_DEFAULT on success, positive on failure
5808 ************************************************************************/
5809 static int
5810 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5811 {
5812 const struct pci_attach_args *pa = aux;
5813
5814 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5815 }
5816
5817 static ixgbe_vendor_info_t *
5818 ixgbe_lookup(const struct pci_attach_args *pa)
5819 {
5820 ixgbe_vendor_info_t *ent;
5821 pcireg_t subid;
5822
5823 INIT_DEBUGOUT("ixgbe_lookup: begin");
5824
5825 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5826 return NULL;
5827
5828 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5829
5830 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5831 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5832 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5833 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5834 (ent->subvendor_id == 0)) &&
5835 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5836 (ent->subdevice_id == 0))) {
5837 ++ixgbe_total_ports;
5838 return ent;
5839 }
5840 }
5841 return NULL;
5842 }
5843
5844 static int
5845 ixgbe_ifflags_cb(struct ethercom *ec)
5846 {
5847 struct ifnet *ifp = &ec->ec_if;
5848 struct adapter *adapter = ifp->if_softc;
5849 int change, rc = 0;
5850
5851 IXGBE_CORE_LOCK(adapter);
5852
5853 change = ifp->if_flags ^ adapter->if_flags;
5854 if (change != 0)
5855 adapter->if_flags = ifp->if_flags;
5856
5857 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5858 rc = ENETRESET;
5859 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5860 ixgbe_set_promisc(adapter);
5861
5862 /* Set up VLAN support and filter */
5863 ixgbe_setup_vlan_hw_support(adapter);
5864
5865 IXGBE_CORE_UNLOCK(adapter);
5866
5867 return rc;
5868 }
5869
5870 /************************************************************************
5871 * ixgbe_ioctl - Ioctl entry point
5872 *
5873 * Called when the user wants to configure the interface.
5874 *
5875 * return 0 on success, positive on failure
5876 ************************************************************************/
5877 static int
5878 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
5879 {
5880 struct adapter *adapter = ifp->if_softc;
5881 struct ixgbe_hw *hw = &adapter->hw;
5882 struct ifcapreq *ifcr = data;
5883 struct ifreq *ifr = data;
5884 int error = 0;
5885 int l4csum_en;
5886 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
5887 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
5888
5889 switch (command) {
5890 case SIOCSIFFLAGS:
5891 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5892 break;
5893 case SIOCADDMULTI:
5894 case SIOCDELMULTI:
5895 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
5896 break;
5897 case SIOCSIFMEDIA:
5898 case SIOCGIFMEDIA:
5899 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5900 break;
5901 case SIOCSIFCAP:
5902 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5903 break;
5904 case SIOCSIFMTU:
5905 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5906 break;
5907 #ifdef __NetBSD__
5908 case SIOCINITIFADDR:
5909 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
5910 break;
5911 case SIOCGIFFLAGS:
5912 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
5913 break;
5914 case SIOCGIFAFLAG_IN:
5915 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
5916 break;
5917 case SIOCGIFADDR:
5918 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
5919 break;
5920 case SIOCGIFMTU:
5921 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
5922 break;
5923 case SIOCGIFCAP:
5924 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
5925 break;
5926 case SIOCGETHERCAP:
5927 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
5928 break;
5929 case SIOCGLIFADDR:
5930 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
5931 break;
5932 case SIOCZIFDATA:
5933 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
5934 hw->mac.ops.clear_hw_cntrs(hw);
5935 ixgbe_clear_evcnt(adapter);
5936 break;
5937 case SIOCAIFADDR:
5938 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
5939 break;
5940 #endif
5941 default:
5942 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
5943 break;
5944 }
5945
5946 switch (command) {
5947 case SIOCSIFMEDIA:
5948 case SIOCGIFMEDIA:
5949 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
5950 case SIOCGI2C:
5951 {
5952 struct ixgbe_i2c_req i2c;
5953
5954 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5955 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5956 if (error != 0)
5957 break;
5958 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5959 error = EINVAL;
5960 break;
5961 }
5962 if (i2c.len > sizeof(i2c.data)) {
5963 error = EINVAL;
5964 break;
5965 }
5966
5967 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
5968 i2c.dev_addr, i2c.data);
5969 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5970 break;
5971 }
5972 case SIOCSIFCAP:
5973 /* Layer-4 Rx checksum offload has to be turned on and
5974 * off as a unit.
5975 */
5976 l4csum_en = ifcr->ifcr_capenable & l4csum;
5977 if (l4csum_en != l4csum && l4csum_en != 0)
5978 return EINVAL;
5979 /*FALLTHROUGH*/
5980 case SIOCADDMULTI:
5981 case SIOCDELMULTI:
5982 case SIOCSIFFLAGS:
5983 case SIOCSIFMTU:
5984 default:
5985 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
5986 return error;
5987 if ((ifp->if_flags & IFF_RUNNING) == 0)
5988 ;
5989 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
5990 IXGBE_CORE_LOCK(adapter);
5991 if ((ifp->if_flags & IFF_RUNNING) != 0)
5992 ixgbe_init_locked(adapter);
5993 ixgbe_recalculate_max_frame(adapter);
5994 IXGBE_CORE_UNLOCK(adapter);
5995 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
5996 /*
5997 * Multicast list has changed; set the hardware filter
5998 * accordingly.
5999 */
6000 IXGBE_CORE_LOCK(adapter);
6001 ixgbe_disable_intr(adapter);
6002 ixgbe_set_multi(adapter);
6003 ixgbe_enable_intr(adapter);
6004 IXGBE_CORE_UNLOCK(adapter);
6005 }
6006 return 0;
6007 }
6008
6009 return error;
6010 } /* ixgbe_ioctl */
6011
6012 /************************************************************************
6013 * ixgbe_check_fan_failure
6014 ************************************************************************/
6015 static void
6016 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6017 {
6018 u32 mask;
6019
6020 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6021 IXGBE_ESDP_SDP1;
6022
6023 if (reg & mask)
6024 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6025 } /* ixgbe_check_fan_failure */
6026
6027 /************************************************************************
6028 * ixgbe_handle_que
6029 ************************************************************************/
6030 static void
6031 ixgbe_handle_que(void *context)
6032 {
6033 struct ix_queue *que = context;
6034 struct adapter *adapter = que->adapter;
6035 struct tx_ring *txr = que->txr;
6036 struct ifnet *ifp = adapter->ifp;
6037 bool more = false;
6038
6039 que->handleq.ev_count++;
6040
6041 if (ifp->if_flags & IFF_RUNNING) {
6042 more = ixgbe_rxeof(que);
6043 IXGBE_TX_LOCK(txr);
6044 more |= ixgbe_txeof(txr);
6045 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6046 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6047 ixgbe_mq_start_locked(ifp, txr);
6048 /* Only for queue 0 */
6049 /* NetBSD still needs this for CBQ */
6050 if ((&adapter->queues[0] == que)
6051 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6052 ixgbe_legacy_start_locked(ifp, txr);
6053 IXGBE_TX_UNLOCK(txr);
6054 }
6055
6056 if (more) {
6057 que->req.ev_count++;
6058 ixgbe_sched_handle_que(adapter, que);
6059 } else if (que->res != NULL) {
6060 /* Re-enable this interrupt */
6061 ixgbe_enable_queue(adapter, que->msix);
6062 } else
6063 ixgbe_enable_intr(adapter);
6064
6065 return;
6066 } /* ixgbe_handle_que */
6067
6068 /************************************************************************
6069 * ixgbe_handle_que_work
6070 ************************************************************************/
6071 static void
6072 ixgbe_handle_que_work(struct work *wk, void *context)
6073 {
6074 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6075
6076 /*
6077 * "enqueued flag" is not required here.
6078 * See ixgbe_msix_que().
6079 */
6080 ixgbe_handle_que(que);
6081 }
6082
6083 /************************************************************************
6084 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6085 ************************************************************************/
6086 static int
6087 ixgbe_allocate_legacy(struct adapter *adapter,
6088 const struct pci_attach_args *pa)
6089 {
6090 device_t dev = adapter->dev;
6091 struct ix_queue *que = adapter->queues;
6092 struct tx_ring *txr = adapter->tx_rings;
6093 int counts[PCI_INTR_TYPE_SIZE];
6094 pci_intr_type_t intr_type, max_type;
6095 char intrbuf[PCI_INTRSTR_LEN];
6096 const char *intrstr = NULL;
6097
6098 /* We allocate a single interrupt resource */
6099 max_type = PCI_INTR_TYPE_MSI;
6100 counts[PCI_INTR_TYPE_MSIX] = 0;
6101 counts[PCI_INTR_TYPE_MSI] =
6102 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6103 /* Check not feat_en but feat_cap to fallback to INTx */
6104 counts[PCI_INTR_TYPE_INTX] =
6105 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6106
6107 alloc_retry:
6108 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6109 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6110 return ENXIO;
6111 }
6112 adapter->osdep.nintrs = 1;
6113 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6114 intrbuf, sizeof(intrbuf));
6115 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6116 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6117 device_xname(dev));
6118 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6119 if (adapter->osdep.ihs[0] == NULL) {
6120 aprint_error_dev(dev,"unable to establish %s\n",
6121 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6122 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6123 adapter->osdep.intrs = NULL;
6124 switch (intr_type) {
6125 case PCI_INTR_TYPE_MSI:
6126 /* The next try is for INTx: Disable MSI */
6127 max_type = PCI_INTR_TYPE_INTX;
6128 counts[PCI_INTR_TYPE_INTX] = 1;
6129 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6130 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6131 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6132 goto alloc_retry;
6133 } else
6134 break;
6135 case PCI_INTR_TYPE_INTX:
6136 default:
6137 /* See below */
6138 break;
6139 }
6140 }
6141 if (intr_type == PCI_INTR_TYPE_INTX) {
6142 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6143 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6144 }
6145 if (adapter->osdep.ihs[0] == NULL) {
6146 aprint_error_dev(dev,
6147 "couldn't establish interrupt%s%s\n",
6148 intrstr ? " at " : "", intrstr ? intrstr : "");
6149 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6150 adapter->osdep.intrs = NULL;
6151 return ENXIO;
6152 }
6153 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6154 /*
6155 * Try allocating a fast interrupt and the associated deferred
6156 * processing contexts.
6157 */
6158 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6159 txr->txr_si =
6160 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6161 ixgbe_deferred_mq_start, txr);
6162 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6163 ixgbe_handle_que, que);
6164
6165 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6166 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6167 aprint_error_dev(dev,
6168 "could not establish software interrupts\n");
6169
6170 return ENXIO;
6171 }
6172 /* For simplicity in the handlers */
6173 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6174
6175 return (0);
6176 } /* ixgbe_allocate_legacy */
6177
6178 /************************************************************************
6179 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6180 ************************************************************************/
6181 static int
6182 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6183 {
6184 device_t dev = adapter->dev;
6185 struct ix_queue *que = adapter->queues;
6186 struct tx_ring *txr = adapter->tx_rings;
6187 pci_chipset_tag_t pc;
6188 char intrbuf[PCI_INTRSTR_LEN];
6189 char intr_xname[32];
6190 char wqname[MAXCOMLEN];
6191 const char *intrstr = NULL;
6192 int error, vector = 0;
6193 int cpu_id = 0;
6194 kcpuset_t *affinity;
6195 #ifdef RSS
6196 unsigned int rss_buckets = 0;
6197 kcpuset_t cpu_mask;
6198 #endif
6199
6200 pc = adapter->osdep.pc;
6201 #ifdef RSS
6202 /*
6203 * If we're doing RSS, the number of queues needs to
6204 * match the number of RSS buckets that are configured.
6205 *
6206 * + If there's more queues than RSS buckets, we'll end
6207 * up with queues that get no traffic.
6208 *
6209 * + If there's more RSS buckets than queues, we'll end
6210 * up having multiple RSS buckets map to the same queue,
6211 * so there'll be some contention.
6212 */
6213 rss_buckets = rss_getnumbuckets();
6214 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6215 (adapter->num_queues != rss_buckets)) {
6216 device_printf(dev,
6217 "%s: number of queues (%d) != number of RSS buckets (%d)"
6218 "; performance will be impacted.\n",
6219 __func__, adapter->num_queues, rss_buckets);
6220 }
6221 #endif
6222
6223 adapter->osdep.nintrs = adapter->num_queues + 1;
6224 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6225 adapter->osdep.nintrs) != 0) {
6226 aprint_error_dev(dev,
6227 "failed to allocate MSI-X interrupt\n");
6228 return (ENXIO);
6229 }
6230
6231 kcpuset_create(&affinity, false);
6232 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6233 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6234 device_xname(dev), i);
6235 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6236 sizeof(intrbuf));
6237 #ifdef IXGBE_MPSAFE
6238 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6239 true);
6240 #endif
6241 /* Set the handler function */
6242 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6243 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6244 intr_xname);
6245 if (que->res == NULL) {
6246 aprint_error_dev(dev,
6247 "Failed to register QUE handler\n");
6248 error = ENXIO;
6249 goto err_out;
6250 }
6251 que->msix = vector;
6252 adapter->active_queues |= (u64)(1 << que->msix);
6253
6254 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6255 #ifdef RSS
6256 /*
6257 * The queue ID is used as the RSS layer bucket ID.
6258 * We look up the queue ID -> RSS CPU ID and select
6259 * that.
6260 */
6261 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6262 CPU_SETOF(cpu_id, &cpu_mask);
6263 #endif
6264 } else {
6265 /*
6266 * Bind the MSI-X vector, and thus the
6267 * rings to the corresponding CPU.
6268 *
6269 * This just happens to match the default RSS
6270 * round-robin bucket -> queue -> CPU allocation.
6271 */
6272 if (adapter->num_queues > 1)
6273 cpu_id = i;
6274 }
6275 /* Round-robin affinity */
6276 kcpuset_zero(affinity);
6277 kcpuset_set(affinity, cpu_id % ncpu);
6278 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6279 NULL);
6280 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6281 intrstr);
6282 if (error == 0) {
6283 #if 1 /* def IXGBE_DEBUG */
6284 #ifdef RSS
6285 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6286 cpu_id % ncpu);
6287 #else
6288 aprint_normal(", bound queue %d to cpu %d", i,
6289 cpu_id % ncpu);
6290 #endif
6291 #endif /* IXGBE_DEBUG */
6292 }
6293 aprint_normal("\n");
6294
6295 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6296 txr->txr_si = softint_establish(
6297 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6298 ixgbe_deferred_mq_start, txr);
6299 if (txr->txr_si == NULL) {
6300 aprint_error_dev(dev,
6301 "couldn't establish software interrupt\n");
6302 error = ENXIO;
6303 goto err_out;
6304 }
6305 }
6306 que->que_si
6307 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6308 ixgbe_handle_que, que);
6309 if (que->que_si == NULL) {
6310 aprint_error_dev(dev,
6311 "couldn't establish software interrupt\n");
6312 error = ENXIO;
6313 goto err_out;
6314 }
6315 }
6316 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6317 error = workqueue_create(&adapter->txr_wq, wqname,
6318 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6319 IXGBE_WORKQUEUE_FLAGS);
6320 if (error) {
6321 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6322 goto err_out;
6323 }
6324 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6325
6326 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6327 error = workqueue_create(&adapter->que_wq, wqname,
6328 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6329 IXGBE_WORKQUEUE_FLAGS);
6330 if (error) {
6331 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6332 goto err_out;
6333 }
6334
6335 /* and Link */
6336 cpu_id++;
6337 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6338 adapter->vector = vector;
6339 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6340 sizeof(intrbuf));
6341 #ifdef IXGBE_MPSAFE
6342 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6343 true);
6344 #endif
6345 /* Set the link handler function */
6346 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6347 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6348 intr_xname);
6349 if (adapter->osdep.ihs[vector] == NULL) {
6350 aprint_error_dev(dev, "Failed to register LINK handler\n");
6351 error = ENXIO;
6352 goto err_out;
6353 }
6354 /* Round-robin affinity */
6355 kcpuset_zero(affinity);
6356 kcpuset_set(affinity, cpu_id % ncpu);
6357 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6358 NULL);
6359
6360 aprint_normal_dev(dev,
6361 "for link, interrupting at %s", intrstr);
6362 if (error == 0)
6363 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6364 else
6365 aprint_normal("\n");
6366
6367 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6368 adapter->mbx_si =
6369 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6370 ixgbe_handle_mbx, adapter);
6371 if (adapter->mbx_si == NULL) {
6372 aprint_error_dev(dev,
6373 "could not establish software interrupts\n");
6374
6375 error = ENXIO;
6376 goto err_out;
6377 }
6378 }
6379
6380 kcpuset_destroy(affinity);
6381 aprint_normal_dev(dev,
6382 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6383
6384 return (0);
6385
6386 err_out:
6387 kcpuset_destroy(affinity);
6388 ixgbe_free_softint(adapter);
6389 ixgbe_free_pciintr_resources(adapter);
6390 return (error);
6391 } /* ixgbe_allocate_msix */
6392
6393 /************************************************************************
6394 * ixgbe_configure_interrupts
6395 *
6396 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6397 * This will also depend on user settings.
6398 ************************************************************************/
6399 static int
6400 ixgbe_configure_interrupts(struct adapter *adapter)
6401 {
6402 device_t dev = adapter->dev;
6403 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6404 int want, queues, msgs;
6405
6406 /* Default to 1 queue if MSI-X setup fails */
6407 adapter->num_queues = 1;
6408
6409 /* Override by tuneable */
6410 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6411 goto msi;
6412
6413 /*
6414 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6415 * interrupt slot.
6416 */
6417 if (ncpu == 1)
6418 goto msi;
6419
6420 /* First try MSI-X */
6421 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6422 msgs = MIN(msgs, IXG_MAX_NINTR);
6423 if (msgs < 2)
6424 goto msi;
6425
6426 adapter->msix_mem = (void *)1; /* XXX */
6427
6428 /* Figure out a reasonable auto config value */
6429 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6430
6431 #ifdef RSS
6432 /* If we're doing RSS, clamp at the number of RSS buckets */
6433 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6434 queues = min(queues, rss_getnumbuckets());
6435 #endif
6436 if (ixgbe_num_queues > queues) {
6437 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6438 ixgbe_num_queues = queues;
6439 }
6440
6441 if (ixgbe_num_queues != 0)
6442 queues = ixgbe_num_queues;
6443 else
6444 queues = min(queues,
6445 min(mac->max_tx_queues, mac->max_rx_queues));
6446
6447 /* reflect correct sysctl value */
6448 ixgbe_num_queues = queues;
6449
6450 /*
6451 * Want one vector (RX/TX pair) per queue
6452 * plus an additional for Link.
6453 */
6454 want = queues + 1;
6455 if (msgs >= want)
6456 msgs = want;
6457 else {
6458 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6459 "%d vectors but %d queues wanted!\n",
6460 msgs, want);
6461 goto msi;
6462 }
6463 adapter->num_queues = queues;
6464 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6465 return (0);
6466
6467 /*
6468 * MSI-X allocation failed or provided us with
6469 * less vectors than needed. Free MSI-X resources
6470 * and we'll try enabling MSI.
6471 */
6472 msi:
6473 /* Without MSI-X, some features are no longer supported */
6474 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6475 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6476 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6477 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6478
6479 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6480 adapter->msix_mem = NULL; /* XXX */
6481 if (msgs > 1)
6482 msgs = 1;
6483 if (msgs != 0) {
6484 msgs = 1;
6485 adapter->feat_en |= IXGBE_FEATURE_MSI;
6486 return (0);
6487 }
6488
6489 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6490 aprint_error_dev(dev,
6491 "Device does not support legacy interrupts.\n");
6492 return 1;
6493 }
6494
6495 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6496
6497 return (0);
6498 } /* ixgbe_configure_interrupts */
6499
6500
6501 /************************************************************************
6502 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6503 *
6504 * Done outside of interrupt context since the driver might sleep
6505 ************************************************************************/
6506 static void
6507 ixgbe_handle_link(void *context)
6508 {
6509 struct adapter *adapter = context;
6510 struct ixgbe_hw *hw = &adapter->hw;
6511
6512 IXGBE_CORE_LOCK(adapter);
6513 ++adapter->link_sicount.ev_count;
6514 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6515 ixgbe_update_link_status(adapter);
6516
6517 /* Re-enable link interrupts */
6518 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6519
6520 IXGBE_CORE_UNLOCK(adapter);
6521 } /* ixgbe_handle_link */
6522
6523 /************************************************************************
6524 * ixgbe_rearm_queues
6525 ************************************************************************/
6526 static void
6527 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6528 {
6529 u32 mask;
6530
6531 switch (adapter->hw.mac.type) {
6532 case ixgbe_mac_82598EB:
6533 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6534 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6535 break;
6536 case ixgbe_mac_82599EB:
6537 case ixgbe_mac_X540:
6538 case ixgbe_mac_X550:
6539 case ixgbe_mac_X550EM_x:
6540 case ixgbe_mac_X550EM_a:
6541 mask = (queues & 0xFFFFFFFF);
6542 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6543 mask = (queues >> 32);
6544 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6545 break;
6546 default:
6547 break;
6548 }
6549 } /* ixgbe_rearm_queues */
6550