ixgbe.c revision 1.144 1 /* $NetBSD: ixgbe.c,v 1.144 2018/04/04 08:13:07 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 327031 2017-12-20 18:15:06Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 char ixgbe_driver_version[] = "4.0.0-k";
84
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static int ixgbe_setup_interface(device_t, struct adapter *);
188 static void ixgbe_config_gpie(struct adapter *);
189 static void ixgbe_config_dmac(struct adapter *);
190 static void ixgbe_config_delay_values(struct adapter *);
191 static void ixgbe_config_link(struct adapter *);
192 static void ixgbe_check_wol_support(struct adapter *);
193 static int ixgbe_setup_low_power_mode(struct adapter *);
194 static void ixgbe_rearm_queues(struct adapter *, u64);
195
196 static void ixgbe_initialize_transmit_units(struct adapter *);
197 static void ixgbe_initialize_receive_units(struct adapter *);
198 static void ixgbe_enable_rx_drop(struct adapter *);
199 static void ixgbe_disable_rx_drop(struct adapter *);
200 static void ixgbe_initialize_rss_mapping(struct adapter *);
201
202 static void ixgbe_enable_intr(struct adapter *);
203 static void ixgbe_disable_intr(struct adapter *);
204 static void ixgbe_update_stats_counters(struct adapter *);
205 static void ixgbe_set_promisc(struct adapter *);
206 static void ixgbe_set_multi(struct adapter *);
207 static void ixgbe_update_link_status(struct adapter *);
208 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
209 static void ixgbe_configure_ivars(struct adapter *);
210 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
211 static void ixgbe_eitr_write(struct ix_queue *, uint32_t);
212
213 static void ixgbe_setup_vlan_hw_support(struct adapter *);
214 #if 0
215 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
216 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
217 #endif
218
219 static void ixgbe_add_device_sysctls(struct adapter *);
220 static void ixgbe_add_hw_stats(struct adapter *);
221 static void ixgbe_clear_evcnt(struct adapter *);
222 static int ixgbe_set_flowcntl(struct adapter *, int);
223 static int ixgbe_set_advertise(struct adapter *, int);
224 static int ixgbe_get_advertise(struct adapter *);
225
226 /* Sysctl handlers */
227 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
228 const char *, int *, int);
229 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
230 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
231 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
232 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
235 #ifdef IXGBE_DEBUG
236 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
237 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
238 #endif
239 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
246
247 /* Support for pluggable optic modules */
248 static bool ixgbe_sfp_probe(struct adapter *);
249
250 /* Legacy (single vector) interrupt handler */
251 static int ixgbe_legacy_irq(void *);
252
253 /* The MSI/MSI-X Interrupt handlers */
254 static int ixgbe_msix_que(void *);
255 static int ixgbe_msix_link(void *);
256
257 /* Software interrupts for deferred work */
258 static void ixgbe_handle_que(void *);
259 static void ixgbe_handle_link(void *);
260 static void ixgbe_handle_msf(void *);
261 static void ixgbe_handle_mod(void *);
262 static void ixgbe_handle_phy(void *);
263
264 /* Workqueue handler for deferred work */
265 static void ixgbe_handle_que_work(struct work *, void *);
266
267 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
268
269 /************************************************************************
270 * NetBSD Device Interface Entry Points
271 ************************************************************************/
272 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
273 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
274 DVF_DETACH_SHUTDOWN);
275
276 #if 0
277 devclass_t ix_devclass;
278 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
279
280 MODULE_DEPEND(ix, pci, 1, 1, 1);
281 MODULE_DEPEND(ix, ether, 1, 1, 1);
282 #ifdef DEV_NETMAP
283 MODULE_DEPEND(ix, netmap, 1, 1, 1);
284 #endif
285 #endif
286
287 /*
288 * TUNEABLE PARAMETERS:
289 */
290
291 /*
292 * AIM: Adaptive Interrupt Moderation
293 * which means that the interrupt rate
294 * is varied over time based on the
295 * traffic for that interrupt vector
296 */
297 static bool ixgbe_enable_aim = true;
298 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
299 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
300 "Enable adaptive interrupt moderation");
301
302 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
303 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
304 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
305
306 /* How many packets rxeof tries to clean at a time */
307 static int ixgbe_rx_process_limit = 256;
308 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
309 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
310
311 /* How many packets txeof tries to clean at a time */
312 static int ixgbe_tx_process_limit = 256;
313 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
314 &ixgbe_tx_process_limit, 0,
315 "Maximum number of sent packets to process at a time, -1 means unlimited");
316
317 /* Flow control setting, default to full */
318 static int ixgbe_flow_control = ixgbe_fc_full;
319 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
320 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
321
322 /* Whether packet processing uses a workqueue or a softint */
323 static bool ixgbe_txrx_workqueue = false;
324
325 /*
326 * Smart speed setting, default to on
327 * this only works as a compile option
328 * right now as its during attach, set
329 * this to 'ixgbe_smart_speed_off' to
330 * disable.
331 */
332 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
333
334 /*
335 * MSI-X should be the default for best performance,
336 * but this allows it to be forced off for testing.
337 */
338 static int ixgbe_enable_msix = 1;
339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
340 "Enable MSI-X interrupts");
341
342 /*
343 * Number of Queues, can be set to 0,
344 * it then autoconfigures based on the
345 * number of cpus with a max of 8. This
346 * can be overridden manually here.
347 */
348 static int ixgbe_num_queues = 0;
349 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
350 "Number of queues to configure, 0 indicates autoconfigure");
351
352 /*
353 * Number of TX descriptors per ring,
354 * setting higher than RX as this seems
355 * the better performing choice.
356 */
357 static int ixgbe_txd = PERFORM_TXD;
358 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
359 "Number of transmit descriptors per queue");
360
361 /* Number of RX descriptors per ring */
362 static int ixgbe_rxd = PERFORM_RXD;
363 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
364 "Number of receive descriptors per queue");
365
366 /*
367 * Defining this on will allow the use
368 * of unsupported SFP+ modules, note that
369 * doing so you are on your own :)
370 */
371 static int allow_unsupported_sfp = false;
372 #define TUNABLE_INT(__x, __y)
373 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
374
375 /*
376 * Not sure if Flow Director is fully baked,
377 * so we'll default to turning it off.
378 */
379 static int ixgbe_enable_fdir = 0;
380 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
381 "Enable Flow Director");
382
383 /* Legacy Transmit (single queue) */
384 static int ixgbe_enable_legacy_tx = 0;
385 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
386 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
387
388 /* Receive-Side Scaling */
389 static int ixgbe_enable_rss = 1;
390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
391 "Enable Receive-Side Scaling (RSS)");
392
393 /* Keep running tab on them for sanity check */
394 static int ixgbe_total_ports;
395
396 #if 0
397 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
398 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
399 #endif
400
401 #ifdef NET_MPSAFE
402 #define IXGBE_MPSAFE 1
403 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
404 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
405 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
406 #else
407 #define IXGBE_CALLOUT_FLAGS 0
408 #define IXGBE_SOFTINFT_FLAGS 0
409 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
410 #endif
411 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
412
413 /************************************************************************
414 * ixgbe_initialize_rss_mapping
415 ************************************************************************/
416 static void
417 ixgbe_initialize_rss_mapping(struct adapter *adapter)
418 {
419 struct ixgbe_hw *hw = &adapter->hw;
420 u32 reta = 0, mrqc, rss_key[10];
421 int queue_id, table_size, index_mult;
422 int i, j;
423 u32 rss_hash_config;
424
425 /* force use default RSS key. */
426 #ifdef __NetBSD__
427 rss_getkey((uint8_t *) &rss_key);
428 #else
429 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
430 /* Fetch the configured RSS key */
431 rss_getkey((uint8_t *) &rss_key);
432 } else {
433 /* set up random bits */
434 cprng_fast(&rss_key, sizeof(rss_key));
435 }
436 #endif
437
438 /* Set multiplier for RETA setup and table size based on MAC */
439 index_mult = 0x1;
440 table_size = 128;
441 switch (adapter->hw.mac.type) {
442 case ixgbe_mac_82598EB:
443 index_mult = 0x11;
444 break;
445 case ixgbe_mac_X550:
446 case ixgbe_mac_X550EM_x:
447 case ixgbe_mac_X550EM_a:
448 table_size = 512;
449 break;
450 default:
451 break;
452 }
453
454 /* Set up the redirection table */
455 for (i = 0, j = 0; i < table_size; i++, j++) {
456 if (j == adapter->num_queues)
457 j = 0;
458
459 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
460 /*
461 * Fetch the RSS bucket id for the given indirection
462 * entry. Cap it at the number of configured buckets
463 * (which is num_queues.)
464 */
465 queue_id = rss_get_indirection_to_bucket(i);
466 queue_id = queue_id % adapter->num_queues;
467 } else
468 queue_id = (j * index_mult);
469
470 /*
471 * The low 8 bits are for hash value (n+0);
472 * The next 8 bits are for hash value (n+1), etc.
473 */
474 reta = reta >> 8;
475 reta = reta | (((uint32_t) queue_id) << 24);
476 if ((i & 3) == 3) {
477 if (i < 128)
478 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
479 else
480 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
481 reta);
482 reta = 0;
483 }
484 }
485
486 /* Now fill our hash function seeds */
487 for (i = 0; i < 10; i++)
488 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
489
490 /* Perform hash on these packet types */
491 if (adapter->feat_en & IXGBE_FEATURE_RSS)
492 rss_hash_config = rss_gethashconfig();
493 else {
494 /*
495 * Disable UDP - IP fragments aren't currently being handled
496 * and so we end up with a mix of 2-tuple and 4-tuple
497 * traffic.
498 */
499 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
500 | RSS_HASHTYPE_RSS_TCP_IPV4
501 | RSS_HASHTYPE_RSS_IPV6
502 | RSS_HASHTYPE_RSS_TCP_IPV6
503 | RSS_HASHTYPE_RSS_IPV6_EX
504 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
505 }
506
507 mrqc = IXGBE_MRQC_RSSEN;
508 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
509 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
510 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
511 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
512 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
513 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
514 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
515 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
516 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
517 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
518 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
519 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
520 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
522 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
524 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
526 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
527 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
528 } /* ixgbe_initialize_rss_mapping */
529
530 /************************************************************************
531 * ixgbe_initialize_receive_units - Setup receive registers and features.
532 ************************************************************************/
533 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
534
535 static void
536 ixgbe_initialize_receive_units(struct adapter *adapter)
537 {
538 struct rx_ring *rxr = adapter->rx_rings;
539 struct ixgbe_hw *hw = &adapter->hw;
540 struct ifnet *ifp = adapter->ifp;
541 int i, j;
542 u32 bufsz, fctrl, srrctl, rxcsum;
543 u32 hlreg;
544
545 /*
546 * Make sure receives are disabled while
547 * setting up the descriptor ring
548 */
549 ixgbe_disable_rx(hw);
550
551 /* Enable broadcasts */
552 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
553 fctrl |= IXGBE_FCTRL_BAM;
554 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
555 fctrl |= IXGBE_FCTRL_DPF;
556 fctrl |= IXGBE_FCTRL_PMCF;
557 }
558 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
559
560 /* Set for Jumbo Frames? */
561 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
562 if (ifp->if_mtu > ETHERMTU)
563 hlreg |= IXGBE_HLREG0_JUMBOEN;
564 else
565 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
566
567 #ifdef DEV_NETMAP
568 /* CRC stripping is conditional in Netmap */
569 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
570 (ifp->if_capenable & IFCAP_NETMAP) &&
571 !ix_crcstrip)
572 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
573 else
574 #endif /* DEV_NETMAP */
575 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
576
577 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
578
579 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
580 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
581
582 for (i = 0; i < adapter->num_queues; i++, rxr++) {
583 u64 rdba = rxr->rxdma.dma_paddr;
584 u32 tqsmreg, reg;
585 int regnum = i / 4; /* 1 register per 4 queues */
586 int regshift = i % 4; /* 4 bits per 1 queue */
587 j = rxr->me;
588
589 /* Setup the Base and Length of the Rx Descriptor Ring */
590 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
591 (rdba & 0x00000000ffffffffULL));
592 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
593 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
594 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
595
596 /* Set up the SRRCTL register */
597 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
598 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
599 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
600 srrctl |= bufsz;
601 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
602
603 /* Set RQSMR (Receive Queue Statistic Mapping) register */
604 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
605 reg &= ~(0x000000ff << (regshift * 8));
606 reg |= i << (regshift * 8);
607 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
608
609 /*
610 * Set RQSMR (Receive Queue Statistic Mapping) register.
611 * Register location for queue 0...7 are different between
612 * 82598 and newer.
613 */
614 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
615 tqsmreg = IXGBE_TQSMR(regnum);
616 else
617 tqsmreg = IXGBE_TQSM(regnum);
618 reg = IXGBE_READ_REG(hw, tqsmreg);
619 reg &= ~(0x000000ff << (regshift * 8));
620 reg |= i << (regshift * 8);
621 IXGBE_WRITE_REG(hw, tqsmreg, reg);
622
623 /*
624 * Set DROP_EN iff we have no flow control and >1 queue.
625 * Note that srrctl was cleared shortly before during reset,
626 * so we do not need to clear the bit, but do it just in case
627 * this code is moved elsewhere.
628 */
629 if (adapter->num_queues > 1 &&
630 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
631 srrctl |= IXGBE_SRRCTL_DROP_EN;
632 } else {
633 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
634 }
635
636 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
637
638 /* Setup the HW Rx Head and Tail Descriptor Pointers */
639 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
640 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
641
642 /* Set the driver rx tail address */
643 rxr->tail = IXGBE_RDT(rxr->me);
644 }
645
646 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
647 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
648 | IXGBE_PSRTYPE_UDPHDR
649 | IXGBE_PSRTYPE_IPV4HDR
650 | IXGBE_PSRTYPE_IPV6HDR;
651 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
652 }
653
654 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
655
656 ixgbe_initialize_rss_mapping(adapter);
657
658 if (adapter->num_queues > 1) {
659 /* RSS and RX IPP Checksum are mutually exclusive */
660 rxcsum |= IXGBE_RXCSUM_PCSD;
661 }
662
663 if (ifp->if_capenable & IFCAP_RXCSUM)
664 rxcsum |= IXGBE_RXCSUM_PCSD;
665
666 /* This is useful for calculating UDP/IP fragment checksums */
667 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
668 rxcsum |= IXGBE_RXCSUM_IPPCSE;
669
670 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
671
672 } /* ixgbe_initialize_receive_units */
673
674 /************************************************************************
675 * ixgbe_initialize_transmit_units - Enable transmit units.
676 ************************************************************************/
677 static void
678 ixgbe_initialize_transmit_units(struct adapter *adapter)
679 {
680 struct tx_ring *txr = adapter->tx_rings;
681 struct ixgbe_hw *hw = &adapter->hw;
682 int i;
683
684 /* Setup the Base and Length of the Tx Descriptor Ring */
685 for (i = 0; i < adapter->num_queues; i++, txr++) {
686 u64 tdba = txr->txdma.dma_paddr;
687 u32 txctrl = 0;
688 int j = txr->me;
689
690 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
691 (tdba & 0x00000000ffffffffULL));
692 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
693 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
694 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
695
696 /* Setup the HW Tx Head and Tail descriptor pointers */
697 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
698 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
699
700 /* Cache the tail address */
701 txr->tail = IXGBE_TDT(j);
702
703 /* Disable Head Writeback */
704 /*
705 * Note: for X550 series devices, these registers are actually
706 * prefixed with TPH_ isntead of DCA_, but the addresses and
707 * fields remain the same.
708 */
709 switch (hw->mac.type) {
710 case ixgbe_mac_82598EB:
711 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
712 break;
713 default:
714 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
715 break;
716 }
717 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
718 switch (hw->mac.type) {
719 case ixgbe_mac_82598EB:
720 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
721 break;
722 default:
723 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
724 break;
725 }
726
727 }
728
729 if (hw->mac.type != ixgbe_mac_82598EB) {
730 u32 dmatxctl, rttdcs;
731
732 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
733 dmatxctl |= IXGBE_DMATXCTL_TE;
734 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
735 /* Disable arbiter to set MTQC */
736 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
737 rttdcs |= IXGBE_RTTDCS_ARBDIS;
738 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
739 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
740 ixgbe_get_mtqc(adapter->iov_mode));
741 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
742 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
743 }
744
745 return;
746 } /* ixgbe_initialize_transmit_units */
747
748 /************************************************************************
749 * ixgbe_attach - Device initialization routine
750 *
751 * Called when the driver is being loaded.
752 * Identifies the type of hardware, allocates all resources
753 * and initializes the hardware.
754 *
755 * return 0 on success, positive on failure
756 ************************************************************************/
757 static void
758 ixgbe_attach(device_t parent, device_t dev, void *aux)
759 {
760 struct adapter *adapter;
761 struct ixgbe_hw *hw;
762 int error = -1;
763 u32 ctrl_ext;
764 u16 high, low, nvmreg;
765 pcireg_t id, subid;
766 ixgbe_vendor_info_t *ent;
767 struct pci_attach_args *pa = aux;
768 const char *str;
769 char buf[256];
770
771 INIT_DEBUGOUT("ixgbe_attach: begin");
772
773 /* Allocate, clear, and link in our adapter structure */
774 adapter = device_private(dev);
775 adapter->hw.back = adapter;
776 adapter->dev = dev;
777 hw = &adapter->hw;
778 adapter->osdep.pc = pa->pa_pc;
779 adapter->osdep.tag = pa->pa_tag;
780 if (pci_dma64_available(pa))
781 adapter->osdep.dmat = pa->pa_dmat64;
782 else
783 adapter->osdep.dmat = pa->pa_dmat;
784 adapter->osdep.attached = false;
785
786 ent = ixgbe_lookup(pa);
787
788 KASSERT(ent != NULL);
789
790 aprint_normal(": %s, Version - %s\n",
791 ixgbe_strings[ent->index], ixgbe_driver_version);
792
793 /* Core Lock Init*/
794 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
795
796 /* Set up the timer callout */
797 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
798
799 /* Determine hardware revision */
800 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
801 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
802
803 hw->vendor_id = PCI_VENDOR(id);
804 hw->device_id = PCI_PRODUCT(id);
805 hw->revision_id =
806 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
807 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
808 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
809
810 /*
811 * Make sure BUSMASTER is set
812 */
813 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
814
815 /* Do base PCI setup - map BAR0 */
816 if (ixgbe_allocate_pci_resources(adapter, pa)) {
817 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
818 error = ENXIO;
819 goto err_out;
820 }
821
822 /* let hardware know driver is loaded */
823 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
824 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
825 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
826
827 /*
828 * Initialize the shared code
829 */
830 if (ixgbe_init_shared_code(hw) != 0) {
831 aprint_error_dev(dev, "Unable to initialize the shared code\n");
832 error = ENXIO;
833 goto err_out;
834 }
835
836 switch (hw->mac.type) {
837 case ixgbe_mac_82598EB:
838 str = "82598EB";
839 break;
840 case ixgbe_mac_82599EB:
841 str = "82599EB";
842 break;
843 case ixgbe_mac_X540:
844 str = "X540";
845 break;
846 case ixgbe_mac_X550:
847 str = "X550";
848 break;
849 case ixgbe_mac_X550EM_x:
850 str = "X550EM";
851 break;
852 case ixgbe_mac_X550EM_a:
853 str = "X550EM A";
854 break;
855 default:
856 str = "Unknown";
857 break;
858 }
859 aprint_normal_dev(dev, "device %s\n", str);
860
861 if (hw->mbx.ops.init_params)
862 hw->mbx.ops.init_params(hw);
863
864 hw->allow_unsupported_sfp = allow_unsupported_sfp;
865
866 /* Pick up the 82599 settings */
867 if (hw->mac.type != ixgbe_mac_82598EB) {
868 hw->phy.smart_speed = ixgbe_smart_speed;
869 adapter->num_segs = IXGBE_82599_SCATTER;
870 } else
871 adapter->num_segs = IXGBE_82598_SCATTER;
872
873 hw->mac.ops.set_lan_id(hw);
874 ixgbe_init_device_features(adapter);
875
876 if (ixgbe_configure_interrupts(adapter)) {
877 error = ENXIO;
878 goto err_out;
879 }
880
881 /* Allocate multicast array memory. */
882 adapter->mta = malloc(sizeof(*adapter->mta) *
883 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
884 if (adapter->mta == NULL) {
885 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
886 error = ENOMEM;
887 goto err_out;
888 }
889
890 /* Enable WoL (if supported) */
891 ixgbe_check_wol_support(adapter);
892
893 /* Verify adapter fan is still functional (if applicable) */
894 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
895 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
896 ixgbe_check_fan_failure(adapter, esdp, FALSE);
897 }
898
899 /* Ensure SW/FW semaphore is free */
900 ixgbe_init_swfw_semaphore(hw);
901
902 /* Enable EEE power saving */
903 if (adapter->feat_en & IXGBE_FEATURE_EEE)
904 hw->mac.ops.setup_eee(hw, TRUE);
905
906 /* Set an initial default flow control value */
907 hw->fc.requested_mode = ixgbe_flow_control;
908
909 /* Sysctls for limiting the amount of work done in the taskqueues */
910 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
911 "max number of rx packets to process",
912 &adapter->rx_process_limit, ixgbe_rx_process_limit);
913
914 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
915 "max number of tx packets to process",
916 &adapter->tx_process_limit, ixgbe_tx_process_limit);
917
918 /* Do descriptor calc and sanity checks */
919 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
920 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
921 aprint_error_dev(dev, "TXD config issue, using default!\n");
922 adapter->num_tx_desc = DEFAULT_TXD;
923 } else
924 adapter->num_tx_desc = ixgbe_txd;
925
926 /*
927 * With many RX rings it is easy to exceed the
928 * system mbuf allocation. Tuning nmbclusters
929 * can alleviate this.
930 */
931 if (nmbclusters > 0) {
932 int s;
933 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
934 if (s > nmbclusters) {
935 aprint_error_dev(dev, "RX Descriptors exceed "
936 "system mbuf max, using default instead!\n");
937 ixgbe_rxd = DEFAULT_RXD;
938 }
939 }
940
941 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
942 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
943 aprint_error_dev(dev, "RXD config issue, using default!\n");
944 adapter->num_rx_desc = DEFAULT_RXD;
945 } else
946 adapter->num_rx_desc = ixgbe_rxd;
947
948 /* Allocate our TX/RX Queues */
949 if (ixgbe_allocate_queues(adapter)) {
950 error = ENOMEM;
951 goto err_out;
952 }
953
954 hw->phy.reset_if_overtemp = TRUE;
955 error = ixgbe_reset_hw(hw);
956 hw->phy.reset_if_overtemp = FALSE;
957 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
958 /*
959 * No optics in this port, set up
960 * so the timer routine will probe
961 * for later insertion.
962 */
963 adapter->sfp_probe = TRUE;
964 error = IXGBE_SUCCESS;
965 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
966 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
967 error = EIO;
968 goto err_late;
969 } else if (error) {
970 aprint_error_dev(dev, "Hardware initialization failed\n");
971 error = EIO;
972 goto err_late;
973 }
974
975 /* Make sure we have a good EEPROM before we read from it */
976 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
977 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
978 error = EIO;
979 goto err_late;
980 }
981
982 aprint_normal("%s:", device_xname(dev));
983 /* NVM Image Version */
984 switch (hw->mac.type) {
985 case ixgbe_mac_X540:
986 case ixgbe_mac_X550EM_a:
987 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
988 if (nvmreg == 0xffff)
989 break;
990 high = (nvmreg >> 12) & 0x0f;
991 low = (nvmreg >> 4) & 0xff;
992 id = nvmreg & 0x0f;
993 aprint_normal(" NVM Image Version %u.", high);
994 if (hw->mac.type == ixgbe_mac_X540)
995 str = "%x";
996 else
997 str = "%02x";
998 aprint_normal(str, low);
999 aprint_normal(" ID 0x%x,", id);
1000 break;
1001 case ixgbe_mac_X550EM_x:
1002 case ixgbe_mac_X550:
1003 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1004 if (nvmreg == 0xffff)
1005 break;
1006 high = (nvmreg >> 12) & 0x0f;
1007 low = nvmreg & 0xff;
1008 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1009 break;
1010 default:
1011 break;
1012 }
1013
1014 /* PHY firmware revision */
1015 switch (hw->mac.type) {
1016 case ixgbe_mac_X540:
1017 case ixgbe_mac_X550:
1018 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1019 if (nvmreg == 0xffff)
1020 break;
1021 high = (nvmreg >> 12) & 0x0f;
1022 low = (nvmreg >> 4) & 0xff;
1023 id = nvmreg & 0x000f;
1024 aprint_normal(" PHY FW Revision %u.", high);
1025 if (hw->mac.type == ixgbe_mac_X540)
1026 str = "%x";
1027 else
1028 str = "%02x";
1029 aprint_normal(str, low);
1030 aprint_normal(" ID 0x%x,", id);
1031 break;
1032 default:
1033 break;
1034 }
1035
1036 /* NVM Map version & OEM NVM Image version */
1037 switch (hw->mac.type) {
1038 case ixgbe_mac_X550:
1039 case ixgbe_mac_X550EM_x:
1040 case ixgbe_mac_X550EM_a:
1041 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1042 if (nvmreg != 0xffff) {
1043 high = (nvmreg >> 12) & 0x0f;
1044 low = nvmreg & 0x00ff;
1045 aprint_normal(" NVM Map version %u.%02x,", high, low);
1046 }
1047 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1048 if (nvmreg != 0xffff) {
1049 high = (nvmreg >> 12) & 0x0f;
1050 low = nvmreg & 0x00ff;
1051 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1052 low);
1053 }
1054 break;
1055 default:
1056 break;
1057 }
1058
1059 /* Print the ETrackID */
1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1061 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1062 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1063
1064 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1065 error = ixgbe_allocate_msix(adapter, pa);
1066 if (error) {
1067 /* Free allocated queue structures first */
1068 ixgbe_free_transmit_structures(adapter);
1069 ixgbe_free_receive_structures(adapter);
1070 free(adapter->queues, M_DEVBUF);
1071
1072 /* Fallback to legacy interrupt */
1073 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1074 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1075 adapter->feat_en |= IXGBE_FEATURE_MSI;
1076 adapter->num_queues = 1;
1077
1078 /* Allocate our TX/RX Queues again */
1079 if (ixgbe_allocate_queues(adapter)) {
1080 error = ENOMEM;
1081 goto err_out;
1082 }
1083 }
1084 }
1085 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1086 error = ixgbe_allocate_legacy(adapter, pa);
1087 if (error)
1088 goto err_late;
1089
1090 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1091 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1092 ixgbe_handle_link, adapter);
1093 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1094 ixgbe_handle_mod, adapter);
1095 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1096 ixgbe_handle_msf, adapter);
1097 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1098 ixgbe_handle_phy, adapter);
1099 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1100 adapter->fdir_si =
1101 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1102 ixgbe_reinit_fdir, adapter);
1103 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1104 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1105 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1106 && (adapter->fdir_si == NULL))) {
1107 aprint_error_dev(dev,
1108 "could not establish software interrupts ()\n");
1109 goto err_out;
1110 }
1111
1112 error = ixgbe_start_hw(hw);
1113 switch (error) {
1114 case IXGBE_ERR_EEPROM_VERSION:
1115 aprint_error_dev(dev, "This device is a pre-production adapter/"
1116 "LOM. Please be aware there may be issues associated "
1117 "with your hardware.\nIf you are experiencing problems "
1118 "please contact your Intel or hardware representative "
1119 "who provided you with this hardware.\n");
1120 break;
1121 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1122 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1123 error = EIO;
1124 goto err_late;
1125 case IXGBE_ERR_SFP_NOT_PRESENT:
1126 aprint_error_dev(dev, "No SFP+ Module found\n");
1127 /* falls thru */
1128 default:
1129 break;
1130 }
1131
1132 /* Setup OS specific network interface */
1133 if (ixgbe_setup_interface(dev, adapter) != 0)
1134 goto err_late;
1135
1136 /*
1137 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1138 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1139 */
1140 if (hw->phy.media_type == ixgbe_media_type_copper) {
1141 uint16_t id1, id2;
1142 int oui, model, rev;
1143 const char *descr;
1144
1145 id1 = hw->phy.id >> 16;
1146 id2 = hw->phy.id & 0xffff;
1147 oui = MII_OUI(id1, id2);
1148 model = MII_MODEL(id2);
1149 rev = MII_REV(id2);
1150 if ((descr = mii_get_descr(oui, model)) != NULL)
1151 aprint_normal_dev(dev,
1152 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1153 descr, oui, model, rev);
1154 else
1155 aprint_normal_dev(dev,
1156 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1157 oui, model, rev);
1158 }
1159
1160 /* Enable the optics for 82599 SFP+ fiber */
1161 ixgbe_enable_tx_laser(hw);
1162
1163 /* Enable power to the phy. */
1164 ixgbe_set_phy_power(hw, TRUE);
1165
1166 /* Initialize statistics */
1167 ixgbe_update_stats_counters(adapter);
1168
1169 /* Check PCIE slot type/speed/width */
1170 ixgbe_get_slot_info(adapter);
1171
1172 /*
1173 * Do time init and sysctl init here, but
1174 * only on the first port of a bypass adapter.
1175 */
1176 ixgbe_bypass_init(adapter);
1177
1178 /* Set an initial dmac value */
1179 adapter->dmac = 0;
1180 /* Set initial advertised speeds (if applicable) */
1181 adapter->advertise = ixgbe_get_advertise(adapter);
1182
1183 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1184 ixgbe_define_iov_schemas(dev, &error);
1185
1186 /* Add sysctls */
1187 ixgbe_add_device_sysctls(adapter);
1188 ixgbe_add_hw_stats(adapter);
1189
1190 /* For Netmap */
1191 adapter->init_locked = ixgbe_init_locked;
1192 adapter->stop_locked = ixgbe_stop;
1193
1194 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1195 ixgbe_netmap_attach(adapter);
1196
1197 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1198 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1199 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1200 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1201
1202 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1203 pmf_class_network_register(dev, adapter->ifp);
1204 else
1205 aprint_error_dev(dev, "couldn't establish power handler\n");
1206
1207 INIT_DEBUGOUT("ixgbe_attach: end");
1208 adapter->osdep.attached = true;
1209
1210 return;
1211
1212 err_late:
1213 ixgbe_free_transmit_structures(adapter);
1214 ixgbe_free_receive_structures(adapter);
1215 free(adapter->queues, M_DEVBUF);
1216 err_out:
1217 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1218 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1219 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1220 ixgbe_free_softint(adapter);
1221 ixgbe_free_pci_resources(adapter);
1222 if (adapter->mta != NULL)
1223 free(adapter->mta, M_DEVBUF);
1224 IXGBE_CORE_LOCK_DESTROY(adapter);
1225
1226 return;
1227 } /* ixgbe_attach */
1228
1229 /************************************************************************
1230 * ixgbe_check_wol_support
1231 *
1232 * Checks whether the adapter's ports are capable of
1233 * Wake On LAN by reading the adapter's NVM.
1234 *
1235 * Sets each port's hw->wol_enabled value depending
1236 * on the value read here.
1237 ************************************************************************/
1238 static void
1239 ixgbe_check_wol_support(struct adapter *adapter)
1240 {
1241 struct ixgbe_hw *hw = &adapter->hw;
1242 u16 dev_caps = 0;
1243
1244 /* Find out WoL support for port */
1245 adapter->wol_support = hw->wol_enabled = 0;
1246 ixgbe_get_device_caps(hw, &dev_caps);
1247 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1248 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1249 hw->bus.func == 0))
1250 adapter->wol_support = hw->wol_enabled = 1;
1251
1252 /* Save initial wake up filter configuration */
1253 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1254
1255 return;
1256 } /* ixgbe_check_wol_support */
1257
1258 /************************************************************************
1259 * ixgbe_setup_interface
1260 *
1261 * Setup networking device structure and register an interface.
1262 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire it to this softc. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Driver handles its own locking; skip the kernel-lock wrapper. */
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only fields; dead code on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless legacy TX was forced on. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NetBSD attach sequence: if_initialize must precede
	 * ether_ifattach, which must precede if_register.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	/* Per-CPU input queue for MP-safe packet input. */
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6
			     | IFCAP_LRO;
	/* None of the offloads are enabled until userland asks. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1365
1366 /************************************************************************
1367 * ixgbe_add_media_types
1368 ************************************************************************/
1369 static void
1370 ixgbe_add_media_types(struct adapter *adapter)
1371 {
1372 struct ixgbe_hw *hw = &adapter->hw;
1373 device_t dev = adapter->dev;
1374 u64 layer;
1375
1376 layer = adapter->phy_layer;
1377
1378 #define ADD(mm, dd) \
1379 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1380
1381 ADD(IFM_NONE, 0);
1382
1383 /* Media types with matching NetBSD media defines */
1384 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1385 ADD(IFM_10G_T | IFM_FDX, 0);
1386 }
1387 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1388 ADD(IFM_1000_T | IFM_FDX, 0);
1389 }
1390 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1391 ADD(IFM_100_TX | IFM_FDX, 0);
1392 }
1393 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1394 ADD(IFM_10_T | IFM_FDX, 0);
1395 }
1396
1397 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1398 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1399 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1400 }
1401
1402 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1403 ADD(IFM_10G_LR | IFM_FDX, 0);
1404 if (hw->phy.multispeed_fiber) {
1405 ADD(IFM_1000_LX | IFM_FDX, 0);
1406 }
1407 }
1408 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1409 ADD(IFM_10G_SR | IFM_FDX, 0);
1410 if (hw->phy.multispeed_fiber) {
1411 ADD(IFM_1000_SX | IFM_FDX, 0);
1412 }
1413 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1414 ADD(IFM_1000_SX | IFM_FDX, 0);
1415 }
1416 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1417 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1418 }
1419
1420 #ifdef IFM_ETH_XTYPE
1421 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1422 ADD(IFM_10G_KR | IFM_FDX, 0);
1423 }
1424 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1425 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1426 }
1427 #else
1428 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1429 device_printf(dev, "Media supported: 10GbaseKR\n");
1430 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1431 ADD(IFM_10G_SR | IFM_FDX, 0);
1432 }
1433 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1434 device_printf(dev, "Media supported: 10GbaseKX4\n");
1435 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1436 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1437 }
1438 #endif
1439 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1440 ADD(IFM_1000_KX | IFM_FDX, 0);
1441 }
1442 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1443 ADD(IFM_2500_KX | IFM_FDX, 0);
1444 }
1445 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1446 ADD(IFM_2500_T | IFM_FDX, 0);
1447 }
1448 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1449 ADD(IFM_5000_T | IFM_FDX, 0);
1450 }
1451 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1452 device_printf(dev, "Media supported: 1000baseBX\n");
1453 /* XXX no ifmedia_set? */
1454
1455 ADD(IFM_AUTO, 0);
1456
1457 #undef ADD
1458 } /* ixgbe_add_media_types */
1459
1460 /************************************************************************
1461 * ixgbe_is_sfp
1462 ************************************************************************/
1463 static inline bool
1464 ixgbe_is_sfp(struct ixgbe_hw *hw)
1465 {
1466 switch (hw->mac.type) {
1467 case ixgbe_mac_82598EB:
1468 if (hw->phy.type == ixgbe_phy_nl)
1469 return (TRUE);
1470 return (FALSE);
1471 case ixgbe_mac_82599EB:
1472 switch (hw->mac.ops.get_media_type(hw)) {
1473 case ixgbe_media_type_fiber:
1474 case ixgbe_media_type_fiber_qsfp:
1475 return (TRUE);
1476 default:
1477 return (FALSE);
1478 }
1479 case ixgbe_mac_X550EM_x:
1480 case ixgbe_mac_X550EM_a:
1481 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1482 return (TRUE);
1483 return (FALSE);
1484 default:
1485 return (FALSE);
1486 }
1487 } /* ixgbe_is_sfp */
1488
1489 /************************************************************************
1490 * ixgbe_config_link
1491 ************************************************************************/
1492 static void
1493 ixgbe_config_link(struct adapter *adapter)
1494 {
1495 struct ixgbe_hw *hw = &adapter->hw;
1496 u32 autoneg, err = 0;
1497 bool sfp, negotiate = false;
1498
1499 sfp = ixgbe_is_sfp(hw);
1500
1501 if (sfp) {
1502 if (hw->phy.multispeed_fiber) {
1503 ixgbe_enable_tx_laser(hw);
1504 kpreempt_disable();
1505 softint_schedule(adapter->msf_si);
1506 kpreempt_enable();
1507 }
1508 kpreempt_disable();
1509 softint_schedule(adapter->mod_si);
1510 kpreempt_enable();
1511 } else {
1512 struct ifmedia *ifm = &adapter->media;
1513
1514 if (hw->mac.ops.check_link)
1515 err = ixgbe_check_link(hw, &adapter->link_speed,
1516 &adapter->link_up, FALSE);
1517 if (err)
1518 return;
1519
1520 /*
1521 * Check if it's the first call. If it's the first call,
1522 * get value for auto negotiation.
1523 */
1524 autoneg = hw->phy.autoneg_advertised;
1525 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1526 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1527 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1528 &negotiate);
1529 if (err)
1530 return;
1531 if (hw->mac.ops.setup_link)
1532 err = hw->mac.ops.setup_link(hw, autoneg,
1533 adapter->link_up);
1534 }
1535
1536 } /* ixgbe_config_link */
1537
1538 /************************************************************************
1539 * ixgbe_update_stats_counters - Update board statistics counters.
1540 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet	      *ifp = adapter->ifp;
	struct ixgbe_hw	      *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
	u64		      total_missed_rx = 0;
	uint64_t	      crcerrs, rlec;

	/* Error counters (registers are clear-on-read; accumulate) */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC exists on X550 only */
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/*
	 * Per-queue packet counters. The hardware has more counter slots
	 * than configured queues, so fold slot i into queue i % num_queues.
	 */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		int j = i % adapter->num_queues;
		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		/* RNBC only exists on 82598 */
		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx is never updated in this function
	 * (always 0), so nothing is actually subtracted here — presumably
	 * a leftover from the upstream driver where it accumulated the
	 * per-queue MPC values; verify against FreeBSD if_ix.c.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level flow control frames sent; used to correct TX totals */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 82599 and later: 36-bit counters split low/high */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		/* PAUSE frames (lxon+lxoff) are minimum-size; back them out */
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX size-bucket histograms */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX totals, minus the PAUSE frames counted above */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1677
1678 /************************************************************************
1679 * ixgbe_add_hw_stats
1680 *
1681 * Add sysctl variables, one per statistic, to the system.
1682 ************************************************************************/
1683 static void
1684 ixgbe_add_hw_stats(struct adapter *adapter)
1685 {
1686 device_t dev = adapter->dev;
1687 const struct sysctlnode *rnode, *cnode;
1688 struct sysctllog **log = &adapter->sysctllog;
1689 struct tx_ring *txr = adapter->tx_rings;
1690 struct rx_ring *rxr = adapter->rx_rings;
1691 struct ixgbe_hw *hw = &adapter->hw;
1692 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1693 const char *xname = device_xname(dev);
1694 int i;
1695
1696 /* Driver Statistics */
1697 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1698 NULL, xname, "Driver tx dma soft fail EFBIG");
1699 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1700 NULL, xname, "m_defrag() failed");
1701 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1702 NULL, xname, "Driver tx dma hard fail EFBIG");
1703 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1704 NULL, xname, "Driver tx dma hard fail EINVAL");
1705 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1706 NULL, xname, "Driver tx dma hard fail other");
1707 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1708 NULL, xname, "Driver tx dma soft fail EAGAIN");
1709 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1710 NULL, xname, "Driver tx dma soft fail ENOMEM");
1711 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1712 NULL, xname, "Watchdog timeouts");
1713 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1714 NULL, xname, "TSO errors");
1715 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1716 NULL, xname, "Link MSI-X IRQ Handled");
1717 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1718 NULL, xname, "Link softint");
1719 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1720 NULL, xname, "module softint");
1721 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1722 NULL, xname, "multimode softint");
1723 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1724 NULL, xname, "external PHY softint");
1725
1726 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1727 #ifdef LRO
1728 struct lro_ctrl *lro = &rxr->lro;
1729 #endif /* LRO */
1730
1731 snprintf(adapter->queues[i].evnamebuf,
1732 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1733 xname, i);
1734 snprintf(adapter->queues[i].namebuf,
1735 sizeof(adapter->queues[i].namebuf), "q%d", i);
1736
1737 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1738 aprint_error_dev(dev, "could not create sysctl root\n");
1739 break;
1740 }
1741
1742 if (sysctl_createv(log, 0, &rnode, &rnode,
1743 0, CTLTYPE_NODE,
1744 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1745 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1746 break;
1747
1748 if (sysctl_createv(log, 0, &rnode, &cnode,
1749 CTLFLAG_READWRITE, CTLTYPE_INT,
1750 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1751 ixgbe_sysctl_interrupt_rate_handler, 0,
1752 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1753 break;
1754
1755 if (sysctl_createv(log, 0, &rnode, &cnode,
1756 CTLFLAG_READONLY, CTLTYPE_INT,
1757 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1758 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1759 0, CTL_CREATE, CTL_EOL) != 0)
1760 break;
1761
1762 if (sysctl_createv(log, 0, &rnode, &cnode,
1763 CTLFLAG_READONLY, CTLTYPE_INT,
1764 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1765 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1766 0, CTL_CREATE, CTL_EOL) != 0)
1767 break;
1768
1769 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1770 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1771 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1772 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1773 "Handled queue in softint");
1774 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1775 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1776 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1777 NULL, adapter->queues[i].evnamebuf, "TSO");
1778 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1779 NULL, adapter->queues[i].evnamebuf,
1780 "Queue No Descriptor Available");
1781 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1782 NULL, adapter->queues[i].evnamebuf,
1783 "Queue Packets Transmitted");
1784 #ifndef IXGBE_LEGACY_TX
1785 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1786 NULL, adapter->queues[i].evnamebuf,
1787 "Packets dropped in pcq");
1788 #endif
1789
1790 if (sysctl_createv(log, 0, &rnode, &cnode,
1791 CTLFLAG_READONLY,
1792 CTLTYPE_INT,
1793 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1794 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1795 CTL_CREATE, CTL_EOL) != 0)
1796 break;
1797
1798 if (sysctl_createv(log, 0, &rnode, &cnode,
1799 CTLFLAG_READONLY,
1800 CTLTYPE_INT,
1801 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1802 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1803 CTL_CREATE, CTL_EOL) != 0)
1804 break;
1805
1806 if (i < __arraycount(stats->mpc)) {
1807 evcnt_attach_dynamic(&stats->mpc[i],
1808 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1809 "RX Missed Packet Count");
1810 if (hw->mac.type == ixgbe_mac_82598EB)
1811 evcnt_attach_dynamic(&stats->rnbc[i],
1812 EVCNT_TYPE_MISC, NULL,
1813 adapter->queues[i].evnamebuf,
1814 "Receive No Buffers");
1815 }
1816 if (i < __arraycount(stats->pxontxc)) {
1817 evcnt_attach_dynamic(&stats->pxontxc[i],
1818 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1819 "pxontxc");
1820 evcnt_attach_dynamic(&stats->pxonrxc[i],
1821 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1822 "pxonrxc");
1823 evcnt_attach_dynamic(&stats->pxofftxc[i],
1824 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1825 "pxofftxc");
1826 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1827 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1828 "pxoffrxc");
1829 evcnt_attach_dynamic(&stats->pxon2offc[i],
1830 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1831 "pxon2offc");
1832 }
1833 if (i < __arraycount(stats->qprc)) {
1834 evcnt_attach_dynamic(&stats->qprc[i],
1835 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1836 "qprc");
1837 evcnt_attach_dynamic(&stats->qptc[i],
1838 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1839 "qptc");
1840 evcnt_attach_dynamic(&stats->qbrc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1842 "qbrc");
1843 evcnt_attach_dynamic(&stats->qbtc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1845 "qbtc");
1846 evcnt_attach_dynamic(&stats->qprdc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1848 "qprdc");
1849 }
1850
1851 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1852 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1853 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1854 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1855 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1856 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1857 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1858 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1859 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1860 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1861 #ifdef LRO
1862 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1863 CTLFLAG_RD, &lro->lro_queued, 0,
1864 "LRO Queued");
1865 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1866 CTLFLAG_RD, &lro->lro_flushed, 0,
1867 "LRO Flushed");
1868 #endif /* LRO */
1869 }
1870
1871 /* MAC stats get their own sub node */
1872
1873 snprintf(stats->namebuf,
1874 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1875
1876 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1877 stats->namebuf, "rx csum offload - IP");
1878 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1879 stats->namebuf, "rx csum offload - L4");
1880 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1881 stats->namebuf, "rx csum offload - IP bad");
1882 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1883 stats->namebuf, "rx csum offload - L4 bad");
1884 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1885 stats->namebuf, "Interrupt conditions zero");
1886 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1887 stats->namebuf, "Legacy interrupts");
1888
1889 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1890 stats->namebuf, "CRC Errors");
1891 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1892 stats->namebuf, "Illegal Byte Errors");
1893 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1894 stats->namebuf, "Byte Errors");
1895 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1896 stats->namebuf, "MAC Short Packets Discarded");
1897 if (hw->mac.type >= ixgbe_mac_X550)
1898 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1899 stats->namebuf, "Bad SFD");
1900 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1901 stats->namebuf, "Total Packets Missed");
1902 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1903 stats->namebuf, "MAC Local Faults");
1904 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1905 stats->namebuf, "MAC Remote Faults");
1906 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1907 stats->namebuf, "Receive Length Errors");
1908 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1909 stats->namebuf, "Link XON Transmitted");
1910 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1911 stats->namebuf, "Link XON Received");
1912 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1913 stats->namebuf, "Link XOFF Transmitted");
1914 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1915 stats->namebuf, "Link XOFF Received");
1916
1917 /* Packet Reception Stats */
1918 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1919 stats->namebuf, "Total Octets Received");
1920 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1921 stats->namebuf, "Good Octets Received");
1922 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1923 stats->namebuf, "Total Packets Received");
1924 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1925 stats->namebuf, "Good Packets Received");
1926 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1927 stats->namebuf, "Multicast Packets Received");
1928 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1929 stats->namebuf, "Broadcast Packets Received");
1930 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1931 stats->namebuf, "64 byte frames received ");
1932 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1933 stats->namebuf, "65-127 byte frames received");
1934 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1935 stats->namebuf, "128-255 byte frames received");
1936 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1937 stats->namebuf, "256-511 byte frames received");
1938 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1939 stats->namebuf, "512-1023 byte frames received");
1940 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "1023-1522 byte frames received");
1942 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "Receive Undersized");
1944 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "Fragmented Packets Received ");
1946 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "Oversized Packets Received");
1948 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "Received Jabber");
1950 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Management Packets Received");
1952 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Management Packets Dropped");
1954 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Checksum Errors");
1956
1957 /* Packet Transmission Stats */
1958 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "Good Octets Transmitted");
1960 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Total Packets Transmitted");
1962 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "Good Packets Transmitted");
1964 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "Broadcast Packets Transmitted");
1966 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "Multicast Packets Transmitted");
1968 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "Management Packets Transmitted");
1970 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "64 byte frames transmitted ");
1972 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "65-127 byte frames transmitted");
1974 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "128-255 byte frames transmitted");
1976 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "256-511 byte frames transmitted");
1978 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "512-1023 byte frames transmitted");
1980 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "1024-1522 byte frames transmitted");
1982 } /* ixgbe_add_hw_stats */
1983
1984 static void
1985 ixgbe_clear_evcnt(struct adapter *adapter)
1986 {
1987 struct tx_ring *txr = adapter->tx_rings;
1988 struct rx_ring *rxr = adapter->rx_rings;
1989 struct ixgbe_hw *hw = &adapter->hw;
1990 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1991
1992 adapter->efbig_tx_dma_setup.ev_count = 0;
1993 adapter->mbuf_defrag_failed.ev_count = 0;
1994 adapter->efbig2_tx_dma_setup.ev_count = 0;
1995 adapter->einval_tx_dma_setup.ev_count = 0;
1996 adapter->other_tx_dma_setup.ev_count = 0;
1997 adapter->eagain_tx_dma_setup.ev_count = 0;
1998 adapter->enomem_tx_dma_setup.ev_count = 0;
1999 adapter->tso_err.ev_count = 0;
2000 adapter->watchdog_events.ev_count = 0;
2001 adapter->link_irq.ev_count = 0;
2002 adapter->link_sicount.ev_count = 0;
2003 adapter->mod_sicount.ev_count = 0;
2004 adapter->msf_sicount.ev_count = 0;
2005 adapter->phy_sicount.ev_count = 0;
2006
2007 txr = adapter->tx_rings;
2008 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2009 adapter->queues[i].irqs.ev_count = 0;
2010 adapter->queues[i].handleq.ev_count = 0;
2011 adapter->queues[i].req.ev_count = 0;
2012 txr->no_desc_avail.ev_count = 0;
2013 txr->total_packets.ev_count = 0;
2014 txr->tso_tx.ev_count = 0;
2015 #ifndef IXGBE_LEGACY_TX
2016 txr->pcq_drops.ev_count = 0;
2017 #endif
2018 txr->q_efbig_tx_dma_setup = 0;
2019 txr->q_mbuf_defrag_failed = 0;
2020 txr->q_efbig2_tx_dma_setup = 0;
2021 txr->q_einval_tx_dma_setup = 0;
2022 txr->q_other_tx_dma_setup = 0;
2023 txr->q_eagain_tx_dma_setup = 0;
2024 txr->q_enomem_tx_dma_setup = 0;
2025 txr->q_tso_err = 0;
2026
2027 if (i < __arraycount(stats->mpc)) {
2028 stats->mpc[i].ev_count = 0;
2029 if (hw->mac.type == ixgbe_mac_82598EB)
2030 stats->rnbc[i].ev_count = 0;
2031 }
2032 if (i < __arraycount(stats->pxontxc)) {
2033 stats->pxontxc[i].ev_count = 0;
2034 stats->pxonrxc[i].ev_count = 0;
2035 stats->pxofftxc[i].ev_count = 0;
2036 stats->pxoffrxc[i].ev_count = 0;
2037 stats->pxon2offc[i].ev_count = 0;
2038 }
2039 if (i < __arraycount(stats->qprc)) {
2040 stats->qprc[i].ev_count = 0;
2041 stats->qptc[i].ev_count = 0;
2042 stats->qbrc[i].ev_count = 0;
2043 stats->qbtc[i].ev_count = 0;
2044 stats->qprdc[i].ev_count = 0;
2045 }
2046
2047 rxr->rx_packets.ev_count = 0;
2048 rxr->rx_bytes.ev_count = 0;
2049 rxr->rx_copies.ev_count = 0;
2050 rxr->no_jmbuf.ev_count = 0;
2051 rxr->rx_discarded.ev_count = 0;
2052 }
2053 stats->ipcs.ev_count = 0;
2054 stats->l4cs.ev_count = 0;
2055 stats->ipcs_bad.ev_count = 0;
2056 stats->l4cs_bad.ev_count = 0;
2057 stats->intzero.ev_count = 0;
2058 stats->legint.ev_count = 0;
2059 stats->crcerrs.ev_count = 0;
2060 stats->illerrc.ev_count = 0;
2061 stats->errbc.ev_count = 0;
2062 stats->mspdc.ev_count = 0;
2063 stats->mbsdc.ev_count = 0;
2064 stats->mpctotal.ev_count = 0;
2065 stats->mlfc.ev_count = 0;
2066 stats->mrfc.ev_count = 0;
2067 stats->rlec.ev_count = 0;
2068 stats->lxontxc.ev_count = 0;
2069 stats->lxonrxc.ev_count = 0;
2070 stats->lxofftxc.ev_count = 0;
2071 stats->lxoffrxc.ev_count = 0;
2072
2073 /* Packet Reception Stats */
2074 stats->tor.ev_count = 0;
2075 stats->gorc.ev_count = 0;
2076 stats->tpr.ev_count = 0;
2077 stats->gprc.ev_count = 0;
2078 stats->mprc.ev_count = 0;
2079 stats->bprc.ev_count = 0;
2080 stats->prc64.ev_count = 0;
2081 stats->prc127.ev_count = 0;
2082 stats->prc255.ev_count = 0;
2083 stats->prc511.ev_count = 0;
2084 stats->prc1023.ev_count = 0;
2085 stats->prc1522.ev_count = 0;
2086 stats->ruc.ev_count = 0;
2087 stats->rfc.ev_count = 0;
2088 stats->roc.ev_count = 0;
2089 stats->rjc.ev_count = 0;
2090 stats->mngprc.ev_count = 0;
2091 stats->mngpdc.ev_count = 0;
2092 stats->xec.ev_count = 0;
2093
2094 /* Packet Transmission Stats */
2095 stats->gotc.ev_count = 0;
2096 stats->tpt.ev_count = 0;
2097 stats->gptc.ev_count = 0;
2098 stats->bptc.ev_count = 0;
2099 stats->mptc.ev_count = 0;
2100 stats->mngptc.ev_count = 0;
2101 stats->ptc64.ev_count = 0;
2102 stats->ptc127.ev_count = 0;
2103 stats->ptc255.ev_count = 0;
2104 stats->ptc511.ev_count = 0;
2105 stats->ptc1023.ev_count = 0;
2106 stats->ptc1522.ev_count = 0;
2107 }
2108
2109 /************************************************************************
2110 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2111 *
2112 * Retrieves the TDH value from the hardware
2113 ************************************************************************/
2114 static int
2115 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2116 {
2117 struct sysctlnode node = *rnode;
2118 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2119 uint32_t val;
2120
2121 if (!txr)
2122 return (0);
2123
2124 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2125 node.sysctl_data = &val;
2126 return sysctl_lookup(SYSCTLFN_CALL(&node));
2127 } /* ixgbe_sysctl_tdh_handler */
2128
2129 /************************************************************************
2130 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2131 *
2132 * Retrieves the TDT value from the hardware
2133 ************************************************************************/
2134 static int
2135 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2136 {
2137 struct sysctlnode node = *rnode;
2138 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2139 uint32_t val;
2140
2141 if (!txr)
2142 return (0);
2143
2144 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2145 node.sysctl_data = &val;
2146 return sysctl_lookup(SYSCTLFN_CALL(&node));
2147 } /* ixgbe_sysctl_tdt_handler */
2148
2149 /************************************************************************
2150 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2151 *
2152 * Retrieves the RDH value from the hardware
2153 ************************************************************************/
2154 static int
2155 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2156 {
2157 struct sysctlnode node = *rnode;
2158 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2159 uint32_t val;
2160
2161 if (!rxr)
2162 return (0);
2163
2164 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2165 node.sysctl_data = &val;
2166 return sysctl_lookup(SYSCTLFN_CALL(&node));
2167 } /* ixgbe_sysctl_rdh_handler */
2168
2169 /************************************************************************
2170 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2171 *
2172 * Retrieves the RDT value from the hardware
2173 ************************************************************************/
2174 static int
2175 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2176 {
2177 struct sysctlnode node = *rnode;
2178 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2179 uint32_t val;
2180
2181 if (!rxr)
2182 return (0);
2183
2184 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2185 node.sysctl_data = &val;
2186 return sysctl_lookup(SYSCTLFN_CALL(&node));
2187 } /* ixgbe_sysctl_rdt_handler */
2188
2189 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2190 /************************************************************************
2191 * ixgbe_register_vlan
2192 *
2193 * Run via vlan config EVENT, it enables us to use the
2194 * HW Filter table since we can get the vlan id. This
2195 * just creates the entry in the soft version of the
2196 * VFTA, init will repopulate the real table.
2197 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * shadow_vfta mirrors the hardware VFTA: one bit per VLAN ID,
	 * 32 IDs per 32-bit word.  index selects the word, bit the
	 * position within it.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated soft VFTA into the hardware filter table. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2217
2218 /************************************************************************
2219 * ixgbe_unregister_vlan
2220 *
2221 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2222 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear this VLAN ID's bit in the soft VFTA (32 IDs per word). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2243 #endif
2244
2245 static void
2246 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2247 {
2248 struct ethercom *ec = &adapter->osdep.ec;
2249 struct ixgbe_hw *hw = &adapter->hw;
2250 struct rx_ring *rxr;
2251 int i;
2252 u32 ctrl;
2253
2254
2255 /*
2256 * We get here thru init_locked, meaning
2257 * a soft reset, this has already cleared
2258 * the VFTA and other state, so if there
2259 * have been no vlan's registered do nothing.
2260 */
2261 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2262 return;
2263
2264 /* Setup the queues for vlans */
2265 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2266 for (i = 0; i < adapter->num_queues; i++) {
2267 rxr = &adapter->rx_rings[i];
2268 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2269 if (hw->mac.type != ixgbe_mac_82598EB) {
2270 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2271 ctrl |= IXGBE_RXDCTL_VME;
2272 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2273 }
2274 rxr->vtag_strip = TRUE;
2275 }
2276 }
2277
2278 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2279 return;
2280 /*
2281 * A soft reset zero's out the VFTA, so
2282 * we need to repopulate it now.
2283 */
2284 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2285 if (adapter->shadow_vfta[i] != 0)
2286 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2287 adapter->shadow_vfta[i]);
2288
2289 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2290 /* Enable the Filter Table if enabled */
2291 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2292 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2293 ctrl |= IXGBE_VLNCTRL_VFE;
2294 }
2295 if (hw->mac.type == ixgbe_mac_82598EB)
2296 ctrl |= IXGBE_VLNCTRL_VME;
2297 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2298 } /* ixgbe_setup_vlan_hw_support */
2299
2300 /************************************************************************
2301 * ixgbe_get_slot_info
2302 *
2303 * Get the width and transaction speed of
2304 * the slot this adapter is plugged into.
2305 ************************************************************************/
2306 static void
2307 ixgbe_get_slot_info(struct adapter *adapter)
2308 {
2309 device_t dev = adapter->dev;
2310 struct ixgbe_hw *hw = &adapter->hw;
2311 u32 offset;
2312 u16 link;
2313 int bus_info_valid = TRUE;
2314
2315 /* Some devices are behind an internal bridge */
2316 switch (hw->device_id) {
2317 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2318 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2319 goto get_parent_info;
2320 default:
2321 break;
2322 }
2323
2324 ixgbe_get_bus_info(hw);
2325
2326 /*
2327 * Some devices don't use PCI-E, but there is no need
2328 * to display "Unknown" for bus speed and width.
2329 */
2330 switch (hw->mac.type) {
2331 case ixgbe_mac_X550EM_x:
2332 case ixgbe_mac_X550EM_a:
2333 return;
2334 default:
2335 goto display;
2336 }
2337
2338 get_parent_info:
2339 /*
2340 * For the Quad port adapter we need to parse back
2341 * up the PCI tree to find the speed of the expansion
2342 * slot into which this adapter is plugged. A bit more work.
2343 */
2344 dev = device_parent(device_parent(dev));
2345 #if 0
2346 #ifdef IXGBE_DEBUG
2347 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2348 pci_get_slot(dev), pci_get_function(dev));
2349 #endif
2350 dev = device_parent(device_parent(dev));
2351 #ifdef IXGBE_DEBUG
2352 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2353 pci_get_slot(dev), pci_get_function(dev));
2354 #endif
2355 #endif
2356 /* Now get the PCI Express Capabilities offset */
2357 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2358 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2359 /*
2360 * Hmm...can't get PCI-Express capabilities.
2361 * Falling back to default method.
2362 */
2363 bus_info_valid = FALSE;
2364 ixgbe_get_bus_info(hw);
2365 goto display;
2366 }
2367 /* ...and read the Link Status Register */
2368 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2369 offset + PCIE_LCSR) >> 16;
2370 ixgbe_set_pci_config_data_generic(hw, link);
2371
2372 display:
2373 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2374 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2375 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2376 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2377 "Unknown"),
2378 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2379 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2380 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2381 "Unknown"));
2382
2383 if (bus_info_valid) {
2384 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2385 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2386 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2387 device_printf(dev, "PCI-Express bandwidth available"
2388 " for this card\n is not sufficient for"
2389 " optimal performance.\n");
2390 device_printf(dev, "For optimal performance a x8 "
2391 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2392 }
2393 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2394 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2395 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2396 device_printf(dev, "PCI-Express bandwidth available"
2397 " for this card\n is not sufficient for"
2398 " optimal performance.\n");
2399 device_printf(dev, "For optimal performance a x8 "
2400 "PCIE Gen3 slot is required.\n");
2401 }
2402 } else
2403 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2404
2405 return;
2406 } /* ixgbe_get_slot_info */
2407
2408 /************************************************************************
2409 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2410 ************************************************************************/
2411 static inline void
2412 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2413 {
2414 struct ixgbe_hw *hw = &adapter->hw;
2415 struct ix_queue *que = &adapter->queues[vector];
2416 u64 queue = (u64)(1ULL << vector);
2417 u32 mask;
2418
2419 mutex_enter(&que->dc_mtx);
2420 if (que->disabled_count > 0 && --que->disabled_count > 0)
2421 goto out;
2422
2423 if (hw->mac.type == ixgbe_mac_82598EB) {
2424 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2425 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2426 } else {
2427 mask = (queue & 0xFFFFFFFF);
2428 if (mask)
2429 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2430 mask = (queue >> 32);
2431 if (mask)
2432 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2433 }
2434 out:
2435 mutex_exit(&que->dc_mtx);
2436 } /* ixgbe_enable_queue */
2437
2438 /************************************************************************
2439 * ixgbe_disable_queue_internal
2440 ************************************************************************/
2441 static inline void
2442 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2443 {
2444 struct ixgbe_hw *hw = &adapter->hw;
2445 struct ix_queue *que = &adapter->queues[vector];
2446 u64 queue = (u64)(1ULL << vector);
2447 u32 mask;
2448
2449 mutex_enter(&que->dc_mtx);
2450
2451 if (que->disabled_count > 0) {
2452 if (nestok)
2453 que->disabled_count++;
2454 goto out;
2455 }
2456 que->disabled_count++;
2457
2458 if (hw->mac.type == ixgbe_mac_82598EB) {
2459 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2460 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2461 } else {
2462 mask = (queue & 0xFFFFFFFF);
2463 if (mask)
2464 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2465 mask = (queue >> 32);
2466 if (mask)
2467 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2468 }
2469 out:
2470 mutex_exit(&que->dc_mtx);
2471 } /* ixgbe_disable_queue_internal */
2472
2473 /************************************************************************
2474 * ixgbe_disable_queue
2475 ************************************************************************/
2476 static inline void
2477 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2478 {
2479
2480 ixgbe_disable_queue_internal(adapter, vector, true);
2481 } /* ixgbe_disable_queue */
2482
2483 /************************************************************************
2484 * ixgbe_sched_handle_que - schedule deferred packet processing
2485 ************************************************************************/
2486 static inline void
2487 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2488 {
2489
2490 if (adapter->txrx_use_workqueue) {
2491 /*
2492 * adapter->que_wq is bound to each CPU instead of
2493 * each NIC queue to reduce workqueue kthread. As we
2494 * should consider about interrupt affinity in this
2495 * function, the workqueue kthread must be WQ_PERCPU.
2496 * If create WQ_PERCPU workqueue kthread for each NIC
2497 * queue, that number of created workqueue kthread is
2498 * (number of used NIC queue) * (number of CPUs) =
2499 * (number of CPUs) ^ 2 most often.
2500 *
2501 * The same NIC queue's interrupts are avoided by
2502 * masking the queue's interrupt. And different
2503 * NIC queue's interrupts use different struct work
2504 * (que->wq_cookie). So, "enqueued flag" to avoid
2505 * twice workqueue_enqueue() is not required .
2506 */
2507 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2508 } else {
2509 softint_schedule(que->que_si);
2510 }
2511 }
2512
2513 /************************************************************************
2514 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2515 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt until deferred processing re-enables it. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = average bytes per packet over the last interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* More work pending: defer to softint/workqueue (interrupt stays
	 * masked); otherwise re-enable the queue interrupt now. */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2608
2609 /************************************************************************
2610 * ixgbe_media_status - Media Ioctl callback
2611 *
2612 * Called whenever the user queries the status of
2613 * the interface using ifconfig.
2614 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link_active/link_speed before reporting. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and stop here. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map the cached physical-layer capability bits plus the current
	 * link speed to an ifmedia subtype.  Each "if (layer & ...)" group
	 * below handles one media family; all subtypes are full duplex.
	 */

	/* Copper (BASE-T) variants at all supported speeds. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper (passive or active). */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber; 1G falls back to 1000BASE-LX. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber; 1G falls back to 1000BASE-SX. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	/* Backplane KR: without IFM_ETH_XTYPE, reported as SR. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	/* Backplane KX4/KX: without IFM_ETH_XTYPE, reported as CX4. */
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	/* Derive if_baudrate from the media word just built. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2764
2765 /************************************************************************
2766 * ixgbe_media_change - Media Ioctl callback
2767 *
2768 * Called when the user changes speed/duplex using
2769 * media/mediopt option with ifconfig.
2770 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; user changes are not allowed. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested ifmedia subtype to a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the hardware supports. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds in the same bitmap encoding the
	 * "advertise_speed" sysctl uses:
	 *   0x01 = 100M, 0x02 = 1G, 0x04 = 10G, 0x08 = 10M,
	 *   0x10 = 2.5G, 0x20 = 5G.  0 means autonegotiate (IFM_AUTO).
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2869
2870 /************************************************************************
2871 * ixgbe_set_promisc
2872 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Caller must hold the core lock (serializes FCTRL updates). */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from "unicast promiscuous off"; re-enable below if needed. */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count joined multicast groups, capped at the HW limit. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* All groups fit in the filter table: no multicast promiscuous. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* PROMISC overrides everything; ALLMULTI enables MPE only. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2912
2913 /************************************************************************
2914 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2915 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the softint has processed it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* 82598 lacks the non-queue causes handled below. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			/*
			 * NOTE(review): this early return skips the
			 * IXGBE_EIMS_OTHER re-enable at the bottom, leaving
			 * the "other" causes masked until the fdir softint
			 * runs -- confirm this is intentional.
			 */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal-sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		/* Module-insertion GPI moved to SDP0 on X540 and later. */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 reports multispeed-fiber events on SDP1. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3037
3038 static void
3039 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
3040 {
3041 struct adapter *adapter = que->adapter;
3042
3043 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3044 itr |= itr << 16;
3045 else
3046 itr |= IXGBE_EITR_CNT_WDIS;
3047
3048 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), itr);
3049 }
3050
3051
3052 /************************************************************************
3053 * ixgbe_sysctl_interrupt_rate_handler
3054 ************************************************************************/
3055 static int
3056 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3057 {
3058 struct sysctlnode node = *rnode;
3059 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3060 struct adapter *adapter = que->adapter;
3061 uint32_t reg, usec, rate;
3062 int error;
3063
3064 if (que == NULL)
3065 return 0;
3066 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3067 usec = ((reg & 0x0FF8) >> 3);
3068 if (usec > 0)
3069 rate = 500000 / usec;
3070 else
3071 rate = 0;
3072 node.sysctl_data = &rate;
3073 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3074 if (error || newp == NULL)
3075 return error;
3076 reg &= ~0xfff; /* default, no limitation */
3077 if (rate > 0 && rate < 500000) {
3078 if (rate < 1000)
3079 rate = 1000;
3080 reg |= ((4000000/rate) & 0xff8);
3081 /*
3082 * When RSC is used, ITR interval must be larger than
3083 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3084 * The minimum value is always greater than 2us on 100M
3085 * (and 10M?(not documented)), but it's not on 1G and higher.
3086 */
3087 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3088 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3089 if ((adapter->num_queues > 1)
3090 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3091 return EINVAL;
3092 }
3093 ixgbe_max_interrupt_rate = rate;
3094 } else
3095 ixgbe_max_interrupt_rate = 0;
3096 ixgbe_eitr_write(que, reg);
3097
3098 return (0);
3099 } /* ixgbe_sysctl_interrupt_rate_handler */
3100
3101 const struct sysctlnode *
3102 ixgbe_sysctl_instance(struct adapter *adapter)
3103 {
3104 const char *dvname;
3105 struct sysctllog **log;
3106 int rc;
3107 const struct sysctlnode *rnode;
3108
3109 if (adapter->sysctltop != NULL)
3110 return adapter->sysctltop;
3111
3112 log = &adapter->sysctllog;
3113 dvname = device_xname(adapter->dev);
3114
3115 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3116 0, CTLTYPE_NODE, dvname,
3117 SYSCTL_DESCR("ixgbe information and settings"),
3118 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3119 goto err;
3120
3121 return rnode;
3122 err:
3123 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3124 return NULL;
3125 }
3126
3127 /************************************************************************
3128 * ixgbe_add_device_sysctls
3129 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-device root created here. */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only ring/queue geometry. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM toggle from the module-wide default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the workqueue toggle from the module-wide default. */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	/*
	 * NOTE(review): this and the "X552/X557-AT" section below are both
	 * gated on the same device ID -- presumably intentional (that part
	 * is both WoL-capable and has the external PHY), but verify.
	 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* External PHY sysctls get their own sub-node. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3258
3259 /************************************************************************
3260 * ixgbe_allocate_pci_resources
3261 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0 (the device register window); only memory BARs work. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Registers must not be mapped prefetchable: reads/writes
		 * need strict ordering and no combining.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* Shared failure path for both map steps above. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3299
3300 static void
3301 ixgbe_free_softint(struct adapter *adapter)
3302 {
3303 struct ix_queue *que = adapter->queues;
3304 struct tx_ring *txr = adapter->tx_rings;
3305 int i;
3306
3307 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3308 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3309 if (txr->txr_si != NULL)
3310 softint_disestablish(txr->txr_si);
3311 }
3312 if (que->que_si != NULL)
3313 softint_disestablish(que->que_si);
3314 }
3315 if (adapter->txr_wq != NULL)
3316 workqueue_destroy(adapter->txr_wq);
3317 if (adapter->txr_wq_enqueued != NULL)
3318 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3319 if (adapter->que_wq != NULL)
3320 workqueue_destroy(adapter->que_wq);
3321
3322 /* Drain the Link queue */
3323 if (adapter->link_si != NULL) {
3324 softint_disestablish(adapter->link_si);
3325 adapter->link_si = NULL;
3326 }
3327 if (adapter->mod_si != NULL) {
3328 softint_disestablish(adapter->mod_si);
3329 adapter->mod_si = NULL;
3330 }
3331 if (adapter->msf_si != NULL) {
3332 softint_disestablish(adapter->msf_si);
3333 adapter->msf_si = NULL;
3334 }
3335 if (adapter->phy_si != NULL) {
3336 softint_disestablish(adapter->phy_si);
3337 adapter->phy_si = NULL;
3338 }
3339 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3340 if (adapter->fdir_si != NULL) {
3341 softint_disestablish(adapter->fdir_si);
3342 adapter->fdir_si = NULL;
3343 }
3344 }
3345 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3346 if (adapter->mbx_si != NULL) {
3347 softint_disestablish(adapter->mbx_si);
3348 adapter->mbx_si = NULL;
3349 }
3350 }
3351 } /* ixgbe_free_softint */
3352
3353 /************************************************************************
3354 * ixgbe_detach - Device removal routine
3355 *
3356 * Called when the driver is being removed.
3357 * Stops the adapter and deallocates all the resources
3358 * that were allocated for driver operation.
3359 *
3360 * return 0 on success, positive on failure
3361 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to do if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while VFs are still bound to us. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Softints must be gone before we free their backing structures. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any in-flight timer callout to finish. */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach every event counter registered at attach time. */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue and per-ring counters (rxr/txr walk in lockstep). */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		/* HW stats arrays may be shorter than num_queues. */
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			/* rnbc exists only on 82598 (see attach). */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	/* mbsdc is only registered on X550 and newer (see attach). */
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally free ring structures, queue locks and tables. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3545
3546 /************************************************************************
3547 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3548 *
3549 * Prepare the adapter/port for LPLU and/or WoL
3550 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Caller must hold the core lock (we stop the adapter below). */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY up across the stop so LPLU entry works. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3605
3606 /************************************************************************
3607 * ixgbe_shutdown - Shutdown entry point
3608 ************************************************************************/
3609 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* Disabled (see #if 0 above): would enter low-power mode at shutdown. */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
3624 #endif
3625
3626 /************************************************************************
3627 * ixgbe_suspend
3628 *
3629 * From D0 to D3
3630 ************************************************************************/
3631 static bool
3632 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3633 {
3634 struct adapter *adapter = device_private(dev);
3635 int error = 0;
3636
3637 INIT_DEBUGOUT("ixgbe_suspend: begin");
3638
3639 IXGBE_CORE_LOCK(adapter);
3640
3641 error = ixgbe_setup_low_power_mode(adapter);
3642
3643 IXGBE_CORE_UNLOCK(adapter);
3644
3645 return (error);
3646 } /* ixgbe_suspend */
3647
3648 /************************************************************************
3649 * ixgbe_resume
3650 *
3651 * From D3 to D0
3652 ************************************************************************/
3653 static bool
3654 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3655 {
3656 struct adapter *adapter = device_private(dev);
3657 struct ifnet *ifp = adapter->ifp;
3658 struct ixgbe_hw *hw = &adapter->hw;
3659 u32 wus;
3660
3661 INIT_DEBUGOUT("ixgbe_resume: begin");
3662
3663 IXGBE_CORE_LOCK(adapter);
3664
3665 /* Read & clear WUS register */
3666 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3667 if (wus)
3668 device_printf(dev, "Woken up by (WUS): %#010x\n",
3669 IXGBE_READ_REG(hw, IXGBE_WUS));
3670 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3671 /* And clear WUFC until next low-power transition */
3672 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3673
3674 /*
3675 * Required after D3->D0 transition;
3676 * will re-advertise all previous advertised speeds
3677 */
3678 if (ifp->if_flags & IFF_UP)
3679 ixgbe_init_locked(adapter);
3680
3681 IXGBE_CORE_UNLOCK(adapter);
3682
3683 return true;
3684 } /* ixgbe_resume */
3685
3686 /*
3687 * Set the various hardware offload abilities.
3688 *
3689 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3690 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3691 * mbuf offload flags the driver will understand.
3692 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty.  On FreeBSD this maps if_capenable
	 * into if_hwassist mbuf offload flags; the NetBSD equivalent is
	 * presumably handled elsewhere (e.g. via if_csum_flags_tx/rx) --
	 * confirm before implementing anything here.
	 */
	/* XXX */
}
3698
3699 /************************************************************************
3700 * ixgbe_init_locked - Init entry point
3701 *
3702 * Used in two ways: It is used by the stack as an init
3703 * entry point in network interface structure. It is also
3704 * used by the driver as a hw/sw initialization routine to
3705 * get to a consistent state.
3706 *
 *   Note: this function is void; errors are reported via device_printf
3708 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/*
	 * Clear adapter_stopped first so ixgbe_stop_adapter() always
	 * performs the full stop sequence, then halt the stat timer.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	/* NOTE(review): no VLAN header size is accounted here -- confirm */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to ~10ms for the enable bit to take effect */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the per-second watchdog/statistics timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
3944
3945 /************************************************************************
3946 * ixgbe_init
3947 ************************************************************************/
3948 static int
3949 ixgbe_init(struct ifnet *ifp)
3950 {
3951 struct adapter *adapter = ifp->if_softc;
3952
3953 IXGBE_CORE_LOCK(adapter);
3954 ixgbe_init_locked(adapter);
3955 IXGBE_CORE_UNLOCK(adapter);
3956
3957 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
3958 } /* ixgbe_init */
3959
3960 /************************************************************************
3961 * ixgbe_set_ivar
3962 *
3963 * Setup the correct IVAR register for a particular MSI-X interrupt
3964 * (yes this is all very magic and confusing :)
3965 * - entry is the register array entry
3966 * - vector is the MSI-X vector for this queue
3967 * - type is RX/TX/MISC
3968 ************************************************************************/
3969 static void
3970 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3971 {
3972 struct ixgbe_hw *hw = &adapter->hw;
3973 u32 ivar, index;
3974
3975 vector |= IXGBE_IVAR_ALLOC_VAL;
3976
3977 switch (hw->mac.type) {
3978 case ixgbe_mac_82598EB:
3979 if (type == -1)
3980 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3981 else
3982 entry += (type * 64);
3983 index = (entry >> 2) & 0x1F;
3984 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3985 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3986 ivar |= (vector << (8 * (entry & 0x3)));
3987 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3988 break;
3989 case ixgbe_mac_82599EB:
3990 case ixgbe_mac_X540:
3991 case ixgbe_mac_X550:
3992 case ixgbe_mac_X550EM_x:
3993 case ixgbe_mac_X550EM_a:
3994 if (type == -1) { /* MISC IVAR */
3995 index = (entry & 1) * 8;
3996 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3997 ivar &= ~(0xFF << index);
3998 ivar |= (vector << index);
3999 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4000 } else { /* RX/TX IVARS */
4001 index = (16 * (entry & 1)) + (8 * type);
4002 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4003 ivar &= ~(0xFF << index);
4004 ivar |= (vector << index);
4005 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4006 }
4007 break;
4008 default:
4009 break;
4010 }
4011 } /* ixgbe_set_ivar */
4012
4013 /************************************************************************
4014 * ixgbe_configure_ivars
4015 ************************************************************************/
4016 static void
4017 ixgbe_configure_ivars(struct adapter *adapter)
4018 {
4019 struct ix_queue *que = adapter->queues;
4020 u32 newitr;
4021
4022 if (ixgbe_max_interrupt_rate > 0)
4023 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4024 else {
4025 /*
4026 * Disable DMA coalescing if interrupt moderation is
4027 * disabled.
4028 */
4029 adapter->dmac = 0;
4030 newitr = 0;
4031 }
4032
4033 for (int i = 0; i < adapter->num_queues; i++, que++) {
4034 struct rx_ring *rxr = &adapter->rx_rings[i];
4035 struct tx_ring *txr = &adapter->tx_rings[i];
4036 /* First the RX queue entry */
4037 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4038 /* ... and the TX */
4039 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4040 /* Set an Initial EITR value */
4041 ixgbe_eitr_write(que, newitr);
4042 /*
4043 * To eliminate influence of the previous state.
4044 * At this point, Tx/Rx interrupt handler
4045 * (ixgbe_msix_que()) cannot be called, so both
4046 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4047 */
4048 que->eitr_setting = 0;
4049 }
4050
4051 /* For the Link interrupt */
4052 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4053 } /* ixgbe_configure_ivars */
4054
4055 /************************************************************************
4056 * ixgbe_config_gpie
4057 ************************************************************************/
4058 static void
4059 ixgbe_config_gpie(struct adapter *adapter)
4060 {
4061 struct ixgbe_hw *hw = &adapter->hw;
4062 u32 gpie;
4063
4064 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4065
4066 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4067 /* Enable Enhanced MSI-X mode */
4068 gpie |= IXGBE_GPIE_MSIX_MODE
4069 | IXGBE_GPIE_EIAME
4070 | IXGBE_GPIE_PBA_SUPPORT
4071 | IXGBE_GPIE_OCD;
4072 }
4073
4074 /* Fan Failure Interrupt */
4075 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4076 gpie |= IXGBE_SDP1_GPIEN;
4077
4078 /* Thermal Sensor Interrupt */
4079 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4080 gpie |= IXGBE_SDP0_GPIEN_X540;
4081
4082 /* Link detection */
4083 switch (hw->mac.type) {
4084 case ixgbe_mac_82599EB:
4085 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4086 break;
4087 case ixgbe_mac_X550EM_x:
4088 case ixgbe_mac_X550EM_a:
4089 gpie |= IXGBE_SDP0_GPIEN_X540;
4090 break;
4091 default:
4092 break;
4093 }
4094
4095 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4096
4097 } /* ixgbe_config_gpie */
4098
4099 /************************************************************************
4100 * ixgbe_config_delay_values
4101 *
4102 * Requires adapter->max_frame_size to be set.
4103 ************************************************************************/
4104 static void
4105 ixgbe_config_delay_values(struct adapter *adapter)
4106 {
4107 struct ixgbe_hw *hw = &adapter->hw;
4108 u32 rxpb, frame, size, tmp;
4109
4110 frame = adapter->max_frame_size;
4111
4112 /* Calculate High Water */
4113 switch (hw->mac.type) {
4114 case ixgbe_mac_X540:
4115 case ixgbe_mac_X550:
4116 case ixgbe_mac_X550EM_x:
4117 case ixgbe_mac_X550EM_a:
4118 tmp = IXGBE_DV_X540(frame, frame);
4119 break;
4120 default:
4121 tmp = IXGBE_DV(frame, frame);
4122 break;
4123 }
4124 size = IXGBE_BT2KB(tmp);
4125 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4126 hw->fc.high_water[0] = rxpb - size;
4127
4128 /* Now calculate Low Water */
4129 switch (hw->mac.type) {
4130 case ixgbe_mac_X540:
4131 case ixgbe_mac_X550:
4132 case ixgbe_mac_X550EM_x:
4133 case ixgbe_mac_X550EM_a:
4134 tmp = IXGBE_LOW_DV_X540(frame);
4135 break;
4136 default:
4137 tmp = IXGBE_LOW_DV(frame);
4138 break;
4139 }
4140 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4141
4142 hw->fc.pause_time = IXGBE_FC_PAUSE;
4143 hw->fc.send_xon = TRUE;
4144 } /* ixgbe_config_delay_values */
4145
4146 /************************************************************************
4147 * ixgbe_set_multi - Multicast Update
4148 *
4149 * Called whenever multicast address list is updated.
4150 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Rebuild the shadow multicast table from scratch */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI when the table is full or when
		 * a range (addrlo != addrhi) is requested -- the
		 * exact-match filter cannot express address ranges.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Reflect promiscuous/allmulti state in the FCTRL register */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Program the exact-match filters only when they all fit */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4204
4205 /************************************************************************
4206 * ixgbe_mc_array_itr
4207 *
4208 * An iterator function needed by the multicast shared code.
4209 * It feeds the shared code routine the addresses in the
4210 * array of ixgbe_set_multi() one by one.
4211 ************************************************************************/
4212 static u8 *
4213 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4214 {
4215 struct ixgbe_mc_addr *mta;
4216
4217 mta = (struct ixgbe_mc_addr *)*update_ptr;
4218 *vmdq = mta->vmdq;
4219
4220 *update_ptr = (u8*)(mta + 1);
4221
4222 return (mta->addr);
4223 } /* ixgbe_mc_array_itr */
4224
4225 /************************************************************************
4226 * ixgbe_local_timer - Timer routine
4227 *
4228 * Checks for link status, updates statistics,
4229 * and runs the watchdog check.
4230 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Callout entry: take the core lock and do the real work. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4240
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Sum the per-queue TX error counters into adapter-wide events */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			/* Skip queues that were deliberately disabled */
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}

out:
	/* Re-arm ourselves to run again in one second */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/*
	 * All queues hung: perform a full reinitialization.  The
	 * callout is restarted by ixgbe_init_locked() itself.
	 */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4343
4344 /************************************************************************
4345 * ixgbe_sfp_probe
4346 *
4347 * Determine if a port had optics inserted.
4348 ************************************************************************/
4349 static bool
4350 ixgbe_sfp_probe(struct adapter *adapter)
4351 {
4352 struct ixgbe_hw *hw = &adapter->hw;
4353 device_t dev = adapter->dev;
4354 bool result = FALSE;
4355
4356 if ((hw->phy.type == ixgbe_phy_nl) &&
4357 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4358 s32 ret = hw->phy.ops.identify_sfp(hw);
4359 if (ret)
4360 goto out;
4361 ret = hw->phy.ops.reset(hw);
4362 adapter->sfp_probe = FALSE;
4363 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4364 device_printf(dev,"Unsupported SFP+ module detected!");
4365 device_printf(dev,
4366 "Reload driver with supported module.\n");
4367 goto out;
4368 } else
4369 device_printf(dev, "SFP+ module detected!\n");
4370 /* We now have supported optics */
4371 result = TRUE;
4372 }
4373 out:
4374
4375 return (result);
4376 } /* ixgbe_sfp_probe */
4377
4378 /************************************************************************
4379 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4380 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * With the SFP crosstalk workaround enabled, only act on the
	 * interrupt if the cage-present pin (SDP2 on 82599, SDP0 on
	 * X550EM) confirms a module is actually inserted.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	/* Identify the inserted module; reject unsupported types */
	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 resets the PHY; newer MACs run the SFP setup routine */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Kick the multispeed-fiber handler to (re)negotiate the link */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4428
4429
4430 /************************************************************************
4431 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4432 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Use the advertised speeds if set, else query the hardware */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	/*
	 * NOTE(review): 'negotiate' is never consumed -- setup_link is
	 * always called with TRUE.  Looks intentional upstream; confirm.
	 */
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4458
4459 /************************************************************************
4460 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4461 ************************************************************************/
4462 static void
4463 ixgbe_handle_phy(void *context)
4464 {
4465 struct adapter *adapter = context;
4466 struct ixgbe_hw *hw = &adapter->hw;
4467 int error;
4468
4469 ++adapter->phy_sicount.ev_count;
4470 error = hw->phy.ops.handle_lasi(hw);
4471 if (error == IXGBE_ERR_OVERTEMP)
4472 device_printf(adapter->dev,
4473 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4474 " PHY will downshift to lower power state!\n");
4475 else if (error)
4476 device_printf(adapter->dev,
4477 "Error handling LASI interrupt: %d\n", error);
4478 } /* ixgbe_handle_phy */
4479
4480 static void
4481 ixgbe_ifstop(struct ifnet *ifp, int disable)
4482 {
4483 struct adapter *adapter = ifp->if_softc;
4484
4485 IXGBE_CORE_LOCK(adapter);
4486 ixgbe_stop(adapter);
4487 IXGBE_CORE_UNLOCK(adapter);
4488 }
4489
4490 /************************************************************************
4491 * ixgbe_stop - Stop the hardware
4492 *
4493 * Disables all traffic on the adapter by issuing a
4494 * global reset on the MAC and deallocates TX/RX buffers.
4495 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the watchdog/stat timer first */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/* Clear the flag so stop_adapter performs the full sequence */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4531
4532 /************************************************************************
4533 * ixgbe_update_link_status - Update OS on link state
4534 *
4535 * Note: Only updates the OS on the cached link state.
4536 * The real check of the hardware only happens with
4537 * a link interrupt.
4538 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Report the negotiated speed on the console */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4620
4621 /************************************************************************
4622 * ixgbe_config_dmac - Configure DMA Coalescing
4623 ************************************************************************/
4624 static void
4625 ixgbe_config_dmac(struct adapter *adapter)
4626 {
4627 struct ixgbe_hw *hw = &adapter->hw;
4628 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4629
4630 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4631 return;
4632
4633 if (dcfg->watchdog_timer ^ adapter->dmac ||
4634 dcfg->link_speed ^ adapter->link_speed) {
4635 dcfg->watchdog_timer = adapter->dmac;
4636 dcfg->fcoe_en = false;
4637 dcfg->link_speed = adapter->link_speed;
4638 dcfg->num_tcs = 1;
4639
4640 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4641 dcfg->watchdog_timer, dcfg->link_speed);
4642
4643 hw->mac.ops.dmac_config(hw);
4644 }
4645 } /* ixgbe_config_dmac */
4646
4647 /************************************************************************
4648 * ixgbe_enable_intr
4649 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything except the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific "other" causes (thermal, ECC, SFP GPIs) */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		/* External PHY interrupt arrives on SDP0 (X540 layout) */
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	/* Unmask the selected non-queue causes */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		/* Mailbox handled explicitly too — NOTE(review): this
		 * tests feat_cap where the unmask above tests feat_en */
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	/* Force the posted writes out to the hardware */
	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4730
4731 /************************************************************************
4732 * ixgbe_disable_intr_internal
4733 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Under MSI-X, also stop the auto-clear mechanism */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	/*
	 * Mask every queue vector; nestok is forwarded to the per-queue
	 * helper (whether a nested/repeated disable is acceptable —
	 * see ixgbe_disable_queue_internal).
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	/* Push the register writes out to the hardware */
	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4751
4752 /************************************************************************
4753 * ixgbe_disable_intr
4754 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/* nestok = true: a nested (already-disabled) state is tolerated */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4761
4762 /************************************************************************
4763 * ixgbe_ensure_disabled_intr
4764 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/* nestok = false: callers expect a strict disable — see
	 * ixgbe_disable_queue_internal for the difference */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4771
4772 /************************************************************************
4773 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4774 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Fetch the pending interrupt causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* Zero EICR: the interrupt was not ours (shared legacy line) */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* Reap completed transmit descriptors under the TX lock */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insert/remove: ack the cause, defer to softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber: ack and defer to MSF handler */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Deferred RX/TX work pending: hand off to the queue softint;
	 * otherwise it is safe to re-enable interrupts now */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4860
4861 /************************************************************************
4862 * ixgbe_free_pciintr_resources
4863 ************************************************************************/
4864 static void
4865 ixgbe_free_pciintr_resources(struct adapter *adapter)
4866 {
4867 struct ix_queue *que = adapter->queues;
4868 int rid;
4869
4870 /*
4871 * Release all msix queue resources:
4872 */
4873 for (int i = 0; i < adapter->num_queues; i++, que++) {
4874 if (que->res != NULL) {
4875 pci_intr_disestablish(adapter->osdep.pc,
4876 adapter->osdep.ihs[i]);
4877 adapter->osdep.ihs[i] = NULL;
4878 }
4879 }
4880
4881 /* Clean the Legacy or Link interrupt last */
4882 if (adapter->vector) /* we are doing MSIX */
4883 rid = adapter->vector;
4884 else
4885 rid = 0;
4886
4887 if (adapter->osdep.ihs[rid] != NULL) {
4888 pci_intr_disestablish(adapter->osdep.pc,
4889 adapter->osdep.ihs[rid]);
4890 adapter->osdep.ihs[rid] = NULL;
4891 }
4892
4893 if (adapter->osdep.intrs != NULL) {
4894 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
4895 adapter->osdep.nintrs);
4896 adapter->osdep.intrs = NULL;
4897 }
4898 } /* ixgbe_free_pciintr_resources */
4899
4900 /************************************************************************
4901 * ixgbe_free_pci_resources
4902 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	/* Tear down interrupt handlers and release interrupt vectors */
	ixgbe_free_pciintr_resources(adapter);

	/* Unmap the device register window, if one was mapped */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
4916
4917 /************************************************************************
4918 * ixgbe_set_sysctl_value
4919 ************************************************************************/
4920 static void
4921 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4922 const char *description, int *limit, int value)
4923 {
4924 device_t dev = adapter->dev;
4925 struct sysctllog **log;
4926 const struct sysctlnode *rnode, *cnode;
4927
4928 log = &adapter->sysctllog;
4929 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
4930 aprint_error_dev(dev, "could not create sysctl root\n");
4931 return;
4932 }
4933 if (sysctl_createv(log, 0, &rnode, &cnode,
4934 CTLFLAG_READWRITE, CTLTYPE_INT,
4935 name, SYSCTL_DESCR(description),
4936 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
4937 aprint_error_dev(dev, "could not create sysctl\n");
4938 *limit = value;
4939 } /* ixgbe_set_sysctl_value */
4940
4941 /************************************************************************
4942 * ixgbe_sysctl_flowcntl
4943 *
4944 * SYSCTL wrapper around setting Flow Control
4945 ************************************************************************/
4946 static int
4947 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
4948 {
4949 struct sysctlnode node = *rnode;
4950 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4951 int error, fc;
4952
4953 fc = adapter->hw.fc.current_mode;
4954 node.sysctl_data = &fc;
4955 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4956 if (error != 0 || newp == NULL)
4957 return error;
4958
4959 /* Don't bother if it's not changed */
4960 if (fc == adapter->hw.fc.current_mode)
4961 return (0);
4962
4963 return ixgbe_set_flowcntl(adapter, fc);
4964 } /* ixgbe_sysctl_flowcntl */
4965
4966 /************************************************************************
4967 * ixgbe_set_flowcntl - Set flow control
4968 *
4969 * Flow control values:
4970 * 0 - off
4971 * 1 - rx pause
4972 * 2 - tx pause
4973 * 3 - full
4974 ************************************************************************/
4975 static int
4976 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4977 {
4978 switch (fc) {
4979 case ixgbe_fc_rx_pause:
4980 case ixgbe_fc_tx_pause:
4981 case ixgbe_fc_full:
4982 adapter->hw.fc.requested_mode = fc;
4983 if (adapter->num_queues > 1)
4984 ixgbe_disable_rx_drop(adapter);
4985 break;
4986 case ixgbe_fc_none:
4987 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4988 if (adapter->num_queues > 1)
4989 ixgbe_enable_rx_drop(adapter);
4990 break;
4991 default:
4992 return (EINVAL);
4993 }
4994
4995 #if 0 /* XXX NetBSD */
4996 /* Don't autoneg if forcing a value */
4997 adapter->hw.fc.disable_fc_autoneg = TRUE;
4998 #endif
4999 ixgbe_fc_enable(&adapter->hw);
5000
5001 return (0);
5002 } /* ixgbe_set_flowcntl */
5003
5004 /************************************************************************
5005 * ixgbe_enable_rx_drop
5006 *
5007 * Enable the hardware to drop packets when the buffer is
5008 * full. This is useful with multiqueue, so that no single
5009 * queue being full stalls the entire RX engine. We only
5010 * enable this when Multiqueue is enabled AND Flow Control
5011 * is disabled.
5012 ************************************************************************/
5013 static void
5014 ixgbe_enable_rx_drop(struct adapter *adapter)
5015 {
5016 struct ixgbe_hw *hw = &adapter->hw;
5017 struct rx_ring *rxr;
5018 u32 srrctl;
5019
5020 for (int i = 0; i < adapter->num_queues; i++) {
5021 rxr = &adapter->rx_rings[i];
5022 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5023 srrctl |= IXGBE_SRRCTL_DROP_EN;
5024 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5025 }
5026
5027 /* enable drop for each vf */
5028 for (int i = 0; i < adapter->num_vfs; i++) {
5029 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5030 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5031 IXGBE_QDE_ENABLE));
5032 }
5033 } /* ixgbe_enable_rx_drop */
5034
5035 /************************************************************************
5036 * ixgbe_disable_rx_drop
5037 ************************************************************************/
5038 static void
5039 ixgbe_disable_rx_drop(struct adapter *adapter)
5040 {
5041 struct ixgbe_hw *hw = &adapter->hw;
5042 struct rx_ring *rxr;
5043 u32 srrctl;
5044
5045 for (int i = 0; i < adapter->num_queues; i++) {
5046 rxr = &adapter->rx_rings[i];
5047 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5048 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5049 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5050 }
5051
5052 /* disable drop for each vf */
5053 for (int i = 0; i < adapter->num_vfs; i++) {
5054 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5055 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5056 }
5057 } /* ixgbe_disable_rx_drop */
5058
5059 /************************************************************************
5060 * ixgbe_sysctl_advertise
5061 *
5062 * SYSCTL wrapper around setting advertised speed
5063 ************************************************************************/
5064 static int
5065 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5066 {
5067 struct sysctlnode node = *rnode;
5068 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5069 int error = 0, advertise;
5070
5071 advertise = adapter->advertise;
5072 node.sysctl_data = &advertise;
5073 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5074 if (error != 0 || newp == NULL)
5075 return error;
5076
5077 return ixgbe_set_advertise(adapter, advertise);
5078 } /* ixgbe_sysctl_advertise */
5079
5080 /************************************************************************
5081 * ixgbe_set_advertise - Control advertised link speed
5082 *
5083 * Flags:
5084 * 0x00 - Default (all capable link speed)
5085 * 0x01 - advertise 100 Mb
5086 * 0x02 - advertise 1G
5087 * 0x04 - advertise 10G
5088 * 0x08 - advertise 10 Mb
5089 * 0x10 - advertise 2.5G
5090 * 0x20 - advertise 5G
5091 ************************************************************************/
5092 static int
5093 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5094 {
5095 device_t dev;
5096 struct ixgbe_hw *hw;
5097 ixgbe_link_speed speed = 0;
5098 ixgbe_link_speed link_caps = 0;
5099 s32 err = IXGBE_NOT_IMPLEMENTED;
5100 bool negotiate = FALSE;
5101
5102 /* Checks to validate new value */
5103 if (adapter->advertise == advertise) /* no change */
5104 return (0);
5105
5106 dev = adapter->dev;
5107 hw = &adapter->hw;
5108
5109 /* No speed changes for backplane media */
5110 if (hw->phy.media_type == ixgbe_media_type_backplane)
5111 return (ENODEV);
5112
5113 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5114 (hw->phy.multispeed_fiber))) {
5115 device_printf(dev,
5116 "Advertised speed can only be set on copper or "
5117 "multispeed fiber media types.\n");
5118 return (EINVAL);
5119 }
5120
5121 if (advertise < 0x0 || advertise > 0x2f) {
5122 device_printf(dev,
5123 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5124 return (EINVAL);
5125 }
5126
5127 if (hw->mac.ops.get_link_capabilities) {
5128 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5129 &negotiate);
5130 if (err != IXGBE_SUCCESS) {
5131 device_printf(dev, "Unable to determine supported advertise speeds\n");
5132 return (ENODEV);
5133 }
5134 }
5135
5136 /* Set new value and report new advertised mode */
5137 if (advertise & 0x1) {
5138 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5139 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5140 return (EINVAL);
5141 }
5142 speed |= IXGBE_LINK_SPEED_100_FULL;
5143 }
5144 if (advertise & 0x2) {
5145 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5146 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5147 return (EINVAL);
5148 }
5149 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5150 }
5151 if (advertise & 0x4) {
5152 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5153 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5154 return (EINVAL);
5155 }
5156 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5157 }
5158 if (advertise & 0x8) {
5159 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5160 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5161 return (EINVAL);
5162 }
5163 speed |= IXGBE_LINK_SPEED_10_FULL;
5164 }
5165 if (advertise & 0x10) {
5166 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5167 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5168 return (EINVAL);
5169 }
5170 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5171 }
5172 if (advertise & 0x20) {
5173 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5174 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5175 return (EINVAL);
5176 }
5177 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5178 }
5179 if (advertise == 0)
5180 speed = link_caps; /* All capable link speed */
5181
5182 hw->mac.autotry_restart = TRUE;
5183 hw->mac.ops.setup_link(hw, speed, TRUE);
5184 adapter->advertise = advertise;
5185
5186 return (0);
5187 } /* ixgbe_set_advertise */
5188
5189 /************************************************************************
5190 * ixgbe_get_advertise - Get current advertised speed settings
5191 *
5192 * Formatted for sysctl usage.
5193 * Flags:
5194 * 0x01 - advertise 100 Mb
5195 * 0x02 - advertise 1G
5196 * 0x04 - advertise 10G
5197 * 0x08 - advertise 10 Mb (yes, Mb)
5198 * 0x10 - advertise 2.5G
5199 * 0x20 - advertise 5G
5200 ************************************************************************/
5201 static int
5202 ixgbe_get_advertise(struct adapter *adapter)
5203 {
5204 struct ixgbe_hw *hw = &adapter->hw;
5205 int speed;
5206 ixgbe_link_speed link_caps = 0;
5207 s32 err;
5208 bool negotiate = FALSE;
5209
5210 /*
5211 * Advertised speed means nothing unless it's copper or
5212 * multi-speed fiber
5213 */
5214 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5215 !(hw->phy.multispeed_fiber))
5216 return (0);
5217
5218 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5219 if (err != IXGBE_SUCCESS)
5220 return (0);
5221
5222 speed =
5223 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5224 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5225 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5226 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5227 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5228 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5229
5230 return speed;
5231 } /* ixgbe_get_advertise */
5232
5233 /************************************************************************
5234 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5235 *
5236 * Control values:
5237 * 0/1 - off / on (use default value of 1000)
5238 *
5239 * Legal timer values are:
5240 * 50,100,250,500,1000,2000,5000,10000
5241 *
5242 * Turning off interrupt moderation will also turn this off.
5243 ************************************************************************/
5244 static int
5245 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5246 {
5247 struct sysctlnode node = *rnode;
5248 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5249 struct ifnet *ifp = adapter->ifp;
5250 int error;
5251 int newval;
5252
5253 newval = adapter->dmac;
5254 node.sysctl_data = &newval;
5255 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5256 if ((error) || (newp == NULL))
5257 return (error);
5258
5259 switch (newval) {
5260 case 0:
5261 /* Disabled */
5262 adapter->dmac = 0;
5263 break;
5264 case 1:
5265 /* Enable and use default */
5266 adapter->dmac = 1000;
5267 break;
5268 case 50:
5269 case 100:
5270 case 250:
5271 case 500:
5272 case 1000:
5273 case 2000:
5274 case 5000:
5275 case 10000:
5276 /* Legal values - allow */
5277 adapter->dmac = newval;
5278 break;
5279 default:
5280 /* Do nothing, illegal value */
5281 return (EINVAL);
5282 }
5283
5284 /* Re-initialize hardware if it's already running */
5285 if (ifp->if_flags & IFF_RUNNING)
5286 ifp->if_init(ifp);
5287
5288 return (0);
5289 }
5290
5291 #ifdef IXGBE_DEBUG
5292 /************************************************************************
5293 * ixgbe_sysctl_power_state
5294 *
5295 * Sysctl to test power states
5296 * Values:
5297 * 0 - set device to D0
5298 * 3 - set device to D3
5299 * (none) - get current device power state
5300 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Unported FreeBSD code: it still references the FreeBSD sysctl
	 * request object ("req->newp") and the DEVICE_SUSPEND /
	 * DEVICE_RESUME / pci_get_powerstate interfaces, none of which
	 * exist on NetBSD — hence compiled out.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state switching is not yet supported on NetBSD */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5333 #endif
5334
5335 /************************************************************************
5336 * ixgbe_sysctl_wol_enable
5337 *
5338 * Sysctl to enable/disable the WoL capability,
5339 * if supported by the adapter.
5340 *
5341 * Values:
5342 * 0 - disabled
5343 * 1 - enabled
5344 ************************************************************************/
5345 static int
5346 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5347 {
5348 struct sysctlnode node = *rnode;
5349 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5350 struct ixgbe_hw *hw = &adapter->hw;
5351 bool new_wol_enabled;
5352 int error = 0;
5353
5354 new_wol_enabled = hw->wol_enabled;
5355 node.sysctl_data = &new_wol_enabled;
5356 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5357 if ((error) || (newp == NULL))
5358 return (error);
5359 if (new_wol_enabled == hw->wol_enabled)
5360 return (0);
5361
5362 if (new_wol_enabled && !adapter->wol_support)
5363 return (ENODEV);
5364 else
5365 hw->wol_enabled = new_wol_enabled;
5366
5367 return (0);
5368 } /* ixgbe_sysctl_wol_enable */
5369
5370 /************************************************************************
5371 * ixgbe_sysctl_wufc - Wake Up Filter Control
5372 *
5373 * Sysctl to enable/disable the types of packets that the
5374 * adapter will wake up on upon receipt.
5375 * Flags:
5376 * 0x1 - Link Status Change
5377 * 0x2 - Magic Packet
5378 * 0x4 - Direct Exact
5379 * 0x8 - Directed Multicast
5380 * 0x10 - Broadcast
5381 * 0x20 - ARP/IPv4 Request Packet
5382 * 0x40 - Direct IPv4 Packet
5383 * 0x80 - Direct IPv6 Packet
5384 *
5385 * Settings not listed above will cause the sysctl to return an error.
5386 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	/* Export the current filter mask; copy in the user's value */
	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the eight documented flag bits are accepted */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): OR-ing the old low 24 bits back in means flag
	 * bits can be set through this sysctl but never cleared.  This
	 * matches the upstream FreeBSD code, but confirm it is the
	 * intended behavior before relying on it.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5412
5413 #ifdef IXGBE_DEBUG
5414 /************************************************************************
5415 * ixgbe_sysctl_print_rss_config
5416 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Unported FreeBSD code: depends on sbuf(9) and the FreeBSD
	 * sysctl request object ("req"), so it is compiled out and
	 * this sysctl currently reports nothing.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has the extended 128-entry table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries past 32 live in the ERETA registers */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5470 #endif /* IXGBE_DEBUG */
5471
5472 /************************************************************************
5473 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5474 *
5475 * For X552/X557-AT devices using an external PHY
5476 ************************************************************************/
5477 static int
5478 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5479 {
5480 struct sysctlnode node = *rnode;
5481 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5482 struct ixgbe_hw *hw = &adapter->hw;
5483 int val;
5484 u16 reg;
5485 int error;
5486
5487 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5488 device_printf(adapter->dev,
5489 "Device has no supported external thermal sensor.\n");
5490 return (ENODEV);
5491 }
5492
5493 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5494 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5495 device_printf(adapter->dev,
5496 "Error reading from PHY's current temperature register\n");
5497 return (EAGAIN);
5498 }
5499
5500 node.sysctl_data = &val;
5501
5502 /* Shift temp for output */
5503 val = reg >> 8;
5504
5505 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5506 if ((error) || (newp == NULL))
5507 return (error);
5508
5509 return (0);
5510 } /* ixgbe_sysctl_phy_temp */
5511
5512 /************************************************************************
5513 * ixgbe_sysctl_phy_overtemp_occurred
5514 *
5515 * Reports (directly from the PHY) whether the current PHY
5516 * temperature is over the overtemp threshold.
5517 ************************************************************************/
5518 static int
5519 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5520 {
5521 struct sysctlnode node = *rnode;
5522 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5523 struct ixgbe_hw *hw = &adapter->hw;
5524 int val, error;
5525 u16 reg;
5526
5527 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5528 device_printf(adapter->dev,
5529 "Device has no supported external thermal sensor.\n");
5530 return (ENODEV);
5531 }
5532
5533 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5534 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5535 device_printf(adapter->dev,
5536 "Error reading from PHY's temperature status register\n");
5537 return (EAGAIN);
5538 }
5539
5540 node.sysctl_data = &val;
5541
5542 /* Get occurrence bit */
5543 val = !!(reg & 0x4000);
5544
5545 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5546 if ((error) || (newp == NULL))
5547 return (error);
5548
5549 return (0);
5550 } /* ixgbe_sysctl_phy_overtemp_occurred */
5551
5552 /************************************************************************
5553 * ixgbe_sysctl_eee_state
5554 *
5555 * Sysctl to set EEE power saving feature
5556 * Values:
5557 * 0 - disable EEE
5558 * 1 - enable EEE
5559 * (none) - get current device EEE state
5560 ************************************************************************/
5561 static int
5562 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5563 {
5564 struct sysctlnode node = *rnode;
5565 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5566 struct ifnet *ifp = adapter->ifp;
5567 device_t dev = adapter->dev;
5568 int curr_eee, new_eee, error = 0;
5569 s32 retval;
5570
5571 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5572 node.sysctl_data = &new_eee;
5573 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5574 if ((error) || (newp == NULL))
5575 return (error);
5576
5577 /* Nothing to do */
5578 if (new_eee == curr_eee)
5579 return (0);
5580
5581 /* Not supported */
5582 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5583 return (EINVAL);
5584
5585 /* Bounds checking */
5586 if ((new_eee < 0) || (new_eee > 1))
5587 return (EINVAL);
5588
5589 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5590 if (retval) {
5591 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5592 return (EINVAL);
5593 }
5594
5595 /* Restart auto-neg */
5596 ifp->if_init(ifp);
5597
5598 device_printf(dev, "New EEE state: %d\n", new_eee);
5599
5600 /* Cache new value */
5601 if (new_eee)
5602 adapter->feat_en |= IXGBE_FEATURE_EEE;
5603 else
5604 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5605
5606 return (error);
5607 } /* ixgbe_sysctl_eee_state */
5608
5609 /************************************************************************
5610 * ixgbe_init_device_features
5611 ************************************************************************/
5612 static void
5613 ixgbe_init_device_features(struct adapter *adapter)
5614 {
5615 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5616 | IXGBE_FEATURE_RSS
5617 | IXGBE_FEATURE_MSI
5618 | IXGBE_FEATURE_MSIX
5619 | IXGBE_FEATURE_LEGACY_IRQ
5620 | IXGBE_FEATURE_LEGACY_TX;
5621
5622 /* Set capabilities first... */
5623 switch (adapter->hw.mac.type) {
5624 case ixgbe_mac_82598EB:
5625 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5626 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5627 break;
5628 case ixgbe_mac_X540:
5629 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5630 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5631 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5632 (adapter->hw.bus.func == 0))
5633 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5634 break;
5635 case ixgbe_mac_X550:
5636 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5637 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5638 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5639 break;
5640 case ixgbe_mac_X550EM_x:
5641 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5642 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5643 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5644 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5645 break;
5646 case ixgbe_mac_X550EM_a:
5647 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5648 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5649 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5650 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5651 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5652 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5653 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5654 }
5655 break;
5656 case ixgbe_mac_82599EB:
5657 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5658 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5659 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5660 (adapter->hw.bus.func == 0))
5661 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5662 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5663 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5664 break;
5665 default:
5666 break;
5667 }
5668
5669 /* Enabled by default... */
5670 /* Fan failure detection */
5671 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5672 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5673 /* Netmap */
5674 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
5675 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
5676 /* EEE */
5677 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
5678 adapter->feat_en |= IXGBE_FEATURE_EEE;
5679 /* Thermal Sensor */
5680 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5681 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5682
5683 /* Enabled via global sysctl... */
5684 /* Flow Director */
5685 if (ixgbe_enable_fdir) {
5686 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
5687 adapter->feat_en |= IXGBE_FEATURE_FDIR;
5688 else
5689 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5690 }
5691 /* Legacy (single queue) transmit */
5692 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
5693 ixgbe_enable_legacy_tx)
5694 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
5695 /*
5696 * Message Signal Interrupts - Extended (MSI-X)
5697 * Normal MSI is only enabled if MSI-X calls fail.
5698 */
5699 if (!ixgbe_enable_msix)
5700 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
5701 /* Receive-Side Scaling (RSS) */
5702 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5703 adapter->feat_en |= IXGBE_FEATURE_RSS;
5704
5705 /* Disable features with unmet dependencies... */
5706 /* No MSI-X */
5707 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
5708 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5709 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5710 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5711 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5712 }
5713 } /* ixgbe_init_device_features */
5714
5715 /************************************************************************
5716 * ixgbe_probe - Device identification routine
5717 *
5718 * Determines if the driver should be loaded on
5719 * adapter based on its PCI vendor/device ID.
5720 *
5721 * return BUS_PROBE_DEFAULT on success, positive on failure
5722 ************************************************************************/
5723 static int
5724 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5725 {
5726 const struct pci_attach_args *pa = aux;
5727
5728 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5729 }
5730
5731 static ixgbe_vendor_info_t *
5732 ixgbe_lookup(const struct pci_attach_args *pa)
5733 {
5734 ixgbe_vendor_info_t *ent;
5735 pcireg_t subid;
5736
5737 INIT_DEBUGOUT("ixgbe_lookup: begin");
5738
5739 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5740 return NULL;
5741
5742 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5743
5744 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5745 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5746 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5747 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5748 (ent->subvendor_id == 0)) &&
5749 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5750 (ent->subdevice_id == 0))) {
5751 ++ixgbe_total_ports;
5752 return ent;
5753 }
5754 }
5755 return NULL;
5756 }
5757
5758 static int
5759 ixgbe_ifflags_cb(struct ethercom *ec)
5760 {
5761 struct ifnet *ifp = &ec->ec_if;
5762 struct adapter *adapter = ifp->if_softc;
5763 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
5764
5765 IXGBE_CORE_LOCK(adapter);
5766
5767 if (change != 0)
5768 adapter->if_flags = ifp->if_flags;
5769
5770 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5771 rc = ENETRESET;
5772 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5773 ixgbe_set_promisc(adapter);
5774
5775 /* Set up VLAN support and filter */
5776 ixgbe_setup_vlan_hw_support(adapter);
5777
5778 IXGBE_CORE_UNLOCK(adapter);
5779
5780 return rc;
5781 }
5782
5783 /************************************************************************
5784 * ixgbe_ioctl - Ioctl entry point
5785 *
5786 * Called when the user wants to configure the interface.
5787 *
5788 * return 0 on success, positive on failure
5789 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must be toggled together. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing only (plus counter clearing below). */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both hardware statistics and software event counts. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command handling. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP module EEPROM/diagnostics addresses. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the generic ethernet layer try first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: apply the change to running hardware. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes need a full re-init. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
5924
5925 /************************************************************************
5926 * ixgbe_check_fan_failure
5927 ************************************************************************/
5928 static void
5929 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
5930 {
5931 u32 mask;
5932
5933 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
5934 IXGBE_ESDP_SDP1;
5935
5936 if (reg & mask)
5937 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5938 } /* ixgbe_check_fan_failure */
5939
5940 /************************************************************************
5941 * ixgbe_handle_que
5942 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;	/* true if rx/tx work remains */

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Clean the rings; "more" asks for a rescheduled pass. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Restart multiqueue transmit if packets are waiting. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: run again rather than re-enabling intr. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		/* que->res is set only when an MSI-X vector was established. */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
5980
5981 /************************************************************************
5982 * ixgbe_handle_que_work
5983 ************************************************************************/
5984 static void
5985 ixgbe_handle_que_work(struct work *wk, void *context)
5986 {
5987 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
5988
5989 /*
5990 * "enqueued flag" is not required here.
5991 * See ixgbe_msix_que().
5992 */
5993 ixgbe_handle_que(que);
5994 }
5995
5996 /************************************************************************
5997 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
5998 ************************************************************************/
5999 static int
6000 ixgbe_allocate_legacy(struct adapter *adapter,
6001 const struct pci_attach_args *pa)
6002 {
6003 device_t dev = adapter->dev;
6004 struct ix_queue *que = adapter->queues;
6005 struct tx_ring *txr = adapter->tx_rings;
6006 int counts[PCI_INTR_TYPE_SIZE];
6007 pci_intr_type_t intr_type, max_type;
6008 char intrbuf[PCI_INTRSTR_LEN];
6009 const char *intrstr = NULL;
6010
6011 /* We allocate a single interrupt resource */
6012 max_type = PCI_INTR_TYPE_MSI;
6013 counts[PCI_INTR_TYPE_MSIX] = 0;
6014 counts[PCI_INTR_TYPE_MSI] =
6015 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6016 /* Check not feat_en but feat_cap to fallback to INTx */
6017 counts[PCI_INTR_TYPE_INTX] =
6018 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6019
6020 alloc_retry:
6021 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6022 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6023 return ENXIO;
6024 }
6025 adapter->osdep.nintrs = 1;
6026 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6027 intrbuf, sizeof(intrbuf));
6028 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6029 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6030 device_xname(dev));
6031 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6032 if (adapter->osdep.ihs[0] == NULL) {
6033 aprint_error_dev(dev,"unable to establish %s\n",
6034 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6035 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6036 adapter->osdep.intrs = NULL;
6037 switch (intr_type) {
6038 case PCI_INTR_TYPE_MSI:
6039 /* The next try is for INTx: Disable MSI */
6040 max_type = PCI_INTR_TYPE_INTX;
6041 counts[PCI_INTR_TYPE_INTX] = 1;
6042 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6043 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6044 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6045 goto alloc_retry;
6046 } else
6047 break;
6048 case PCI_INTR_TYPE_INTX:
6049 default:
6050 /* See below */
6051 break;
6052 }
6053 }
6054 if (intr_type == PCI_INTR_TYPE_INTX) {
6055 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6056 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6057 }
6058 if (adapter->osdep.ihs[0] == NULL) {
6059 aprint_error_dev(dev,
6060 "couldn't establish interrupt%s%s\n",
6061 intrstr ? " at " : "", intrstr ? intrstr : "");
6062 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6063 adapter->osdep.intrs = NULL;
6064 return ENXIO;
6065 }
6066 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6067 /*
6068 * Try allocating a fast interrupt and the associated deferred
6069 * processing contexts.
6070 */
6071 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6072 txr->txr_si =
6073 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6074 ixgbe_deferred_mq_start, txr);
6075 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6076 ixgbe_handle_que, que);
6077
6078 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6079 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6080 aprint_error_dev(dev,
6081 "could not establish software interrupts\n");
6082
6083 return ENXIO;
6084 }
6085 /* For simplicity in the handlers */
6086 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6087
6088 return (0);
6089 } /* ixgbe_allocate_legacy */
6090
6091 /************************************************************************
6092 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6093 ************************************************************************/
6094 static int
6095 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6096 {
6097 device_t dev = adapter->dev;
6098 struct ix_queue *que = adapter->queues;
6099 struct tx_ring *txr = adapter->tx_rings;
6100 pci_chipset_tag_t pc;
6101 char intrbuf[PCI_INTRSTR_LEN];
6102 char intr_xname[32];
6103 char wqname[MAXCOMLEN];
6104 const char *intrstr = NULL;
6105 int error, vector = 0;
6106 int cpu_id = 0;
6107 kcpuset_t *affinity;
6108 #ifdef RSS
6109 unsigned int rss_buckets = 0;
6110 kcpuset_t cpu_mask;
6111 #endif
6112
6113 pc = adapter->osdep.pc;
6114 #ifdef RSS
6115 /*
6116 * If we're doing RSS, the number of queues needs to
6117 * match the number of RSS buckets that are configured.
6118 *
6119 * + If there's more queues than RSS buckets, we'll end
6120 * up with queues that get no traffic.
6121 *
6122 * + If there's more RSS buckets than queues, we'll end
6123 * up having multiple RSS buckets map to the same queue,
6124 * so there'll be some contention.
6125 */
6126 rss_buckets = rss_getnumbuckets();
6127 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6128 (adapter->num_queues != rss_buckets)) {
6129 device_printf(dev,
6130 "%s: number of queues (%d) != number of RSS buckets (%d)"
6131 "; performance will be impacted.\n",
6132 __func__, adapter->num_queues, rss_buckets);
6133 }
6134 #endif
6135
6136 adapter->osdep.nintrs = adapter->num_queues + 1;
6137 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6138 adapter->osdep.nintrs) != 0) {
6139 aprint_error_dev(dev,
6140 "failed to allocate MSI-X interrupt\n");
6141 return (ENXIO);
6142 }
6143
6144 kcpuset_create(&affinity, false);
6145 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6146 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6147 device_xname(dev), i);
6148 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6149 sizeof(intrbuf));
6150 #ifdef IXGBE_MPSAFE
6151 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6152 true);
6153 #endif
6154 /* Set the handler function */
6155 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6156 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6157 intr_xname);
6158 if (que->res == NULL) {
6159 aprint_error_dev(dev,
6160 "Failed to register QUE handler\n");
6161 error = ENXIO;
6162 goto err_out;
6163 }
6164 que->msix = vector;
6165 adapter->active_queues |= (u64)(1 << que->msix);
6166
6167 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6168 #ifdef RSS
6169 /*
6170 * The queue ID is used as the RSS layer bucket ID.
6171 * We look up the queue ID -> RSS CPU ID and select
6172 * that.
6173 */
6174 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6175 CPU_SETOF(cpu_id, &cpu_mask);
6176 #endif
6177 } else {
6178 /*
6179 * Bind the MSI-X vector, and thus the
6180 * rings to the corresponding CPU.
6181 *
6182 * This just happens to match the default RSS
6183 * round-robin bucket -> queue -> CPU allocation.
6184 */
6185 if (adapter->num_queues > 1)
6186 cpu_id = i;
6187 }
6188 /* Round-robin affinity */
6189 kcpuset_zero(affinity);
6190 kcpuset_set(affinity, cpu_id % ncpu);
6191 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6192 NULL);
6193 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6194 intrstr);
6195 if (error == 0) {
6196 #if 1 /* def IXGBE_DEBUG */
6197 #ifdef RSS
6198 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6199 cpu_id % ncpu);
6200 #else
6201 aprint_normal(", bound queue %d to cpu %d", i,
6202 cpu_id % ncpu);
6203 #endif
6204 #endif /* IXGBE_DEBUG */
6205 }
6206 aprint_normal("\n");
6207
6208 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6209 txr->txr_si = softint_establish(
6210 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6211 ixgbe_deferred_mq_start, txr);
6212 if (txr->txr_si == NULL) {
6213 aprint_error_dev(dev,
6214 "couldn't establish software interrupt\n");
6215 error = ENXIO;
6216 goto err_out;
6217 }
6218 }
6219 que->que_si
6220 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6221 ixgbe_handle_que, que);
6222 if (que->que_si == NULL) {
6223 aprint_error_dev(dev,
6224 "couldn't establish software interrupt\n");
6225 error = ENXIO;
6226 goto err_out;
6227 }
6228 }
6229 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6230 error = workqueue_create(&adapter->txr_wq, wqname,
6231 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6232 IXGBE_WORKQUEUE_FLAGS);
6233 if (error) {
6234 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6235 goto err_out;
6236 }
6237 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6238
6239 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6240 error = workqueue_create(&adapter->que_wq, wqname,
6241 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6242 IXGBE_WORKQUEUE_FLAGS);
6243 if (error) {
6244 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6245 goto err_out;
6246 }
6247
6248 /* and Link */
6249 cpu_id++;
6250 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6251 adapter->vector = vector;
6252 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6253 sizeof(intrbuf));
6254 #ifdef IXGBE_MPSAFE
6255 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6256 true);
6257 #endif
6258 /* Set the link handler function */
6259 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6260 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6261 intr_xname);
6262 if (adapter->osdep.ihs[vector] == NULL) {
6263 adapter->res = NULL;
6264 aprint_error_dev(dev, "Failed to register LINK handler\n");
6265 error = ENXIO;
6266 goto err_out;
6267 }
6268 /* Round-robin affinity */
6269 kcpuset_zero(affinity);
6270 kcpuset_set(affinity, cpu_id % ncpu);
6271 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6272 NULL);
6273
6274 aprint_normal_dev(dev,
6275 "for link, interrupting at %s", intrstr);
6276 if (error == 0)
6277 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6278 else
6279 aprint_normal("\n");
6280
6281 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6282 adapter->mbx_si =
6283 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6284 ixgbe_handle_mbx, adapter);
6285 if (adapter->mbx_si == NULL) {
6286 aprint_error_dev(dev,
6287 "could not establish software interrupts\n");
6288
6289 error = ENXIO;
6290 goto err_out;
6291 }
6292 }
6293
6294 kcpuset_destroy(affinity);
6295 aprint_normal_dev(dev,
6296 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6297
6298 return (0);
6299
6300 err_out:
6301 kcpuset_destroy(affinity);
6302 ixgbe_free_softint(adapter);
6303 ixgbe_free_pciintr_resources(adapter);
6304 return (error);
6305 } /* ixgbe_allocate_msix */
6306
6307 /************************************************************************
6308 * ixgbe_configure_interrupts
6309 *
6310 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6311 * This will also depend on user settings.
6312 ************************************************************************/
6313 static int
6314 ixgbe_configure_interrupts(struct adapter *adapter)
6315 {
6316 device_t dev = adapter->dev;
6317 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6318 int want, queues, msgs;
6319
6320 /* Default to 1 queue if MSI-X setup fails */
6321 adapter->num_queues = 1;
6322
6323 /* Override by tuneable */
6324 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6325 goto msi;
6326
6327 /*
6328 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6329 * interrupt slot.
6330 */
6331 if (ncpu == 1)
6332 goto msi;
6333
6334 /* First try MSI-X */
6335 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6336 msgs = MIN(msgs, IXG_MAX_NINTR);
6337 if (msgs < 2)
6338 goto msi;
6339
6340 adapter->msix_mem = (void *)1; /* XXX */
6341
6342 /* Figure out a reasonable auto config value */
6343 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6344
6345 #ifdef RSS
6346 /* If we're doing RSS, clamp at the number of RSS buckets */
6347 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6348 queues = min(queues, rss_getnumbuckets());
6349 #endif
6350 if (ixgbe_num_queues > queues) {
6351 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6352 ixgbe_num_queues = queues;
6353 }
6354
6355 if (ixgbe_num_queues != 0)
6356 queues = ixgbe_num_queues;
6357 else
6358 queues = min(queues,
6359 min(mac->max_tx_queues, mac->max_rx_queues));
6360
6361 /* reflect correct sysctl value */
6362 ixgbe_num_queues = queues;
6363
6364 /*
6365 * Want one vector (RX/TX pair) per queue
6366 * plus an additional for Link.
6367 */
6368 want = queues + 1;
6369 if (msgs >= want)
6370 msgs = want;
6371 else {
6372 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6373 "%d vectors but %d queues wanted!\n",
6374 msgs, want);
6375 goto msi;
6376 }
6377 adapter->num_queues = queues;
6378 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6379 return (0);
6380
6381 /*
6382 * MSI-X allocation failed or provided us with
6383 * less vectors than needed. Free MSI-X resources
6384 * and we'll try enabling MSI.
6385 */
6386 msi:
6387 /* Without MSI-X, some features are no longer supported */
6388 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6389 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6390 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6391 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6392
6393 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6394 adapter->msix_mem = NULL; /* XXX */
6395 if (msgs > 1)
6396 msgs = 1;
6397 if (msgs != 0) {
6398 msgs = 1;
6399 adapter->feat_en |= IXGBE_FEATURE_MSI;
6400 return (0);
6401 }
6402
6403 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6404 aprint_error_dev(dev,
6405 "Device does not support legacy interrupts.\n");
6406 return 1;
6407 }
6408
6409 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6410
6411 return (0);
6412 } /* ixgbe_configure_interrupts */
6413
6414
6415 /************************************************************************
6416 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6417 *
6418 * Done outside of interrupt context since the driver might sleep
6419 ************************************************************************/
6420 static void
6421 ixgbe_handle_link(void *context)
6422 {
6423 struct adapter *adapter = context;
6424 struct ixgbe_hw *hw = &adapter->hw;
6425
6426 IXGBE_CORE_LOCK(adapter);
6427 ++adapter->link_sicount.ev_count;
6428 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6429 ixgbe_update_link_status(adapter);
6430
6431 /* Re-enable link interrupts */
6432 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6433
6434 IXGBE_CORE_UNLOCK(adapter);
6435 } /* ixgbe_handle_link */
6436
6437 /************************************************************************
6438 * ixgbe_rearm_queues
6439 ************************************************************************/
6440 static void
6441 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6442 {
6443 u32 mask;
6444
6445 switch (adapter->hw.mac.type) {
6446 case ixgbe_mac_82598EB:
6447 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6448 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6449 break;
6450 case ixgbe_mac_82599EB:
6451 case ixgbe_mac_X540:
6452 case ixgbe_mac_X550:
6453 case ixgbe_mac_X550EM_x:
6454 case ixgbe_mac_X550EM_a:
6455 mask = (queues & 0xFFFFFFFF);
6456 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6457 mask = (queues >> 32);
6458 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6459 break;
6460 default:
6461 break;
6462 }
6463 } /* ixgbe_rearm_queues */
6464