ixgbe.c revision 1.143 1 /* $NetBSD: ixgbe.c,v 1.143 2018/04/04 06:30:09 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 char ixgbe_driver_version[] = "3.2.12-k";
84
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static int ixgbe_setup_interface(device_t, struct adapter *);
188 static void ixgbe_config_gpie(struct adapter *);
189 static void ixgbe_config_dmac(struct adapter *);
190 static void ixgbe_config_delay_values(struct adapter *);
191 static void ixgbe_config_link(struct adapter *);
192 static void ixgbe_check_wol_support(struct adapter *);
193 static int ixgbe_setup_low_power_mode(struct adapter *);
194 static void ixgbe_rearm_queues(struct adapter *, u64);
195
196 static void ixgbe_initialize_transmit_units(struct adapter *);
197 static void ixgbe_initialize_receive_units(struct adapter *);
198 static void ixgbe_enable_rx_drop(struct adapter *);
199 static void ixgbe_disable_rx_drop(struct adapter *);
200 static void ixgbe_initialize_rss_mapping(struct adapter *);
201
202 static void ixgbe_enable_intr(struct adapter *);
203 static void ixgbe_disable_intr(struct adapter *);
204 static void ixgbe_update_stats_counters(struct adapter *);
205 static void ixgbe_set_promisc(struct adapter *);
206 static void ixgbe_set_multi(struct adapter *);
207 static void ixgbe_update_link_status(struct adapter *);
208 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
209 static void ixgbe_configure_ivars(struct adapter *);
210 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
211 static void ixgbe_eitr_write(struct ix_queue *, uint32_t);
212
213 static void ixgbe_setup_vlan_hw_support(struct adapter *);
214 #if 0
215 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
216 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
217 #endif
218
219 static void ixgbe_add_device_sysctls(struct adapter *);
220 static void ixgbe_add_hw_stats(struct adapter *);
221 static void ixgbe_clear_evcnt(struct adapter *);
222 static int ixgbe_set_flowcntl(struct adapter *, int);
223 static int ixgbe_set_advertise(struct adapter *, int);
224 static int ixgbe_get_advertise(struct adapter *);
225
226 /* Sysctl handlers */
227 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
228 const char *, int *, int);
229 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
230 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
231 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
232 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
235 #ifdef IXGBE_DEBUG
236 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
237 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
238 #endif
239 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
246
247 /* Support for pluggable optic modules */
248 static bool ixgbe_sfp_probe(struct adapter *);
249
250 /* Legacy (single vector) interrupt handler */
251 static int ixgbe_legacy_irq(void *);
252
253 /* The MSI/MSI-X Interrupt handlers */
254 static int ixgbe_msix_que(void *);
255 static int ixgbe_msix_link(void *);
256
257 /* Software interrupts for deferred work */
258 static void ixgbe_handle_que(void *);
259 static void ixgbe_handle_link(void *);
260 static void ixgbe_handle_msf(void *);
261 static void ixgbe_handle_mod(void *);
262 static void ixgbe_handle_phy(void *);
263
264 /* Workqueue handler for deferred work */
265 static void ixgbe_handle_que_work(struct work *, void *);
266
267 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
268
269 /************************************************************************
270 * NetBSD Device Interface Entry Points
271 ************************************************************************/
272 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
273 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
274 DVF_DETACH_SHUTDOWN);
275
276 #if 0
277 devclass_t ix_devclass;
278 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
279
280 MODULE_DEPEND(ix, pci, 1, 1, 1);
281 MODULE_DEPEND(ix, ether, 1, 1, 1);
282 #ifdef DEV_NETMAP
283 MODULE_DEPEND(ix, netmap, 1, 1, 1);
284 #endif
285 #endif
286
287 /*
288 * TUNEABLE PARAMETERS:
289 */
290
291 /*
292 * AIM: Adaptive Interrupt Moderation
293 * which means that the interrupt rate
294 * is varied over time based on the
295 * traffic for that interrupt vector
296 */
297 static bool ixgbe_enable_aim = true;
298 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
299 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
300 "Enable adaptive interrupt moderation");
301
302 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
303 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
304 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
305
306 /* How many packets rxeof tries to clean at a time */
307 static int ixgbe_rx_process_limit = 256;
308 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
309 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
310
311 /* How many packets txeof tries to clean at a time */
312 static int ixgbe_tx_process_limit = 256;
313 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
314 &ixgbe_tx_process_limit, 0,
315 "Maximum number of sent packets to process at a time, -1 means unlimited");
316
317 /* Flow control setting, default to full */
318 static int ixgbe_flow_control = ixgbe_fc_full;
319 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
320 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
321
/* Whether packet processing uses a workqueue or a softint */
323 static bool ixgbe_txrx_workqueue = false;
324
325 /*
326 * Smart speed setting, default to on
327 * this only works as a compile option
328 * right now as its during attach, set
329 * this to 'ixgbe_smart_speed_off' to
330 * disable.
331 */
332 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
333
334 /*
335 * MSI-X should be the default for best performance,
336 * but this allows it to be forced off for testing.
337 */
338 static int ixgbe_enable_msix = 1;
339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
340 "Enable MSI-X interrupts");
341
342 /*
343 * Number of Queues, can be set to 0,
344 * it then autoconfigures based on the
345 * number of cpus with a max of 8. This
 * can be overridden manually here.
347 */
348 static int ixgbe_num_queues = 0;
349 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
350 "Number of queues to configure, 0 indicates autoconfigure");
351
352 /*
353 * Number of TX descriptors per ring,
354 * setting higher than RX as this seems
355 * the better performing choice.
356 */
357 static int ixgbe_txd = PERFORM_TXD;
358 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
359 "Number of transmit descriptors per queue");
360
361 /* Number of RX descriptors per ring */
362 static int ixgbe_rxd = PERFORM_RXD;
363 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
364 "Number of receive descriptors per queue");
365
366 /*
367 * Defining this on will allow the use
368 * of unsupported SFP+ modules, note that
369 * doing so you are on your own :)
370 */
371 static int allow_unsupported_sfp = false;
372 #define TUNABLE_INT(__x, __y)
373 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
374
375 /*
376 * Not sure if Flow Director is fully baked,
377 * so we'll default to turning it off.
378 */
379 static int ixgbe_enable_fdir = 0;
380 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
381 "Enable Flow Director");
382
383 /* Legacy Transmit (single queue) */
384 static int ixgbe_enable_legacy_tx = 0;
385 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
386 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
387
388 /* Receive-Side Scaling */
389 static int ixgbe_enable_rss = 1;
390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
391 "Enable Receive-Side Scaling (RSS)");
392
393 /* Keep running tab on them for sanity check */
394 static int ixgbe_total_ports;
395
396 #if 0
397 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
398 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
399 #endif
400
401 #ifdef NET_MPSAFE
402 #define IXGBE_MPSAFE 1
403 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
404 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
405 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
406 #else
407 #define IXGBE_CALLOUT_FLAGS 0
408 #define IXGBE_SOFTINFT_FLAGS 0
409 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
410 #endif
411 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
412
413 /************************************************************************
414 * ixgbe_initialize_rss_mapping
415 ************************************************************************/
/*
 * Program the RSS redirection table (RETA/ERETA), the 40-byte hash key
 * (RSSRK) and the hash-type selection (MRQC) so received flows are
 * spread across the adapter's RX queues.
 *
 * NOTE(review): caller is assumed to hold the core lock and to have the
 * MAC quiesced, as with the other init routines — confirm at call sites.
 */
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 spreads queue ids with a 0x11 stride in RETA */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family: 512 indirection entries (RETA + ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/*
	 * Set up the redirection table: walk all indirection entries,
	 * cycling j through the configured queues.
	 */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then flush one 32-bit
		 * register.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0..127 go to RETA, 128..511 to ERETA */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32 bits = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the configured hash types into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in SR-IOV pool configuration before the single MRQC write */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
529
530 /************************************************************************
531 * ixgbe_initialize_receive_units - Setup receive registers and features.
532 ************************************************************************/
/* Round an mbuf buffer size up to the SRRCTL BSIZEPKT granularity (1 KB) */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

/*
 * Set up all RX rings: descriptor ring base/length, buffer sizing
 * (SRRCTL), per-queue statistics mapping, drop-enable policy, head/tail
 * pointers, then RSS and checksum-offload configuration.
 */
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring  *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	int             i, j;
	u32             bufsz, fctrl, srrctl, rxcsum;
	u32             hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in 1 KB units for SRRCTL.BSIZEPKT */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location for queue 0...7 are different between
		 * 82598 (TQSMR) and newer MACs (TQSM).
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Enable packet-split header types (newer MACs only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixgbe_initialize_receive_units */
674
675 /************************************************************************
676 * ixgbe_initialize_transmit_units - Enable transmit units.
677 ************************************************************************/
/*
 * Set up all TX rings (descriptor base/length, head/tail pointers,
 * head-writeback disable) and then enable the DMA TX engine.  On
 * 82599+ the TX arbiter must be disabled around the MTQC write.
 */
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the TX DMA engine */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
747
748 /************************************************************************
749 * ixgbe_attach - Device initialization routine
750 *
751 * Called when the driver is being loaded.
752 * Identifies the type of hardware, allocates all resources
753 * and initializes the hardware.
754 *
755 * return 0 on success, positive on failure
756 ************************************************************************/
757 static void
758 ixgbe_attach(device_t parent, device_t dev, void *aux)
759 {
760 struct adapter *adapter;
761 struct ixgbe_hw *hw;
762 int error = -1;
763 u32 ctrl_ext;
764 u16 high, low, nvmreg;
765 pcireg_t id, subid;
766 ixgbe_vendor_info_t *ent;
767 struct pci_attach_args *pa = aux;
768 const char *str;
769 char buf[256];
770
771 INIT_DEBUGOUT("ixgbe_attach: begin");
772
773 /* Allocate, clear, and link in our adapter structure */
774 adapter = device_private(dev);
775 adapter->hw.back = adapter;
776 adapter->dev = dev;
777 hw = &adapter->hw;
778 adapter->osdep.pc = pa->pa_pc;
779 adapter->osdep.tag = pa->pa_tag;
780 if (pci_dma64_available(pa))
781 adapter->osdep.dmat = pa->pa_dmat64;
782 else
783 adapter->osdep.dmat = pa->pa_dmat;
784 adapter->osdep.attached = false;
785
786 ent = ixgbe_lookup(pa);
787
788 KASSERT(ent != NULL);
789
790 aprint_normal(": %s, Version - %s\n",
791 ixgbe_strings[ent->index], ixgbe_driver_version);
792
793 /* Core Lock Init*/
794 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
795
796 /* Set up the timer callout */
797 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
798
799 /* Determine hardware revision */
800 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
801 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
802
803 hw->vendor_id = PCI_VENDOR(id);
804 hw->device_id = PCI_PRODUCT(id);
805 hw->revision_id =
806 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
807 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
808 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
809
810 /*
811 * Make sure BUSMASTER is set
812 */
813 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
814
815 /* Do base PCI setup - map BAR0 */
816 if (ixgbe_allocate_pci_resources(adapter, pa)) {
817 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
818 error = ENXIO;
819 goto err_out;
820 }
821
822 /* let hardware know driver is loaded */
823 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
824 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
825 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
826
827 /*
828 * Initialize the shared code
829 */
830 if (ixgbe_init_shared_code(hw)) {
831 aprint_error_dev(dev, "Unable to initialize the shared code\n");
832 error = ENXIO;
833 goto err_out;
834 }
835
836 switch (hw->mac.type) {
837 case ixgbe_mac_82598EB:
838 str = "82598EB";
839 break;
840 case ixgbe_mac_82599EB:
841 str = "82599EB";
842 break;
843 case ixgbe_mac_X540:
844 str = "X540";
845 break;
846 case ixgbe_mac_X550:
847 str = "X550";
848 break;
849 case ixgbe_mac_X550EM_x:
850 str = "X550EM";
851 break;
852 case ixgbe_mac_X550EM_a:
853 str = "X550EM A";
854 break;
855 default:
856 str = "Unknown";
857 break;
858 }
859 aprint_normal_dev(dev, "device %s\n", str);
860
861 if (hw->mbx.ops.init_params)
862 hw->mbx.ops.init_params(hw);
863
864 hw->allow_unsupported_sfp = allow_unsupported_sfp;
865
866 /* Pick up the 82599 settings */
867 if (hw->mac.type != ixgbe_mac_82598EB) {
868 hw->phy.smart_speed = ixgbe_smart_speed;
869 adapter->num_segs = IXGBE_82599_SCATTER;
870 } else
871 adapter->num_segs = IXGBE_82598_SCATTER;
872
873 hw->mac.ops.set_lan_id(hw);
874 ixgbe_init_device_features(adapter);
875
876 if (ixgbe_configure_interrupts(adapter)) {
877 error = ENXIO;
878 goto err_out;
879 }
880
881 /* Allocate multicast array memory. */
882 adapter->mta = malloc(sizeof(*adapter->mta) *
883 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
884 if (adapter->mta == NULL) {
885 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
886 error = ENOMEM;
887 goto err_out;
888 }
889
890 /* Enable WoL (if supported) */
891 ixgbe_check_wol_support(adapter);
892
893 /* Verify adapter fan is still functional (if applicable) */
894 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
895 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
896 ixgbe_check_fan_failure(adapter, esdp, FALSE);
897 }
898
899 /* Ensure SW/FW semaphore is free */
900 ixgbe_init_swfw_semaphore(hw);
901
902 /* Enable EEE power saving */
903 if (adapter->feat_en & IXGBE_FEATURE_EEE)
904 hw->mac.ops.setup_eee(hw, TRUE);
905
906 /* Set an initial default flow control value */
907 hw->fc.requested_mode = ixgbe_flow_control;
908
909 /* Sysctls for limiting the amount of work done in the taskqueues */
910 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
911 "max number of rx packets to process",
912 &adapter->rx_process_limit, ixgbe_rx_process_limit);
913
914 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
915 "max number of tx packets to process",
916 &adapter->tx_process_limit, ixgbe_tx_process_limit);
917
918 /* Do descriptor calc and sanity checks */
919 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
920 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
921 aprint_error_dev(dev, "TXD config issue, using default!\n");
922 adapter->num_tx_desc = DEFAULT_TXD;
923 } else
924 adapter->num_tx_desc = ixgbe_txd;
925
926 /*
927 * With many RX rings it is easy to exceed the
928 * system mbuf allocation. Tuning nmbclusters
929 * can alleviate this.
930 */
931 if (nmbclusters > 0) {
932 int s;
933 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
934 if (s > nmbclusters) {
935 aprint_error_dev(dev, "RX Descriptors exceed "
936 "system mbuf max, using default instead!\n");
937 ixgbe_rxd = DEFAULT_RXD;
938 }
939 }
940
941 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
942 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
943 aprint_error_dev(dev, "RXD config issue, using default!\n");
944 adapter->num_rx_desc = DEFAULT_RXD;
945 } else
946 adapter->num_rx_desc = ixgbe_rxd;
947
948 /* Allocate our TX/RX Queues */
949 if (ixgbe_allocate_queues(adapter)) {
950 error = ENOMEM;
951 goto err_out;
952 }
953
954 hw->phy.reset_if_overtemp = TRUE;
955 error = ixgbe_reset_hw(hw);
956 hw->phy.reset_if_overtemp = FALSE;
957 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
958 /*
959 * No optics in this port, set up
960 * so the timer routine will probe
961 * for later insertion.
962 */
963 adapter->sfp_probe = TRUE;
964 error = IXGBE_SUCCESS;
965 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
966 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
967 error = EIO;
968 goto err_late;
969 } else if (error) {
970 aprint_error_dev(dev, "Hardware initialization failed\n");
971 error = EIO;
972 goto err_late;
973 }
974
975 /* Make sure we have a good EEPROM before we read from it */
976 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
977 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
978 error = EIO;
979 goto err_late;
980 }
981
982 aprint_normal("%s:", device_xname(dev));
983 /* NVM Image Version */
984 switch (hw->mac.type) {
985 case ixgbe_mac_X540:
986 case ixgbe_mac_X550EM_a:
987 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
988 if (nvmreg == 0xffff)
989 break;
990 high = (nvmreg >> 12) & 0x0f;
991 low = (nvmreg >> 4) & 0xff;
992 id = nvmreg & 0x0f;
993 aprint_normal(" NVM Image Version %u.", high);
994 if (hw->mac.type == ixgbe_mac_X540)
995 str = "%x";
996 else
997 str = "%02x";
998 aprint_normal(str, low);
999 aprint_normal(" ID 0x%x,", id);
1000 break;
1001 case ixgbe_mac_X550EM_x:
1002 case ixgbe_mac_X550:
1003 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1004 if (nvmreg == 0xffff)
1005 break;
1006 high = (nvmreg >> 12) & 0x0f;
1007 low = nvmreg & 0xff;
1008 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1009 break;
1010 default:
1011 break;
1012 }
1013
1014 /* PHY firmware revision */
1015 switch (hw->mac.type) {
1016 case ixgbe_mac_X540:
1017 case ixgbe_mac_X550:
1018 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1019 if (nvmreg == 0xffff)
1020 break;
1021 high = (nvmreg >> 12) & 0x0f;
1022 low = (nvmreg >> 4) & 0xff;
1023 id = nvmreg & 0x000f;
1024 aprint_normal(" PHY FW Revision %u.", high);
1025 if (hw->mac.type == ixgbe_mac_X540)
1026 str = "%x";
1027 else
1028 str = "%02x";
1029 aprint_normal(str, low);
1030 aprint_normal(" ID 0x%x,", id);
1031 break;
1032 default:
1033 break;
1034 }
1035
1036 /* NVM Map version & OEM NVM Image version */
1037 switch (hw->mac.type) {
1038 case ixgbe_mac_X550:
1039 case ixgbe_mac_X550EM_x:
1040 case ixgbe_mac_X550EM_a:
1041 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1042 if (nvmreg != 0xffff) {
1043 high = (nvmreg >> 12) & 0x0f;
1044 low = nvmreg & 0x00ff;
1045 aprint_normal(" NVM Map version %u.%02x,", high, low);
1046 }
1047 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1048 if (nvmreg != 0xffff) {
1049 high = (nvmreg >> 12) & 0x0f;
1050 low = nvmreg & 0x00ff;
1051 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1052 low);
1053 }
1054 break;
1055 default:
1056 break;
1057 }
1058
1059 /* Print the ETrackID */
1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1061 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1062 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1063
1064 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1065 error = ixgbe_allocate_msix(adapter, pa);
1066 if (error) {
1067 /* Free allocated queue structures first */
1068 ixgbe_free_transmit_structures(adapter);
1069 ixgbe_free_receive_structures(adapter);
1070 free(adapter->queues, M_DEVBUF);
1071
1072 /* Fallback to legacy interrupt */
1073 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1074 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1075 adapter->feat_en |= IXGBE_FEATURE_MSI;
1076 adapter->num_queues = 1;
1077
1078 /* Allocate our TX/RX Queues again */
1079 if (ixgbe_allocate_queues(adapter)) {
1080 error = ENOMEM;
1081 goto err_out;
1082 }
1083 }
1084 }
1085 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1086 error = ixgbe_allocate_legacy(adapter, pa);
1087 if (error)
1088 goto err_late;
1089
1090 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1091 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1092 ixgbe_handle_link, adapter);
1093 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1094 ixgbe_handle_mod, adapter);
1095 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1096 ixgbe_handle_msf, adapter);
1097 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1098 ixgbe_handle_phy, adapter);
1099 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1100 adapter->fdir_si =
1101 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1102 ixgbe_reinit_fdir, adapter);
1103 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1104 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1105 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1106 && (adapter->fdir_si == NULL))) {
1107 aprint_error_dev(dev,
1108 "could not establish software interrupts ()\n");
1109 goto err_out;
1110 }
1111
1112 error = ixgbe_start_hw(hw);
1113 switch (error) {
1114 case IXGBE_ERR_EEPROM_VERSION:
1115 aprint_error_dev(dev, "This device is a pre-production adapter/"
1116 "LOM. Please be aware there may be issues associated "
1117 "with your hardware.\nIf you are experiencing problems "
1118 "please contact your Intel or hardware representative "
1119 "who provided you with this hardware.\n");
1120 break;
1121 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1122 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1123 error = EIO;
1124 goto err_late;
1125 case IXGBE_ERR_SFP_NOT_PRESENT:
1126 aprint_error_dev(dev, "No SFP+ Module found\n");
1127 /* falls thru */
1128 default:
1129 break;
1130 }
1131
1132 /* Setup OS specific network interface */
1133 if (ixgbe_setup_interface(dev, adapter) != 0)
1134 goto err_late;
1135
1136 /*
1137 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1138 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1139 */
1140 if (hw->phy.media_type == ixgbe_media_type_copper) {
1141 uint16_t id1, id2;
1142 int oui, model, rev;
1143 const char *descr;
1144
1145 id1 = hw->phy.id >> 16;
1146 id2 = hw->phy.id & 0xffff;
1147 oui = MII_OUI(id1, id2);
1148 model = MII_MODEL(id2);
1149 rev = MII_REV(id2);
1150 if ((descr = mii_get_descr(oui, model)) != NULL)
1151 aprint_normal_dev(dev,
1152 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1153 descr, oui, model, rev);
1154 else
1155 aprint_normal_dev(dev,
1156 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1157 oui, model, rev);
1158 }
1159
1160 /* Enable the optics for 82599 SFP+ fiber */
1161 ixgbe_enable_tx_laser(hw);
1162
1163 /* Enable power to the phy. */
1164 ixgbe_set_phy_power(hw, TRUE);
1165
1166 /* Initialize statistics */
1167 ixgbe_update_stats_counters(adapter);
1168
1169 /* Check PCIE slot type/speed/width */
1170 ixgbe_get_slot_info(adapter);
1171
1172 /*
1173 * Do time init and sysctl init here, but
1174 * only on the first port of a bypass adapter.
1175 */
1176 ixgbe_bypass_init(adapter);
1177
1178 /* Set an initial dmac value */
1179 adapter->dmac = 0;
1180 /* Set initial advertised speeds (if applicable) */
1181 adapter->advertise = ixgbe_get_advertise(adapter);
1182
1183 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1184 ixgbe_define_iov_schemas(dev, &error);
1185
1186 /* Add sysctls */
1187 ixgbe_add_device_sysctls(adapter);
1188 ixgbe_add_hw_stats(adapter);
1189
1190 /* For Netmap */
1191 adapter->init_locked = ixgbe_init_locked;
1192 adapter->stop_locked = ixgbe_stop;
1193
1194 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1195 ixgbe_netmap_attach(adapter);
1196
1197 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1198 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1199 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1200 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1201
1202 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1203 pmf_class_network_register(dev, adapter->ifp);
1204 else
1205 aprint_error_dev(dev, "couldn't establish power handler\n");
1206
1207 INIT_DEBUGOUT("ixgbe_attach: end");
1208 adapter->osdep.attached = true;
1209
1210 return;
1211
1212 err_late:
1213 ixgbe_free_transmit_structures(adapter);
1214 ixgbe_free_receive_structures(adapter);
1215 free(adapter->queues, M_DEVBUF);
1216 err_out:
1217 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1218 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1219 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1220 ixgbe_free_softint(adapter);
1221 ixgbe_free_pci_resources(adapter);
1222 if (adapter->mta != NULL)
1223 free(adapter->mta, M_DEVBUF);
1224 IXGBE_CORE_LOCK_DESTROY(adapter);
1225
1226 return;
1227 } /* ixgbe_attach */
1228
1229 /************************************************************************
1230 * ixgbe_check_wol_support
1231 *
1232 * Checks whether the adapter's ports are capable of
1233 * Wake On LAN by reading the adapter's NVM.
1234 *
1235 * Sets each port's hw->wol_enabled value depending
1236 * on the value read here.
1237 ************************************************************************/
1238 static void
1239 ixgbe_check_wol_support(struct adapter *adapter)
1240 {
1241 struct ixgbe_hw *hw = &adapter->hw;
1242 u16 dev_caps = 0;
1243
1244 /* Find out WoL support for port */
1245 adapter->wol_support = hw->wol_enabled = 0;
1246 ixgbe_get_device_caps(hw, &dev_caps);
1247 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1248 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1249 hw->bus.func == 0))
1250 adapter->wol_support = hw->wol_enabled = 1;
1251
1252 /* Save initial wake up filter configuration */
1253 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1254
1255 return;
1256 } /* ixgbe_check_wol_support */
1257
1258 /************************************************************************
1259 * ixgbe_setup_interface
1260 *
1261 * Setup networking device structure and register an interface.
1262 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire up our callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only fields; dead code on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Multiqueue transmit (if_transmit) is used unless the legacy
	 * TX path was selected; if_start is always set as a fallback.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* Leave two descriptors of slack in the send queue */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach sequence: if_initialize, then the percpu input queue,
	 * then ether_ifattach, and finally if_register to publish the
	 * interface.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	/* L2 frame size for the current MTU (no VLAN tag included here) */
	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags; note checksum/TSO default to OFF ... */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6
			     | IFCAP_LRO;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* ... while the ethercom capabilities default to ON */
	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1365
1366 /************************************************************************
1367 * ixgbe_add_media_types
1368 ************************************************************************/
1369 static void
1370 ixgbe_add_media_types(struct adapter *adapter)
1371 {
1372 struct ixgbe_hw *hw = &adapter->hw;
1373 device_t dev = adapter->dev;
1374 u64 layer;
1375
1376 layer = adapter->phy_layer;
1377
1378 #define ADD(mm, dd) \
1379 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1380
1381 ADD(IFM_NONE, 0);
1382
1383 /* Media types with matching NetBSD media defines */
1384 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1385 ADD(IFM_10G_T | IFM_FDX, 0);
1386 }
1387 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1388 ADD(IFM_1000_T | IFM_FDX, 0);
1389 }
1390 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1391 ADD(IFM_100_TX | IFM_FDX, 0);
1392 }
1393 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1394 ADD(IFM_10_T | IFM_FDX, 0);
1395 }
1396
1397 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1398 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1399 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1400 }
1401
1402 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1403 ADD(IFM_10G_LR | IFM_FDX, 0);
1404 if (hw->phy.multispeed_fiber) {
1405 ADD(IFM_1000_LX | IFM_FDX, 0);
1406 }
1407 }
1408 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1409 ADD(IFM_10G_SR | IFM_FDX, 0);
1410 if (hw->phy.multispeed_fiber) {
1411 ADD(IFM_1000_SX | IFM_FDX, 0);
1412 }
1413 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1414 ADD(IFM_1000_SX | IFM_FDX, 0);
1415 }
1416 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1417 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1418 }
1419
1420 #ifdef IFM_ETH_XTYPE
1421 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1422 ADD(IFM_10G_KR | IFM_FDX, 0);
1423 }
1424 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1425 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1426 }
1427 #else
1428 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1429 device_printf(dev, "Media supported: 10GbaseKR\n");
1430 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1431 ADD(IFM_10G_SR | IFM_FDX, 0);
1432 }
1433 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1434 device_printf(dev, "Media supported: 10GbaseKX4\n");
1435 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1436 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1437 }
1438 #endif
1439 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1440 ADD(IFM_1000_KX | IFM_FDX, 0);
1441 }
1442 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1443 ADD(IFM_2500_KX | IFM_FDX, 0);
1444 }
1445 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1446 ADD(IFM_2500_T | IFM_FDX, 0);
1447 }
1448 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1449 ADD(IFM_5000_T | IFM_FDX, 0);
1450 }
1451 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1452 device_printf(dev, "Media supported: 1000baseBX\n");
1453 /* XXX no ifmedia_set? */
1454
1455 ADD(IFM_AUTO, 0);
1456
1457 #undef ADD
1458 } /* ixgbe_add_media_types */
1459
1460 /************************************************************************
1461 * ixgbe_is_sfp
1462 ************************************************************************/
1463 static inline bool
1464 ixgbe_is_sfp(struct ixgbe_hw *hw)
1465 {
1466 switch (hw->mac.type) {
1467 case ixgbe_mac_82598EB:
1468 if (hw->phy.type == ixgbe_phy_nl)
1469 return TRUE;
1470 return FALSE;
1471 case ixgbe_mac_82599EB:
1472 switch (hw->mac.ops.get_media_type(hw)) {
1473 case ixgbe_media_type_fiber:
1474 case ixgbe_media_type_fiber_qsfp:
1475 return TRUE;
1476 default:
1477 return FALSE;
1478 }
1479 case ixgbe_mac_X550EM_x:
1480 case ixgbe_mac_X550EM_a:
1481 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1482 return TRUE;
1483 return FALSE;
1484 default:
1485 return FALSE;
1486 }
1487 } /* ixgbe_is_sfp */
1488
1489 /************************************************************************
1490 * ixgbe_config_link
1491 ************************************************************************/
1492 static void
1493 ixgbe_config_link(struct adapter *adapter)
1494 {
1495 struct ixgbe_hw *hw = &adapter->hw;
1496 u32 autoneg, err = 0;
1497 bool sfp, negotiate = false;
1498
1499 sfp = ixgbe_is_sfp(hw);
1500
1501 if (sfp) {
1502 if (hw->phy.multispeed_fiber) {
1503 hw->mac.ops.setup_sfp(hw);
1504 ixgbe_enable_tx_laser(hw);
1505 kpreempt_disable();
1506 softint_schedule(adapter->msf_si);
1507 kpreempt_enable();
1508 } else {
1509 kpreempt_disable();
1510 softint_schedule(adapter->mod_si);
1511 kpreempt_enable();
1512 }
1513 } else {
1514 struct ifmedia *ifm = &adapter->media;
1515
1516 if (hw->mac.ops.check_link)
1517 err = ixgbe_check_link(hw, &adapter->link_speed,
1518 &adapter->link_up, FALSE);
1519 if (err)
1520 goto out;
1521
1522 /*
1523 * Check if it's the first call. If it's the first call,
1524 * get value for auto negotiation.
1525 */
1526 autoneg = hw->phy.autoneg_advertised;
1527 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1528 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1529 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1530 &negotiate);
1531 if (err)
1532 goto out;
1533 if (hw->mac.ops.setup_link)
1534 err = hw->mac.ops.setup_link(hw, autoneg,
1535 adapter->link_up);
1536 }
1537 out:
1538
1539 return;
1540 } /* ixgbe_config_link */
1541
1542 /************************************************************************
1543 * ixgbe_update_stats_counters - Update board statistics counters.
1544 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/*
	 * NOTE(review): missed_rx is initialized here but never updated,
	 * so the subtraction from GPRC below is a no-op — inherited from
	 * the upstream driver; confirm whether total_missed_rx was meant.
	 */
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* Error counters; the hardware registers clear on read */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/*
	 * Per-queue counters: hardware exposes more counter slots than
	 * configured queues, so fold slot i into queue i % num_queues.
	 */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		int j = i % adapter->num_queues;
		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Flow-control pause frame counters */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	/*
	 * Octet counters: 82599 and later have 64-bit low/high register
	 * pairs; pause frames (assumed ETHER_MIN_LEN octets each) are
	 * subtracted from the good-octets-transmitted count.
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Receive size-histogram counters */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Transmit counters, excluding the pause frames counted above */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1681
1682 /************************************************************************
1683 * ixgbe_add_hw_stats
1684 *
1685 * Add sysctl variables, one per statistic, to the system.
1686 ************************************************************************/
1687 static void
1688 ixgbe_add_hw_stats(struct adapter *adapter)
1689 {
1690 device_t dev = adapter->dev;
1691 const struct sysctlnode *rnode, *cnode;
1692 struct sysctllog **log = &adapter->sysctllog;
1693 struct tx_ring *txr = adapter->tx_rings;
1694 struct rx_ring *rxr = adapter->rx_rings;
1695 struct ixgbe_hw *hw = &adapter->hw;
1696 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1697 const char *xname = device_xname(dev);
1698
1699 /* Driver Statistics */
1700 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1701 NULL, xname, "Driver tx dma soft fail EFBIG");
1702 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1703 NULL, xname, "m_defrag() failed");
1704 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1705 NULL, xname, "Driver tx dma hard fail EFBIG");
1706 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1707 NULL, xname, "Driver tx dma hard fail EINVAL");
1708 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1709 NULL, xname, "Driver tx dma hard fail other");
1710 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1711 NULL, xname, "Driver tx dma soft fail EAGAIN");
1712 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1713 NULL, xname, "Driver tx dma soft fail ENOMEM");
1714 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1715 NULL, xname, "Watchdog timeouts");
1716 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1717 NULL, xname, "TSO errors");
1718 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1719 NULL, xname, "Link MSI-X IRQ Handled");
1720 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1721 NULL, xname, "Link softint");
1722 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1723 NULL, xname, "module softint");
1724 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1725 NULL, xname, "multimode softint");
1726 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1727 NULL, xname, "external PHY softint");
1728
1729 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1730 #ifdef LRO
1731 struct lro_ctrl *lro = &rxr->lro;
1732 #endif /* LRO */
1733
1734 snprintf(adapter->queues[i].evnamebuf,
1735 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1736 xname, i);
1737 snprintf(adapter->queues[i].namebuf,
1738 sizeof(adapter->queues[i].namebuf), "q%d", i);
1739
1740 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1741 aprint_error_dev(dev, "could not create sysctl root\n");
1742 break;
1743 }
1744
1745 if (sysctl_createv(log, 0, &rnode, &rnode,
1746 0, CTLTYPE_NODE,
1747 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1748 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1749 break;
1750
1751 if (sysctl_createv(log, 0, &rnode, &cnode,
1752 CTLFLAG_READWRITE, CTLTYPE_INT,
1753 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1754 ixgbe_sysctl_interrupt_rate_handler, 0,
1755 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1756 break;
1757
1758 if (sysctl_createv(log, 0, &rnode, &cnode,
1759 CTLFLAG_READONLY, CTLTYPE_INT,
1760 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1761 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1762 0, CTL_CREATE, CTL_EOL) != 0)
1763 break;
1764
1765 if (sysctl_createv(log, 0, &rnode, &cnode,
1766 CTLFLAG_READONLY, CTLTYPE_INT,
1767 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1768 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1769 0, CTL_CREATE, CTL_EOL) != 0)
1770 break;
1771
1772 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1773 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1774 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1775 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1776 "Handled queue in softint");
1777 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1778 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1779 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1780 NULL, adapter->queues[i].evnamebuf, "TSO");
1781 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1782 NULL, adapter->queues[i].evnamebuf,
1783 "Queue No Descriptor Available");
1784 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1785 NULL, adapter->queues[i].evnamebuf,
1786 "Queue Packets Transmitted");
1787 #ifndef IXGBE_LEGACY_TX
1788 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1789 NULL, adapter->queues[i].evnamebuf,
1790 "Packets dropped in pcq");
1791 #endif
1792
1793 if (sysctl_createv(log, 0, &rnode, &cnode,
1794 CTLFLAG_READONLY,
1795 CTLTYPE_INT,
1796 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1797 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1798 CTL_CREATE, CTL_EOL) != 0)
1799 break;
1800
1801 if (sysctl_createv(log, 0, &rnode, &cnode,
1802 CTLFLAG_READONLY,
1803 CTLTYPE_INT,
1804 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1805 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1806 CTL_CREATE, CTL_EOL) != 0)
1807 break;
1808
1809 if (i < __arraycount(stats->mpc)) {
1810 evcnt_attach_dynamic(&stats->mpc[i],
1811 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1812 "RX Missed Packet Count");
1813 if (hw->mac.type == ixgbe_mac_82598EB)
1814 evcnt_attach_dynamic(&stats->rnbc[i],
1815 EVCNT_TYPE_MISC, NULL,
1816 adapter->queues[i].evnamebuf,
1817 "Receive No Buffers");
1818 }
1819 if (i < __arraycount(stats->pxontxc)) {
1820 evcnt_attach_dynamic(&stats->pxontxc[i],
1821 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1822 "pxontxc");
1823 evcnt_attach_dynamic(&stats->pxonrxc[i],
1824 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1825 "pxonrxc");
1826 evcnt_attach_dynamic(&stats->pxofftxc[i],
1827 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1828 "pxofftxc");
1829 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1830 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1831 "pxoffrxc");
1832 evcnt_attach_dynamic(&stats->pxon2offc[i],
1833 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1834 "pxon2offc");
1835 }
1836 if (i < __arraycount(stats->qprc)) {
1837 evcnt_attach_dynamic(&stats->qprc[i],
1838 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1839 "qprc");
1840 evcnt_attach_dynamic(&stats->qptc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1842 "qptc");
1843 evcnt_attach_dynamic(&stats->qbrc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1845 "qbrc");
1846 evcnt_attach_dynamic(&stats->qbtc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1848 "qbtc");
1849 evcnt_attach_dynamic(&stats->qprdc[i],
1850 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1851 "qprdc");
1852 }
1853
1854 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1855 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1856 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1857 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1858 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1859 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1860 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1861 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1862 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1863 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1864 #ifdef LRO
1865 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1866 CTLFLAG_RD, &lro->lro_queued, 0,
1867 "LRO Queued");
1868 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1869 CTLFLAG_RD, &lro->lro_flushed, 0,
1870 "LRO Flushed");
1871 #endif /* LRO */
1872 }
1873
1874 /* MAC stats get their own sub node */
1875
1876 snprintf(stats->namebuf,
1877 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1878
1879 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1880 stats->namebuf, "rx csum offload - IP");
1881 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1882 stats->namebuf, "rx csum offload - L4");
1883 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1884 stats->namebuf, "rx csum offload - IP bad");
1885 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1886 stats->namebuf, "rx csum offload - L4 bad");
1887 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1888 stats->namebuf, "Interrupt conditions zero");
1889 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1890 stats->namebuf, "Legacy interrupts");
1891
1892 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1893 stats->namebuf, "CRC Errors");
1894 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1895 stats->namebuf, "Illegal Byte Errors");
1896 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1897 stats->namebuf, "Byte Errors");
1898 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1899 stats->namebuf, "MAC Short Packets Discarded");
1900 if (hw->mac.type >= ixgbe_mac_X550)
1901 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1902 stats->namebuf, "Bad SFD");
1903 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1904 stats->namebuf, "Total Packets Missed");
1905 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1906 stats->namebuf, "MAC Local Faults");
1907 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1908 stats->namebuf, "MAC Remote Faults");
1909 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1910 stats->namebuf, "Receive Length Errors");
1911 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1912 stats->namebuf, "Link XON Transmitted");
1913 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1914 stats->namebuf, "Link XON Received");
1915 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1916 stats->namebuf, "Link XOFF Transmitted");
1917 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1918 stats->namebuf, "Link XOFF Received");
1919
1920 /* Packet Reception Stats */
1921 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1922 stats->namebuf, "Total Octets Received");
1923 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1924 stats->namebuf, "Good Octets Received");
1925 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1926 stats->namebuf, "Total Packets Received");
1927 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1928 stats->namebuf, "Good Packets Received");
1929 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1930 stats->namebuf, "Multicast Packets Received");
1931 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1932 stats->namebuf, "Broadcast Packets Received");
1933 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1934 stats->namebuf, "64 byte frames received ");
1935 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1936 stats->namebuf, "65-127 byte frames received");
1937 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1938 stats->namebuf, "128-255 byte frames received");
1939 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "256-511 byte frames received");
1941 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "512-1023 byte frames received");
1943 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "1023-1522 byte frames received");
1945 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "Receive Undersized");
1947 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Fragmented Packets Received ");
1949 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "Oversized Packets Received");
1951 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "Received Jabber");
1953 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Management Packets Received");
1955 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "Management Packets Dropped");
1957 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "Checksum Errors");
1959
1960 /* Packet Transmission Stats */
1961 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "Good Octets Transmitted");
1963 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "Total Packets Transmitted");
1965 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "Good Packets Transmitted");
1967 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Broadcast Packets Transmitted");
1969 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Multicast Packets Transmitted");
1971 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "Management Packets Transmitted");
1973 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "64 byte frames transmitted ");
1975 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "65-127 byte frames transmitted");
1977 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "128-255 byte frames transmitted");
1979 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "256-511 byte frames transmitted");
1981 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "512-1023 byte frames transmitted");
1983 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "1024-1522 byte frames transmitted");
1985 } /* ixgbe_add_hw_stats */
1986
1987 static void
1988 ixgbe_clear_evcnt(struct adapter *adapter)
1989 {
1990 struct tx_ring *txr = adapter->tx_rings;
1991 struct rx_ring *rxr = adapter->rx_rings;
1992 struct ixgbe_hw *hw = &adapter->hw;
1993 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1994
1995 adapter->efbig_tx_dma_setup.ev_count = 0;
1996 adapter->mbuf_defrag_failed.ev_count = 0;
1997 adapter->efbig2_tx_dma_setup.ev_count = 0;
1998 adapter->einval_tx_dma_setup.ev_count = 0;
1999 adapter->other_tx_dma_setup.ev_count = 0;
2000 adapter->eagain_tx_dma_setup.ev_count = 0;
2001 adapter->enomem_tx_dma_setup.ev_count = 0;
2002 adapter->tso_err.ev_count = 0;
2003 adapter->watchdog_events.ev_count = 0;
2004 adapter->link_irq.ev_count = 0;
2005 adapter->link_sicount.ev_count = 0;
2006 adapter->mod_sicount.ev_count = 0;
2007 adapter->msf_sicount.ev_count = 0;
2008 adapter->phy_sicount.ev_count = 0;
2009
2010 txr = adapter->tx_rings;
2011 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2012 adapter->queues[i].irqs.ev_count = 0;
2013 adapter->queues[i].handleq.ev_count = 0;
2014 adapter->queues[i].req.ev_count = 0;
2015 txr->no_desc_avail.ev_count = 0;
2016 txr->total_packets.ev_count = 0;
2017 txr->tso_tx.ev_count = 0;
2018 #ifndef IXGBE_LEGACY_TX
2019 txr->pcq_drops.ev_count = 0;
2020 #endif
2021 txr->q_efbig_tx_dma_setup = 0;
2022 txr->q_mbuf_defrag_failed = 0;
2023 txr->q_efbig2_tx_dma_setup = 0;
2024 txr->q_einval_tx_dma_setup = 0;
2025 txr->q_other_tx_dma_setup = 0;
2026 txr->q_eagain_tx_dma_setup = 0;
2027 txr->q_enomem_tx_dma_setup = 0;
2028 txr->q_tso_err = 0;
2029
2030 if (i < __arraycount(stats->mpc)) {
2031 stats->mpc[i].ev_count = 0;
2032 if (hw->mac.type == ixgbe_mac_82598EB)
2033 stats->rnbc[i].ev_count = 0;
2034 }
2035 if (i < __arraycount(stats->pxontxc)) {
2036 stats->pxontxc[i].ev_count = 0;
2037 stats->pxonrxc[i].ev_count = 0;
2038 stats->pxofftxc[i].ev_count = 0;
2039 stats->pxoffrxc[i].ev_count = 0;
2040 stats->pxon2offc[i].ev_count = 0;
2041 }
2042 if (i < __arraycount(stats->qprc)) {
2043 stats->qprc[i].ev_count = 0;
2044 stats->qptc[i].ev_count = 0;
2045 stats->qbrc[i].ev_count = 0;
2046 stats->qbtc[i].ev_count = 0;
2047 stats->qprdc[i].ev_count = 0;
2048 }
2049
2050 rxr->rx_packets.ev_count = 0;
2051 rxr->rx_bytes.ev_count = 0;
2052 rxr->rx_copies.ev_count = 0;
2053 rxr->no_jmbuf.ev_count = 0;
2054 rxr->rx_discarded.ev_count = 0;
2055 }
2056 stats->ipcs.ev_count = 0;
2057 stats->l4cs.ev_count = 0;
2058 stats->ipcs_bad.ev_count = 0;
2059 stats->l4cs_bad.ev_count = 0;
2060 stats->intzero.ev_count = 0;
2061 stats->legint.ev_count = 0;
2062 stats->crcerrs.ev_count = 0;
2063 stats->illerrc.ev_count = 0;
2064 stats->errbc.ev_count = 0;
2065 stats->mspdc.ev_count = 0;
2066 stats->mbsdc.ev_count = 0;
2067 stats->mpctotal.ev_count = 0;
2068 stats->mlfc.ev_count = 0;
2069 stats->mrfc.ev_count = 0;
2070 stats->rlec.ev_count = 0;
2071 stats->lxontxc.ev_count = 0;
2072 stats->lxonrxc.ev_count = 0;
2073 stats->lxofftxc.ev_count = 0;
2074 stats->lxoffrxc.ev_count = 0;
2075
2076 /* Packet Reception Stats */
2077 stats->tor.ev_count = 0;
2078 stats->gorc.ev_count = 0;
2079 stats->tpr.ev_count = 0;
2080 stats->gprc.ev_count = 0;
2081 stats->mprc.ev_count = 0;
2082 stats->bprc.ev_count = 0;
2083 stats->prc64.ev_count = 0;
2084 stats->prc127.ev_count = 0;
2085 stats->prc255.ev_count = 0;
2086 stats->prc511.ev_count = 0;
2087 stats->prc1023.ev_count = 0;
2088 stats->prc1522.ev_count = 0;
2089 stats->ruc.ev_count = 0;
2090 stats->rfc.ev_count = 0;
2091 stats->roc.ev_count = 0;
2092 stats->rjc.ev_count = 0;
2093 stats->mngprc.ev_count = 0;
2094 stats->mngpdc.ev_count = 0;
2095 stats->xec.ev_count = 0;
2096
2097 /* Packet Transmission Stats */
2098 stats->gotc.ev_count = 0;
2099 stats->tpt.ev_count = 0;
2100 stats->gptc.ev_count = 0;
2101 stats->bptc.ev_count = 0;
2102 stats->mptc.ev_count = 0;
2103 stats->mngptc.ev_count = 0;
2104 stats->ptc64.ev_count = 0;
2105 stats->ptc127.ev_count = 0;
2106 stats->ptc255.ev_count = 0;
2107 stats->ptc511.ev_count = 0;
2108 stats->ptc1023.ev_count = 0;
2109 stats->ptc1522.ev_count = 0;
2110 }
2111
2112 /************************************************************************
2113 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2114 *
2115 * Retrieves the TDH value from the hardware
2116 ************************************************************************/
2117 static int
2118 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2119 {
2120 struct sysctlnode node = *rnode;
2121 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2122 uint32_t val;
2123
2124 if (!txr)
2125 return (0);
2126
2127 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2128 node.sysctl_data = &val;
2129 return sysctl_lookup(SYSCTLFN_CALL(&node));
2130 } /* ixgbe_sysctl_tdh_handler */
2131
2132 /************************************************************************
2133 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2134 *
2135 * Retrieves the TDT value from the hardware
2136 ************************************************************************/
2137 static int
2138 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2139 {
2140 struct sysctlnode node = *rnode;
2141 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2142 uint32_t val;
2143
2144 if (!txr)
2145 return (0);
2146
2147 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2148 node.sysctl_data = &val;
2149 return sysctl_lookup(SYSCTLFN_CALL(&node));
2150 } /* ixgbe_sysctl_tdt_handler */
2151
2152 /************************************************************************
2153 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2154 *
2155 * Retrieves the RDH value from the hardware
2156 ************************************************************************/
2157 static int
2158 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2159 {
2160 struct sysctlnode node = *rnode;
2161 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2162 uint32_t val;
2163
2164 if (!rxr)
2165 return (0);
2166
2167 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2168 node.sysctl_data = &val;
2169 return sysctl_lookup(SYSCTLFN_CALL(&node));
2170 } /* ixgbe_sysctl_rdh_handler */
2171
2172 /************************************************************************
2173 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2174 *
2175 * Retrieves the RDT value from the hardware
2176 ************************************************************************/
2177 static int
2178 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2179 {
2180 struct sysctlnode node = *rnode;
2181 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2182 uint32_t val;
2183
2184 if (!rxr)
2185 return (0);
2186
2187 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2188 node.sysctl_data = &val;
2189 return sysctl_lookup(SYSCTLFN_CALL(&node));
2190 } /* ixgbe_sysctl_rdt_handler */
2191
2192 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2193 /************************************************************************
2194 * ixgbe_register_vlan
2195 *
2196 * Run via vlan config EVENT, it enables us to use the
2197 * HW Filter table since we can get the vlan id. This
2198 * just creates the entry in the soft version of the
2199 * VFTA, init will repopulate the real table.
2200 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* 128 x 32-bit words: word index = vtag[11:5], bit = vtag[4:0] */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated shadow table into the hardware VFTA */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2220
2221 /************************************************************************
2222 * ixgbe_unregister_vlan
2223 *
2224 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2225 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* 128 x 32-bit words: word index = vtag[11:5], bit = vtag[4:0] */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2246 #endif
2247
2248 static void
2249 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2250 {
2251 struct ethercom *ec = &adapter->osdep.ec;
2252 struct ixgbe_hw *hw = &adapter->hw;
2253 struct rx_ring *rxr;
2254 int i;
2255 u32 ctrl;
2256
2257
2258 /*
2259 * We get here thru init_locked, meaning
2260 * a soft reset, this has already cleared
2261 * the VFTA and other state, so if there
2262 * have been no vlan's registered do nothing.
2263 */
2264 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2265 return;
2266
2267 /* Setup the queues for vlans */
2268 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2269 for (i = 0; i < adapter->num_queues; i++) {
2270 rxr = &adapter->rx_rings[i];
2271 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2272 if (hw->mac.type != ixgbe_mac_82598EB) {
2273 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2274 ctrl |= IXGBE_RXDCTL_VME;
2275 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2276 }
2277 rxr->vtag_strip = TRUE;
2278 }
2279 }
2280
2281 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2282 return;
2283 /*
2284 * A soft reset zero's out the VFTA, so
2285 * we need to repopulate it now.
2286 */
2287 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2288 if (adapter->shadow_vfta[i] != 0)
2289 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2290 adapter->shadow_vfta[i]);
2291
2292 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2293 /* Enable the Filter Table if enabled */
2294 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2295 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2296 ctrl |= IXGBE_VLNCTRL_VFE;
2297 }
2298 if (hw->mac.type == ixgbe_mac_82598EB)
2299 ctrl |= IXGBE_VLNCTRL_VME;
2300 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2301 } /* ixgbe_setup_vlan_hw_support */
2302
2303 /************************************************************************
2304 * ixgbe_get_slot_info
2305 *
2306 * Get the width and transaction speed of
2307 * the slot this adapter is plugged into.
2308 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	// struct ixgbe_mac_info *mac = &hw->mac;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Must query the parent bridge, not the device itself */
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot is too slow/narrow for the adapter */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2411
2412 /************************************************************************
2413 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2414 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	mutex_enter(&que->dc_mtx);
	/*
	 * disabled_count tracks nested ixgbe_disable_queue() calls.
	 * The decrement only occurs when the count was positive (note
	 * the short-circuit); if it is still positive afterwards, an
	 * outer caller still wants the queue masked, so leave EIMS
	 * untouched.  A count of 0 falls through and (re)enables,
	 * which is harmless.
	 */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single EIMS register covers all queue bits */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later macs: 64 queue bits split across EIMS_EX[0,1] */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2441
2442 /************************************************************************
2443 * ixgbe_disable_queue_internal
2444 ************************************************************************/
2445 static inline void
2446 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2447 {
2448 struct ixgbe_hw *hw = &adapter->hw;
2449 struct ix_queue *que = &adapter->queues[vector];
2450 u64 queue = (u64)(1ULL << vector);
2451 u32 mask;
2452
2453 mutex_enter(&que->dc_mtx);
2454
2455 if (que->disabled_count > 0) {
2456 if (nestok)
2457 que->disabled_count++;
2458 goto out;
2459 }
2460 que->disabled_count++;
2461
2462 if (hw->mac.type == ixgbe_mac_82598EB) {
2463 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2464 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2465 } else {
2466 mask = (queue & 0xFFFFFFFF);
2467 if (mask)
2468 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2469 mask = (queue >> 32);
2470 if (mask)
2471 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2472 }
2473 out:
2474 mutex_exit(&que->dc_mtx);
2475 } /* ixgbe_disable_queue_internal */
2476
2477 /************************************************************************
2478 * ixgbe_disable_queue
2479 ************************************************************************/
2480 static inline void
2481 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2482 {
2483
2484 ixgbe_disable_queue_internal(adapter, vector, true);
2485 } /* ixgbe_disable_queue */
2486
2487 /************************************************************************
2488 * ixgbe_sched_handle_que - schedule deferred packet processing
2489 ************************************************************************/
2490 static inline void
2491 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2492 {
2493
2494 if (adapter->txrx_use_workqueue) {
2495 /*
2496 * adapter->que_wq is bound to each CPU instead of
2497 * each NIC queue to reduce workqueue kthread. As we
2498 * should consider about interrupt affinity in this
2499 * function, the workqueue kthread must be WQ_PERCPU.
2500 * If create WQ_PERCPU workqueue kthread for each NIC
2501 * queue, that number of created workqueue kthread is
2502 * (number of used NIC queue) * (number of CPUs) =
2503 * (number of CPUs) ^ 2 most often.
2504 *
2505 * The same NIC queue's interrupts are avoided by
2506 * masking the queue's interrupt. And different
2507 * NIC queue's interrupts use different struct work
2508 * (que->wq_cookie). So, "enqueued flag" to avoid
2509 * twice workqueue_enqueue() is not required .
2510 */
2511 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2512 } else {
2513 softint_schedule(que->que_si);
2514 }
2515 }
2516
2517 /************************************************************************
2518 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2519 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt until processing is done */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 *   the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = larger of the average TX/RX bytes-per-packet */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * If there may be more work (on NetBSD always: RX is deferred),
	 * hand off to the softint/workqueue; otherwise unmask the queue.
	 * The deferred handler is responsible for re-enabling.
	 */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2612
2613 /************************************************************************
2614 * ixgbe_media_status - Media Ioctl callback
2615 *
2616 * Called whenever the user queries the status of
2617 * the interface using ifconfig.
2618 ************************************************************************/
/*
 * Report the current media subtype, link state and flow-control pause
 * flags to ifconfig via the ifmedia framework.  Takes and releases the
 * core lock; refreshes the cached link state before reporting.
 */
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Update adapter->link_active / link_speed before we report them. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and return early (lock released). */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map the PHY layer(s) plus the negotiated link speed to an
	 * ifmedia subtype.  The layer checks below are not mutually
	 * exclusive; the last matching block wins for the subtype bits.
	 */

	/* Copper (BASE-T / 100BASE-TX) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (SFP+ twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Without IFM_ETH_XTYPE the backplane KR/KX4
	 * layers are reported as their closest SR/CX4 equivalents.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2768
2769 /************************************************************************
2770 * ixgbe_media_change - Media Ioctl callback
2771 *
2772 * Called when the user changes speed/duplex using
2773 * media/mediopt option with ifconfig.
2774 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be reconfigured from here. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the PHY can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Re-derive the "advertise" sysctl encoding from the speed mask
	 * so it stays consistent with what was just programmed:
	 * 0x1=100M, 0x2=1G, 0x4=10G, 0x8=10M, 0x10=2.5G, 0x20=5G.
	 * Autoselect is represented by advertise == 0.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2873
2874 /************************************************************************
2875 * ixgbe_set_promisc
2876 ************************************************************************/
/*
 * Program the unicast (UPE) and multicast (MPE) promiscuous bits of the
 * FCTRL register from the interface flags and the multicast filter
 * occupancy.  Must be called with the core lock held.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from "no unicast promiscuous". */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast entries, capped at the filter size. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* If the filter can hold everything, MPE is not needed. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Overriding writes for explicit promisc/allmulti requests. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2916
2917 /************************************************************************
2918 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2919 ************************************************************************/
/*
 * MSI-X "other" interrupt handler: link state changes, flow director,
 * ECC/overtemp errors, VF mailbox, SFP module/MSF events, fan failure
 * and external PHY interrupts.  Queue interrupts are handled elsewhere.
 * Returns 1 (interrupt claimed) in all cases.
 */
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: defer to the link softint. */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* 82598 has none of the following sources. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599: multispeed fiber event on SDP1 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3041
3042 static void
3043 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
3044 {
3045 struct adapter *adapter = que->adapter;
3046
3047 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3048 itr |= itr << 16;
3049 else
3050 itr |= IXGBE_EITR_CNT_WDIS;
3051
3052 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
3053 itr);
3054 }
3055
3056
3057 /************************************************************************
3058 * ixgbe_sysctl_interrupt_rate_handler
3059 ************************************************************************/
3060 static int
3061 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3062 {
3063 struct sysctlnode node = *rnode;
3064 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3065 struct adapter *adapter = que->adapter;
3066 uint32_t reg, usec, rate;
3067 int error;
3068
3069 if (que == NULL)
3070 return 0;
3071 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3072 usec = ((reg & 0x0FF8) >> 3);
3073 if (usec > 0)
3074 rate = 500000 / usec;
3075 else
3076 rate = 0;
3077 node.sysctl_data = &rate;
3078 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3079 if (error || newp == NULL)
3080 return error;
3081 reg &= ~0xfff; /* default, no limitation */
3082 if (rate > 0 && rate < 500000) {
3083 if (rate < 1000)
3084 rate = 1000;
3085 reg |= ((4000000/rate) & 0xff8);
3086 /*
3087 * When RSC is used, ITR interval must be larger than
3088 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3089 * The minimum value is always greater than 2us on 100M
3090 * (and 10M?(not documented)), but it's not on 1G and higher.
3091 */
3092 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3093 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3094 if ((adapter->num_queues > 1)
3095 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3096 return EINVAL;
3097 }
3098 ixgbe_max_interrupt_rate = rate;
3099 } else
3100 ixgbe_max_interrupt_rate = 0;
3101 ixgbe_eitr_write(que, reg);
3102
3103 return (0);
3104 } /* ixgbe_sysctl_interrupt_rate_handler */
3105
3106 const struct sysctlnode *
3107 ixgbe_sysctl_instance(struct adapter *adapter)
3108 {
3109 const char *dvname;
3110 struct sysctllog **log;
3111 int rc;
3112 const struct sysctlnode *rnode;
3113
3114 if (adapter->sysctltop != NULL)
3115 return adapter->sysctltop;
3116
3117 log = &adapter->sysctllog;
3118 dvname = device_xname(adapter->dev);
3119
3120 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3121 0, CTLTYPE_NODE, dvname,
3122 SYSCTL_DESCR("ixgbe information and settings"),
3123 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3124 goto err;
3125
3126 return rnode;
3127 err:
3128 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3129 return NULL;
3130 }
3131
3132 /************************************************************************
3133 * ixgbe_add_device_sysctls
3134 ************************************************************************/
/*
 * Create the per-device sysctl tree: generic tunables first, then
 * feature-conditional nodes (DMA coalescing on X550+, WoL and external
 * PHY nodes on X550EM_X 10G-T, EEE where capable).  Creation failures
 * are reported but non-fatal.
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM knob from the module-level default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the workqueue knob from the module-level default. */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3263
3264 /************************************************************************
3265 * ixgbe_allocate_pci_resources
3266 ************************************************************************/
/*
 * Map BAR0 (device registers) into kernel virtual address space.
 * The prefetchable flag is cleared because register access must not be
 * prefetched.  Returns 0 on success, ENXIO on mapping failure or an
 * unexpected BAR type.
 */
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3304
3305 static void
3306 ixgbe_free_softint(struct adapter *adapter)
3307 {
3308 struct ix_queue *que = adapter->queues;
3309 struct tx_ring *txr = adapter->tx_rings;
3310 int i;
3311
3312 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3313 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3314 if (txr->txr_si != NULL)
3315 softint_disestablish(txr->txr_si);
3316 }
3317 if (que->que_si != NULL)
3318 softint_disestablish(que->que_si);
3319 }
3320 if (adapter->txr_wq != NULL)
3321 workqueue_destroy(adapter->txr_wq);
3322 if (adapter->txr_wq_enqueued != NULL)
3323 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3324 if (adapter->que_wq != NULL)
3325 workqueue_destroy(adapter->que_wq);
3326
3327 /* Drain the Link queue */
3328 if (adapter->link_si != NULL) {
3329 softint_disestablish(adapter->link_si);
3330 adapter->link_si = NULL;
3331 }
3332 if (adapter->mod_si != NULL) {
3333 softint_disestablish(adapter->mod_si);
3334 adapter->mod_si = NULL;
3335 }
3336 if (adapter->msf_si != NULL) {
3337 softint_disestablish(adapter->msf_si);
3338 adapter->msf_si = NULL;
3339 }
3340 if (adapter->phy_si != NULL) {
3341 softint_disestablish(adapter->phy_si);
3342 adapter->phy_si = NULL;
3343 }
3344 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3345 if (adapter->fdir_si != NULL) {
3346 softint_disestablish(adapter->fdir_si);
3347 adapter->fdir_si = NULL;
3348 }
3349 }
3350 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3351 if (adapter->mbx_si != NULL) {
3352 softint_disestablish(adapter->mbx_si);
3353 adapter->mbx_si = NULL;
3354 }
3355 }
3356 } /* ixgbe_free_softint */
3357
3358 /************************************************************************
3359 * ixgbe_detach - Device removal routine
3360 *
3361 * Called when the driver is being removed.
3362 * Stops the adapter and deallocates all the resources
3363 * that were allocated for driver operation.
3364 *
3365 * return 0 on success, positive on failure
3366 ************************************************************************/
/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   Order matters: the interface is stopped and detached from the
 *   network stack before softints, event counters and ring structures
 *   are torn down.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Attach never completed: nothing to undo. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Adapter-global event counters */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue counters; stats arrays may be shorter than num_queues. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			/* rnbc is only attached on 82598. */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	/* mbsdc is only attached on X550 and newer. */
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Ring structures, per-queue locks, and the queue/mta arrays. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3550
3551 /************************************************************************
3552 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3553 *
3554 * Prepare the adapter/port for LPLU and/or WoL
3555 ************************************************************************/
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 *   Prepare the adapter/port for LPLU and/or WoL.  Stops the adapter;
 *   when WoL is enabled, programs the wakeup filter (WUFC) and wakeup
 *   control (WUC) registers, otherwise powers the PHY down and clears
 *   both.  Must be called with the core lock held.
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power down the PHY and clear wake registers. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3610
3611 /************************************************************************
3612 * ixgbe_shutdown - Shutdown entry point
3613 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * FreeBSD-style shutdown method: enter low-power/WoL mode under the
 * core lock.  Compiled out on NetBSD; kept for reference.
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3630
3631 /************************************************************************
3632 * ixgbe_suspend
3633 *
3634 * From D0 to D3
3635 ************************************************************************/
3636 static bool
3637 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3638 {
3639 struct adapter *adapter = device_private(dev);
3640 int error = 0;
3641
3642 INIT_DEBUGOUT("ixgbe_suspend: begin");
3643
3644 IXGBE_CORE_LOCK(adapter);
3645
3646 error = ixgbe_setup_low_power_mode(adapter);
3647
3648 IXGBE_CORE_UNLOCK(adapter);
3649
3650 return (error);
3651 } /* ixgbe_suspend */
3652
3653 /************************************************************************
3654 * ixgbe_resume
3655 *
3656 * From D3 to D0
3657 ************************************************************************/
3658 static bool
3659 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3660 {
3661 struct adapter *adapter = device_private(dev);
3662 struct ifnet *ifp = adapter->ifp;
3663 struct ixgbe_hw *hw = &adapter->hw;
3664 u32 wus;
3665
3666 INIT_DEBUGOUT("ixgbe_resume: begin");
3667
3668 IXGBE_CORE_LOCK(adapter);
3669
3670 /* Read & clear WUS register */
3671 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3672 if (wus)
3673 device_printf(dev, "Woken up by (WUS): %#010x\n",
3674 IXGBE_READ_REG(hw, IXGBE_WUS));
3675 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3676 /* And clear WUFC until next low-power transition */
3677 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3678
3679 /*
3680 * Required after D3->D0 transition;
3681 * will re-advertise all previous advertised speeds
3682 */
3683 if (ifp->if_flags & IFF_UP)
3684 ixgbe_init_locked(adapter);
3685
3686 IXGBE_CORE_UNLOCK(adapter);
3687
3688 return true;
3689 } /* ixgbe_resume */
3690
3691 /*
3692 * Set the various hardware offload abilities.
3693 *
3694 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3695 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3696 * mbuf offload flags the driver will understand.
3697 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Not implemented on NetBSD: if_hwassist is a FreeBSD
	 * concept; NetBSD conveys offload capabilities via
	 * if_capenable/if_csum_flags elsewhere.  Kept as a stub so the
	 * init path matches the upstream FreeBSD driver structure.
	 */
}
3703
3704 /************************************************************************
3705 * ixgbe_init_locked - Init entry point
3706 *
3707 * Used in two ways: It is used by the stack as an init
3708 * entry point in network interface structure. It is also
3709 * used by the driver as a hw/sw initialization routine to
3710 * get to a consistent state.
3711 *
3712 * return 0 on success, positive on failure
3713 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	int		err = 0;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the watchdog timer before reconfiguring. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	/*
	 * NOTE(review): RAR[0] is written twice — once here with the
	 * current hw->mac.addr and again below after copying the
	 * (possibly locally-administered) address from the ifnet.  The
	 * first write appears redundant; confirm against upstream
	 * before removing.
	 */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * Poll for the ENABLE bit to latch.  Note that `j' is
		 * deliberately NOT reset per ring: the 10 x 1ms budget
		 * is shared across all rings.
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Re-arm the 1Hz watchdog/statistics timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
3944
3945 /************************************************************************
3946 * ixgbe_init
3947 ************************************************************************/
3948 static int
3949 ixgbe_init(struct ifnet *ifp)
3950 {
3951 struct adapter *adapter = ifp->if_softc;
3952
3953 IXGBE_CORE_LOCK(adapter);
3954 ixgbe_init_locked(adapter);
3955 IXGBE_CORE_UNLOCK(adapter);
3956
3957 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
3958 } /* ixgbe_init */
3959
3960 /************************************************************************
3961 * ixgbe_set_ivar
3962 *
3963 * Setup the correct IVAR register for a particular MSI-X interrupt
3964 * (yes this is all very magic and confusing :)
3965 * - entry is the register array entry
3966 * - vector is the MSI-X vector for this queue
3967 * - type is RX/TX/MISC
3968 ************************************************************************/
3969 static void
3970 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3971 {
3972 struct ixgbe_hw *hw = &adapter->hw;
3973 u32 ivar, index;
3974
3975 vector |= IXGBE_IVAR_ALLOC_VAL;
3976
3977 switch (hw->mac.type) {
3978 case ixgbe_mac_82598EB:
3979 if (type == -1)
3980 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3981 else
3982 entry += (type * 64);
3983 index = (entry >> 2) & 0x1F;
3984 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3985 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3986 ivar |= (vector << (8 * (entry & 0x3)));
3987 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3988 break;
3989 case ixgbe_mac_82599EB:
3990 case ixgbe_mac_X540:
3991 case ixgbe_mac_X550:
3992 case ixgbe_mac_X550EM_x:
3993 case ixgbe_mac_X550EM_a:
3994 if (type == -1) { /* MISC IVAR */
3995 index = (entry & 1) * 8;
3996 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3997 ivar &= ~(0xFF << index);
3998 ivar |= (vector << index);
3999 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4000 } else { /* RX/TX IVARS */
4001 index = (16 * (entry & 1)) + (8 * type);
4002 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4003 ivar &= ~(0xFF << index);
4004 ivar |= (vector << index);
4005 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4006 }
4007 break;
4008 default:
4009 break;
4010 }
4011 } /* ixgbe_set_ivar */
4012
4013 /************************************************************************
4014 * ixgbe_configure_ivars
4015 ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32		newitr;

	/*
	 * Convert the interrupt-rate tunable into an EITR interval.
	 * A rate of 0 disables moderation entirely (and DMA coalescing
	 * along with it, since DMAC depends on moderated interrupts).
	 */
	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	/* Route each queue pair's RX and TX causes to its MSI-X vector. */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(que, newitr);
		/*
		 * To eliminate influence of the previous state.
		 * At this point, Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so both
		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
4054
4055 /************************************************************************
4056 * ixgbe_config_gpie
4057 ************************************************************************/
4058 static void
4059 ixgbe_config_gpie(struct adapter *adapter)
4060 {
4061 struct ixgbe_hw *hw = &adapter->hw;
4062 u32 gpie;
4063
4064 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4065
4066 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4067 /* Enable Enhanced MSI-X mode */
4068 gpie |= IXGBE_GPIE_MSIX_MODE
4069 | IXGBE_GPIE_EIAME
4070 | IXGBE_GPIE_PBA_SUPPORT
4071 | IXGBE_GPIE_OCD;
4072 }
4073
4074 /* Fan Failure Interrupt */
4075 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4076 gpie |= IXGBE_SDP1_GPIEN;
4077
4078 /* Thermal Sensor Interrupt */
4079 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4080 gpie |= IXGBE_SDP0_GPIEN_X540;
4081
4082 /* Link detection */
4083 switch (hw->mac.type) {
4084 case ixgbe_mac_82599EB:
4085 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4086 break;
4087 case ixgbe_mac_X550EM_x:
4088 case ixgbe_mac_X550EM_a:
4089 gpie |= IXGBE_SDP0_GPIEN_X540;
4090 break;
4091 default:
4092 break;
4093 }
4094
4095 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4096
4097 } /* ixgbe_config_gpie */
4098
4099 /************************************************************************
4100 * ixgbe_config_delay_values
4101 *
4102 * Requires adapter->max_frame_size to be set.
4103 ************************************************************************/
4104 static void
4105 ixgbe_config_delay_values(struct adapter *adapter)
4106 {
4107 struct ixgbe_hw *hw = &adapter->hw;
4108 u32 rxpb, frame, size, tmp;
4109
4110 frame = adapter->max_frame_size;
4111
4112 /* Calculate High Water */
4113 switch (hw->mac.type) {
4114 case ixgbe_mac_X540:
4115 case ixgbe_mac_X550:
4116 case ixgbe_mac_X550EM_x:
4117 case ixgbe_mac_X550EM_a:
4118 tmp = IXGBE_DV_X540(frame, frame);
4119 break;
4120 default:
4121 tmp = IXGBE_DV(frame, frame);
4122 break;
4123 }
4124 size = IXGBE_BT2KB(tmp);
4125 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4126 hw->fc.high_water[0] = rxpb - size;
4127
4128 /* Now calculate Low Water */
4129 switch (hw->mac.type) {
4130 case ixgbe_mac_X540:
4131 case ixgbe_mac_X550:
4132 case ixgbe_mac_X550EM_x:
4133 case ixgbe_mac_X550EM_a:
4134 tmp = IXGBE_LOW_DV_X540(frame);
4135 break;
4136 default:
4137 tmp = IXGBE_LOW_DV(frame);
4138 break;
4139 }
4140 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4141
4142 hw->fc.pause_time = IXGBE_FC_PAUSE;
4143 hw->fc.send_xon = TRUE;
4144 } /* ixgbe_config_delay_values */
4145
4146 /************************************************************************
4147 * ixgbe_set_multi - Multicast Update
4148 *
4149 * Called whenever multicast address list is updated.
4150 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Rebuild the shadow multicast table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * Walk the ethercom multicast list.  Fall back to ALLMULTI if the
	 * table would overflow or if any entry is a range (addrlo !=
	 * addrhi), which the perfect-filter table cannot express.
	 */
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Reflect PROMISC/ALLMULTI in the filter control register. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Program perfect filters only when the list fit in the table. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4204
4205 /************************************************************************
4206 * ixgbe_mc_array_itr
4207 *
4208 * An iterator function needed by the multicast shared code.
4209 * It feeds the shared code routine the addresses in the
4210 * array of ixgbe_set_multi() one by one.
4211 ************************************************************************/
4212 static u8 *
4213 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4214 {
4215 struct ixgbe_mc_addr *mta;
4216
4217 mta = (struct ixgbe_mc_addr *)*update_ptr;
4218 *vmdq = mta->vmdq;
4219
4220 *update_ptr = (u8*)(mta + 1);
4221
4222 return (mta->addr);
4223 } /* ixgbe_mc_array_itr */
4224
4225 /************************************************************************
4226 * ixgbe_local_timer - Timer routine
4227 *
4228 * Checks for link status, updates statistics,
4229 * and runs the watchdog check.
4230 ************************************************************************/
/*
 * callout(9) entry for the 1Hz housekeeping timer: acquire the core
 * lock and run the real work in ixgbe_local_timer1().
 */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4240
/*
 * Locked body of the 1Hz timer: probes pluggable optics, refreshes link
 * state and statistics, aggregates per-queue TX error counters, and runs
 * the TX hang watchdog.  Re-arms itself unless it resets the adapter.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum the per-ring TX DMA setup failure counters into evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			/* Only re-arm queues whose interrupt is enabled. */
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}

out:
	/* Re-schedule ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: count the event and reinitialize the adapter. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4343
4344 /************************************************************************
4345 * ixgbe_sfp_probe
4346 *
4347 * Determine if a port had optics inserted.
4348 ************************************************************************/
4349 static bool
4350 ixgbe_sfp_probe(struct adapter *adapter)
4351 {
4352 struct ixgbe_hw *hw = &adapter->hw;
4353 device_t dev = adapter->dev;
4354 bool result = FALSE;
4355
4356 if ((hw->phy.type == ixgbe_phy_nl) &&
4357 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4358 s32 ret = hw->phy.ops.identify_sfp(hw);
4359 if (ret)
4360 goto out;
4361 ret = hw->phy.ops.reset(hw);
4362 adapter->sfp_probe = FALSE;
4363 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4364 device_printf(dev,"Unsupported SFP+ module detected!");
4365 device_printf(dev,
4366 "Reload driver with supported module.\n");
4367 goto out;
4368 } else
4369 device_printf(dev, "SFP+ module detected!\n");
4370 /* We now have supported optics */
4371 result = TRUE;
4372 }
4373 out:
4374
4375 return (result);
4376 } /* ixgbe_sfp_probe */
4377
4378 /************************************************************************
4379 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4380 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * Some boards with the crosstalk errata report spurious module
	 * interrupts; only proceed if the SFP cage presence pin (an SDP
	 * bit whose position depends on the MAC) says a module is in.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 initializes the module via a PHY reset; newer MACs
	 * have a dedicated setup_sfp operation. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Kick the multispeed-fiber tasklet to renegotiate the link. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4428
4429
4430 /************************************************************************
4431 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4432 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * Use the user-advertised speeds if set; otherwise query the
	 * hardware's link capabilities and let it negotiate.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	/* The new module may support different media; rebuild the list. */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4458
4459 /************************************************************************
4460 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4461 ************************************************************************/
4462 static void
4463 ixgbe_handle_phy(void *context)
4464 {
4465 struct adapter *adapter = context;
4466 struct ixgbe_hw *hw = &adapter->hw;
4467 int error;
4468
4469 ++adapter->phy_sicount.ev_count;
4470 error = hw->phy.ops.handle_lasi(hw);
4471 if (error == IXGBE_ERR_OVERTEMP)
4472 device_printf(adapter->dev,
4473 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4474 " PHY will downshift to lower power state!\n");
4475 else if (error)
4476 device_printf(adapter->dev,
4477 "Error handling LASI interrupt: %d\n", error);
4478 } /* ixgbe_handle_phy */
4479
4480 static void
4481 ixgbe_ifstop(struct ifnet *ifp, int disable)
4482 {
4483 struct adapter *adapter = ifp->if_softc;
4484
4485 IXGBE_CORE_LOCK(adapter);
4486 ixgbe_stop(adapter);
4487 IXGBE_CORE_UNLOCK(adapter);
4488 }
4489
4490 /************************************************************************
4491 * ixgbe_stop - Stop the hardware
4492 *
4493 * Disables all traffic on the adapter by issuing a
4494 * global reset on the MAC and deallocates TX/RX buffers.
4495 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the housekeeping timer first. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Reset the MAC, then force adapter_stopped FALSE so
	 * ixgbe_stop_adapter() actually runs its full stop sequence.
	 */
	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4531
4532 /************************************************************************
4533 * ixgbe_update_link_status - Update OS on link state
4534 *
4535 * Note: Only updates the OS on the cached link state.
4536 * The real check of the hardware only happens with
4537 * a link interrupt.
4538 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/*
	 * Compare the cached hardware state (adapter->link_up, refreshed
	 * by the link interrupt) against what we last told the stack
	 * (adapter->link_active), and act only on transitions.
	 */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued packets while the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4619
4620 /************************************************************************
4621 * ixgbe_config_dmac - Configure DMA Coalescing
4622 ************************************************************************/
4623 static void
4624 ixgbe_config_dmac(struct adapter *adapter)
4625 {
4626 struct ixgbe_hw *hw = &adapter->hw;
4627 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4628
4629 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4630 return;
4631
4632 if (dcfg->watchdog_timer ^ adapter->dmac ||
4633 dcfg->link_speed ^ adapter->link_speed) {
4634 dcfg->watchdog_timer = adapter->dmac;
4635 dcfg->fcoe_en = false;
4636 dcfg->link_speed = adapter->link_speed;
4637 dcfg->num_tcs = 1;
4638
4639 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4640 dcfg->watchdog_timer, dcfg->link_speed);
4641
4642 hw->mac.ops.dmac_config(hw);
4643 }
4644 } /* ixgbe_config_dmac */
4645
4646 /************************************************************************
4647 * ixgbe_enable_intr
4648 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything except the per-queue bits (enabled last) */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific "other cause" interrupt sources */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	/* Unmask the assembled "other" causes */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
4730
4731 /************************************************************************
4732 * ixgbe_disable_intr_internal
4733 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Turn off MSI-X auto-clear so nothing re-arms behind our back */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	/*
	 * nestok is passed through to the per-queue disable; presumably it
	 * controls whether nested disables are tolerated -- see
	 * ixgbe_disable_queue_internal for the exact semantics.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4751
4752 /************************************************************************
4753 * ixgbe_disable_intr
4754 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/* Disable all interrupts, allowing nested disables (nestok=true) */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4761
4762 /************************************************************************
4763 * ixgbe_ensure_disabled_intr
4764 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/* Like ixgbe_disable_intr() but with nestok=false */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4771
4772 /************************************************************************
4773 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4774 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR reports (and clears) the pending causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Not our interrupt (shared line); count it and re-arm */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* Reap completed transmit descriptors */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: defer to the mod softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed-fiber: defer to the msf softint */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Either hand RX work to the que handler or re-enable interrupts */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4860
4861 /************************************************************************
4862 * ixgbe_free_pciintr_resources
4863 ************************************************************************/
4864 static void
4865 ixgbe_free_pciintr_resources(struct adapter *adapter)
4866 {
4867 struct ix_queue *que = adapter->queues;
4868 int rid;
4869
4870 /*
4871 * Release all msix queue resources:
4872 */
4873 for (int i = 0; i < adapter->num_queues; i++, que++) {
4874 if (que->res != NULL) {
4875 pci_intr_disestablish(adapter->osdep.pc,
4876 adapter->osdep.ihs[i]);
4877 adapter->osdep.ihs[i] = NULL;
4878 }
4879 }
4880
4881 /* Clean the Legacy or Link interrupt last */
4882 if (adapter->vector) /* we are doing MSIX */
4883 rid = adapter->vector;
4884 else
4885 rid = 0;
4886
4887 if (adapter->osdep.ihs[rid] != NULL) {
4888 pci_intr_disestablish(adapter->osdep.pc,
4889 adapter->osdep.ihs[rid]);
4890 adapter->osdep.ihs[rid] = NULL;
4891 }
4892
4893 if (adapter->osdep.intrs != NULL) {
4894 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
4895 adapter->osdep.nintrs);
4896 adapter->osdep.intrs = NULL;
4897 }
4898
4899 return;
4900 } /* ixgbe_free_pciintr_resources */
4901
4902 /************************************************************************
4903 * ixgbe_free_pci_resources
4904 ************************************************************************/
4905 static void
4906 ixgbe_free_pci_resources(struct adapter *adapter)
4907 {
4908
4909 ixgbe_free_pciintr_resources(adapter);
4910
4911 if (adapter->osdep.mem_size != 0) {
4912 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
4913 adapter->osdep.mem_bus_space_handle,
4914 adapter->osdep.mem_size);
4915 }
4916
4917 return;
4918 } /* ixgbe_free_pci_resources */
4919
4920 /************************************************************************
4921 * ixgbe_set_sysctl_value
4922 ************************************************************************/
4923 static void
4924 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4925 const char *description, int *limit, int value)
4926 {
4927 device_t dev = adapter->dev;
4928 struct sysctllog **log;
4929 const struct sysctlnode *rnode, *cnode;
4930
4931 log = &adapter->sysctllog;
4932 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
4933 aprint_error_dev(dev, "could not create sysctl root\n");
4934 return;
4935 }
4936 if (sysctl_createv(log, 0, &rnode, &cnode,
4937 CTLFLAG_READWRITE, CTLTYPE_INT,
4938 name, SYSCTL_DESCR(description),
4939 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
4940 aprint_error_dev(dev, "could not create sysctl\n");
4941 *limit = value;
4942 } /* ixgbe_set_sysctl_value */
4943
4944 /************************************************************************
4945 * ixgbe_sysctl_flowcntl
4946 *
4947 * SYSCTL wrapper around setting Flow Control
4948 ************************************************************************/
4949 static int
4950 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
4951 {
4952 struct sysctlnode node = *rnode;
4953 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4954 int error, fc;
4955
4956 fc = adapter->hw.fc.current_mode;
4957 node.sysctl_data = &fc;
4958 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4959 if (error != 0 || newp == NULL)
4960 return error;
4961
4962 /* Don't bother if it's not changed */
4963 if (fc == adapter->hw.fc.current_mode)
4964 return (0);
4965
4966 return ixgbe_set_flowcntl(adapter, fc);
4967 } /* ixgbe_sysctl_flowcntl */
4968
4969 /************************************************************************
4970 * ixgbe_set_flowcntl - Set flow control
4971 *
4972 * Flow control values:
4973 * 0 - off
4974 * 1 - rx pause
4975 * 2 - tx pause
4976 * 3 - full
4977 ************************************************************************/
4978 static int
4979 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4980 {
4981 switch (fc) {
4982 case ixgbe_fc_rx_pause:
4983 case ixgbe_fc_tx_pause:
4984 case ixgbe_fc_full:
4985 adapter->hw.fc.requested_mode = fc;
4986 if (adapter->num_queues > 1)
4987 ixgbe_disable_rx_drop(adapter);
4988 break;
4989 case ixgbe_fc_none:
4990 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4991 if (adapter->num_queues > 1)
4992 ixgbe_enable_rx_drop(adapter);
4993 break;
4994 default:
4995 return (EINVAL);
4996 }
4997
4998 #if 0 /* XXX NetBSD */
4999 /* Don't autoneg if forcing a value */
5000 adapter->hw.fc.disable_fc_autoneg = TRUE;
5001 #endif
5002 ixgbe_fc_enable(&adapter->hw);
5003
5004 return (0);
5005 } /* ixgbe_set_flowcntl */
5006
5007 /************************************************************************
5008 * ixgbe_enable_rx_drop
5009 *
5010 * Enable the hardware to drop packets when the buffer is
5011 * full. This is useful with multiqueue, so that no single
5012 * queue being full stalls the entire RX engine. We only
5013 * enable this when Multiqueue is enabled AND Flow Control
5014 * is disabled.
5015 ************************************************************************/
5016 static void
5017 ixgbe_enable_rx_drop(struct adapter *adapter)
5018 {
5019 struct ixgbe_hw *hw = &adapter->hw;
5020 struct rx_ring *rxr;
5021 u32 srrctl;
5022
5023 for (int i = 0; i < adapter->num_queues; i++) {
5024 rxr = &adapter->rx_rings[i];
5025 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5026 srrctl |= IXGBE_SRRCTL_DROP_EN;
5027 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5028 }
5029
5030 /* enable drop for each vf */
5031 for (int i = 0; i < adapter->num_vfs; i++) {
5032 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5033 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5034 IXGBE_QDE_ENABLE));
5035 }
5036 } /* ixgbe_enable_rx_drop */
5037
5038 /************************************************************************
5039 * ixgbe_disable_rx_drop
5040 ************************************************************************/
5041 static void
5042 ixgbe_disable_rx_drop(struct adapter *adapter)
5043 {
5044 struct ixgbe_hw *hw = &adapter->hw;
5045 struct rx_ring *rxr;
5046 u32 srrctl;
5047
5048 for (int i = 0; i < adapter->num_queues; i++) {
5049 rxr = &adapter->rx_rings[i];
5050 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5051 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5052 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5053 }
5054
5055 /* disable drop for each vf */
5056 for (int i = 0; i < adapter->num_vfs; i++) {
5057 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5058 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5059 }
5060 } /* ixgbe_disable_rx_drop */
5061
5062 /************************************************************************
5063 * ixgbe_sysctl_advertise
5064 *
5065 * SYSCTL wrapper around setting advertised speed
5066 ************************************************************************/
5067 static int
5068 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5069 {
5070 struct sysctlnode node = *rnode;
5071 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5072 int error = 0, advertise;
5073
5074 advertise = adapter->advertise;
5075 node.sysctl_data = &advertise;
5076 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5077 if (error != 0 || newp == NULL)
5078 return error;
5079
5080 return ixgbe_set_advertise(adapter, advertise);
5081 } /* ixgbe_sysctl_advertise */
5082
5083 /************************************************************************
5084 * ixgbe_set_advertise - Control advertised link speed
5085 *
5086 * Flags:
5087 * 0x00 - Default (all capable link speed)
5088 * 0x01 - advertise 100 Mb
5089 * 0x02 - advertise 1G
5090 * 0x04 - advertise 10G
5091 * 0x08 - advertise 10 Mb
5092 * 0x10 - advertise 2.5G
5093 * 0x20 - advertise 5G
5094 ************************************************************************/
5095 static int
5096 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5097 {
5098 device_t dev;
5099 struct ixgbe_hw *hw;
5100 ixgbe_link_speed speed = 0;
5101 ixgbe_link_speed link_caps = 0;
5102 s32 err = IXGBE_NOT_IMPLEMENTED;
5103 bool negotiate = FALSE;
5104
5105 /* Checks to validate new value */
5106 if (adapter->advertise == advertise) /* no change */
5107 return (0);
5108
5109 dev = adapter->dev;
5110 hw = &adapter->hw;
5111
5112 /* No speed changes for backplane media */
5113 if (hw->phy.media_type == ixgbe_media_type_backplane)
5114 return (ENODEV);
5115
5116 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5117 (hw->phy.multispeed_fiber))) {
5118 device_printf(dev,
5119 "Advertised speed can only be set on copper or "
5120 "multispeed fiber media types.\n");
5121 return (EINVAL);
5122 }
5123
5124 if (advertise < 0x0 || advertise > 0x2f) {
5125 device_printf(dev,
5126 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5127 return (EINVAL);
5128 }
5129
5130 if (hw->mac.ops.get_link_capabilities) {
5131 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5132 &negotiate);
5133 if (err != IXGBE_SUCCESS) {
5134 device_printf(dev, "Unable to determine supported advertise speeds\n");
5135 return (ENODEV);
5136 }
5137 }
5138
5139 /* Set new value and report new advertised mode */
5140 if (advertise & 0x1) {
5141 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5142 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5143 return (EINVAL);
5144 }
5145 speed |= IXGBE_LINK_SPEED_100_FULL;
5146 }
5147 if (advertise & 0x2) {
5148 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5149 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5150 return (EINVAL);
5151 }
5152 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5153 }
5154 if (advertise & 0x4) {
5155 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5156 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5157 return (EINVAL);
5158 }
5159 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5160 }
5161 if (advertise & 0x8) {
5162 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5163 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5164 return (EINVAL);
5165 }
5166 speed |= IXGBE_LINK_SPEED_10_FULL;
5167 }
5168 if (advertise & 0x10) {
5169 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5170 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5171 return (EINVAL);
5172 }
5173 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5174 }
5175 if (advertise & 0x20) {
5176 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5177 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5178 return (EINVAL);
5179 }
5180 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5181 }
5182 if (advertise == 0)
5183 speed = link_caps; /* All capable link speed */
5184
5185 hw->mac.autotry_restart = TRUE;
5186 hw->mac.ops.setup_link(hw, speed, TRUE);
5187 adapter->advertise = advertise;
5188
5189 return (0);
5190 } /* ixgbe_set_advertise */
5191
5192 /************************************************************************
5193 * ixgbe_get_advertise - Get current advertised speed settings
5194 *
5195 * Formatted for sysctl usage.
5196 * Flags:
5197 * 0x01 - advertise 100 Mb
5198 * 0x02 - advertise 1G
5199 * 0x04 - advertise 10G
5200 * 0x08 - advertise 10 Mb (yes, Mb)
5201 * 0x10 - advertise 2.5G
5202 * 0x20 - advertise 5G
5203 ************************************************************************/
5204 static int
5205 ixgbe_get_advertise(struct adapter *adapter)
5206 {
5207 struct ixgbe_hw *hw = &adapter->hw;
5208 int speed;
5209 ixgbe_link_speed link_caps = 0;
5210 s32 err;
5211 bool negotiate = FALSE;
5212
5213 /*
5214 * Advertised speed means nothing unless it's copper or
5215 * multi-speed fiber
5216 */
5217 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5218 !(hw->phy.multispeed_fiber))
5219 return (0);
5220
5221 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5222 if (err != IXGBE_SUCCESS)
5223 return (0);
5224
5225 speed =
5226 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5227 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5228 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5229 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5230 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5231 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5232
5233 return speed;
5234 } /* ixgbe_get_advertise */
5235
5236 /************************************************************************
5237 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5238 *
5239 * Control values:
5240 * 0/1 - off / on (use default value of 1000)
5241 *
5242 * Legal timer values are:
5243 * 50,100,250,500,1000,2000,5000,10000
5244 *
5245 * Turning off interrupt moderation will also turn this off.
5246 ************************************************************************/
5247 static int
5248 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5249 {
5250 struct sysctlnode node = *rnode;
5251 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5252 struct ifnet *ifp = adapter->ifp;
5253 int error;
5254 int newval;
5255
5256 newval = adapter->dmac;
5257 node.sysctl_data = &newval;
5258 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5259 if ((error) || (newp == NULL))
5260 return (error);
5261
5262 switch (newval) {
5263 case 0:
5264 /* Disabled */
5265 adapter->dmac = 0;
5266 break;
5267 case 1:
5268 /* Enable and use default */
5269 adapter->dmac = 1000;
5270 break;
5271 case 50:
5272 case 100:
5273 case 250:
5274 case 500:
5275 case 1000:
5276 case 2000:
5277 case 5000:
5278 case 10000:
5279 /* Legal values - allow */
5280 adapter->dmac = newval;
5281 break;
5282 default:
5283 /* Do nothing, illegal value */
5284 return (EINVAL);
5285 }
5286
5287 /* Re-initialize hardware if it's already running */
5288 if (ifp->if_flags & IFF_RUNNING)
5289 ifp->if_init(ifp);
5290
5291 return (0);
5292 }
5293
5294 #ifdef IXGBE_DEBUG
5295 /************************************************************************
5296 * ixgbe_sysctl_power_state
5297 *
5298 * Sysctl to test power states
5299 * Values:
5300 * 0 - set device to D0
5301 * 3 - set device to D3
5302 * (none) - get current device power state
5303 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Unported FreeBSD implementation kept for reference; it relies
	 * on FreeBSD-only interfaces (req->newp, pci_get_powerstate,
	 * DEVICE_SUSPEND/DEVICE_RESUME) and does not build on NetBSD.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state changing via this sysctl is not supported yet */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5336 #endif
5337
5338 /************************************************************************
5339 * ixgbe_sysctl_wol_enable
5340 *
5341 * Sysctl to enable/disable the WoL capability,
5342 * if supported by the adapter.
5343 *
5344 * Values:
5345 * 0 - disabled
5346 * 1 - enabled
5347 ************************************************************************/
5348 static int
5349 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5350 {
5351 struct sysctlnode node = *rnode;
5352 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5353 struct ixgbe_hw *hw = &adapter->hw;
5354 bool new_wol_enabled;
5355 int error = 0;
5356
5357 new_wol_enabled = hw->wol_enabled;
5358 node.sysctl_data = &new_wol_enabled;
5359 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5360 if ((error) || (newp == NULL))
5361 return (error);
5362 if (new_wol_enabled == hw->wol_enabled)
5363 return (0);
5364
5365 if (new_wol_enabled && !adapter->wol_support)
5366 return (ENODEV);
5367 else
5368 hw->wol_enabled = new_wol_enabled;
5369
5370 return (0);
5371 } /* ixgbe_sysctl_wol_enable */
5372
5373 /************************************************************************
5374 * ixgbe_sysctl_wufc - Wake Up Filter Control
5375 *
5376 * Sysctl to enable/disable the types of packets that the
5377 * adapter will wake up on upon receipt.
5378 * Flags:
5379 * 0x1 - Link Status Change
5380 * 0x2 - Magic Packet
5381 * 0x4 - Direct Exact
5382 * 0x8 - Directed Multicast
5383 * 0x10 - Broadcast
5384 * 0x20 - ARP/IPv4 Request Packet
5385 * 0x40 - Direct IPv4 Packet
5386 * 0x80 - Direct IPv6 Packet
5387 *
5388 * Settings not listed above will cause the sysctl to return an error.
5389 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only bits 0-7 are defined wake-up filter flags */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): ORing the old low 24 bits back into the new value
	 * means a wake-up flag, once set, can never be cleared through
	 * this sysctl.  The mask looks like it was meant to be 0xffffff00
	 * (preserve only the non-filter bits) -- confirm against upstream
	 * intent before changing.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5415
5416 #ifdef IXGBE_DEBUG
5417 /************************************************************************
5418 * ixgbe_sysctl_print_rss_config
5419 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Unported FreeBSD implementation kept for reference; it uses
	 * FreeBSD's sbuf(9) API, which is not available here, so the
	 * function currently just returns 0 on NetBSD.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5473 #endif /* IXGBE_DEBUG */
5474
5475 /************************************************************************
5476 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5477 *
5478 * For X552/X557-AT devices using an external PHY
5479 ************************************************************************/
5480 static int
5481 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5482 {
5483 struct sysctlnode node = *rnode;
5484 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5485 struct ixgbe_hw *hw = &adapter->hw;
5486 int val;
5487 u16 reg;
5488 int error;
5489
5490 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5491 device_printf(adapter->dev,
5492 "Device has no supported external thermal sensor.\n");
5493 return (ENODEV);
5494 }
5495
5496 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5497 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5498 device_printf(adapter->dev,
5499 "Error reading from PHY's current temperature register\n");
5500 return (EAGAIN);
5501 }
5502
5503 node.sysctl_data = &val;
5504
5505 /* Shift temp for output */
5506 val = reg >> 8;
5507
5508 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5509 if ((error) || (newp == NULL))
5510 return (error);
5511
5512 return (0);
5513 } /* ixgbe_sysctl_phy_temp */
5514
5515 /************************************************************************
5516 * ixgbe_sysctl_phy_overtemp_occurred
5517 *
5518 * Reports (directly from the PHY) whether the current PHY
5519 * temperature is over the overtemp threshold.
5520 ************************************************************************/
5521 static int
5522 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5523 {
5524 struct sysctlnode node = *rnode;
5525 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5526 struct ixgbe_hw *hw = &adapter->hw;
5527 int val, error;
5528 u16 reg;
5529
5530 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5531 device_printf(adapter->dev,
5532 "Device has no supported external thermal sensor.\n");
5533 return (ENODEV);
5534 }
5535
5536 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5537 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5538 device_printf(adapter->dev,
5539 "Error reading from PHY's temperature status register\n");
5540 return (EAGAIN);
5541 }
5542
5543 node.sysctl_data = &val;
5544
5545 /* Get occurrence bit */
5546 val = !!(reg & 0x4000);
5547
5548 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5549 if ((error) || (newp == NULL))
5550 return (error);
5551
5552 return (0);
5553 } /* ixgbe_sysctl_phy_overtemp_occurred */
5554
5555 /************************************************************************
5556 * ixgbe_sysctl_eee_state
5557 *
5558 * Sysctl to set EEE power saving feature
5559 * Values:
5560 * 0 - disable EEE
5561 * 1 - enable EEE
5562 * (none) - get current device EEE state
5563 ************************************************************************/
5564 static int
5565 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5566 {
5567 struct sysctlnode node = *rnode;
5568 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5569 struct ifnet *ifp = adapter->ifp;
5570 device_t dev = adapter->dev;
5571 int curr_eee, new_eee, error = 0;
5572 s32 retval;
5573
5574 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5575 node.sysctl_data = &new_eee;
5576 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5577 if ((error) || (newp == NULL))
5578 return (error);
5579
5580 /* Nothing to do */
5581 if (new_eee == curr_eee)
5582 return (0);
5583
5584 /* Not supported */
5585 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5586 return (EINVAL);
5587
5588 /* Bounds checking */
5589 if ((new_eee < 0) || (new_eee > 1))
5590 return (EINVAL);
5591
5592 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5593 if (retval) {
5594 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5595 return (EINVAL);
5596 }
5597
5598 /* Restart auto-neg */
5599 ifp->if_init(ifp);
5600
5601 device_printf(dev, "New EEE state: %d\n", new_eee);
5602
5603 /* Cache new value */
5604 if (new_eee)
5605 adapter->feat_en |= IXGBE_FEATURE_EEE;
5606 else
5607 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5608
5609 return (error);
5610 } /* ixgbe_sysctl_eee_state */
5611
5612 /************************************************************************
5613 * ixgbe_init_device_features
5614 ************************************************************************/
5615 static void
5616 ixgbe_init_device_features(struct adapter *adapter)
5617 {
5618 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5619 | IXGBE_FEATURE_RSS
5620 | IXGBE_FEATURE_MSI
5621 | IXGBE_FEATURE_MSIX
5622 | IXGBE_FEATURE_LEGACY_IRQ
5623 | IXGBE_FEATURE_LEGACY_TX;
5624
5625 /* Set capabilities first... */
5626 switch (adapter->hw.mac.type) {
5627 case ixgbe_mac_82598EB:
5628 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5629 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5630 break;
5631 case ixgbe_mac_X540:
5632 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5633 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5634 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5635 (adapter->hw.bus.func == 0))
5636 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5637 break;
5638 case ixgbe_mac_X550:
5639 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5640 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5641 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5642 break;
5643 case ixgbe_mac_X550EM_x:
5644 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5645 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5646 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5647 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5648 break;
5649 case ixgbe_mac_X550EM_a:
5650 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5651 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5652 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5653 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5654 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5655 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5656 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5657 }
5658 break;
5659 case ixgbe_mac_82599EB:
5660 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5661 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5662 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5663 (adapter->hw.bus.func == 0))
5664 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5665 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5666 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5667 break;
5668 default:
5669 break;
5670 }
5671
5672 /* Enabled by default... */
5673 /* Fan failure detection */
5674 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5675 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5676 /* Netmap */
5677 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
5678 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
5679 /* EEE */
5680 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
5681 adapter->feat_en |= IXGBE_FEATURE_EEE;
5682 /* Thermal Sensor */
5683 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5684 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5685
5686 /* Enabled via global sysctl... */
5687 /* Flow Director */
5688 if (ixgbe_enable_fdir) {
5689 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
5690 adapter->feat_en |= IXGBE_FEATURE_FDIR;
5691 else
5692 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5693 }
5694 /* Legacy (single queue) transmit */
5695 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
5696 ixgbe_enable_legacy_tx)
5697 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
5698 /*
5699 * Message Signal Interrupts - Extended (MSI-X)
5700 * Normal MSI is only enabled if MSI-X calls fail.
5701 */
5702 if (!ixgbe_enable_msix)
5703 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
5704 /* Receive-Side Scaling (RSS) */
5705 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5706 adapter->feat_en |= IXGBE_FEATURE_RSS;
5707
5708 /* Disable features with unmet dependencies... */
5709 /* No MSI-X */
5710 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
5711 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5712 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5713 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5714 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5715 }
5716 } /* ixgbe_init_device_features */
5717
5718 /************************************************************************
5719 * ixgbe_probe - Device identification routine
5720 *
5721 * Determines if the driver should be loaded on
5722 * adapter based on its PCI vendor/device ID.
5723 *
5724 * return BUS_PROBE_DEFAULT on success, positive on failure
5725 ************************************************************************/
5726 static int
5727 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5728 {
5729 const struct pci_attach_args *pa = aux;
5730
5731 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5732 }
5733
5734 static ixgbe_vendor_info_t *
5735 ixgbe_lookup(const struct pci_attach_args *pa)
5736 {
5737 ixgbe_vendor_info_t *ent;
5738 pcireg_t subid;
5739
5740 INIT_DEBUGOUT("ixgbe_lookup: begin");
5741
5742 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5743 return NULL;
5744
5745 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5746
5747 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5748 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5749 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5750 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5751 (ent->subvendor_id == 0)) &&
5752 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5753 (ent->subdevice_id == 0))) {
5754 ++ixgbe_total_ports;
5755 return ent;
5756 }
5757 }
5758 return NULL;
5759 }
5760
5761 static int
5762 ixgbe_ifflags_cb(struct ethercom *ec)
5763 {
5764 struct ifnet *ifp = &ec->ec_if;
5765 struct adapter *adapter = ifp->if_softc;
5766 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
5767
5768 IXGBE_CORE_LOCK(adapter);
5769
5770 if (change != 0)
5771 adapter->if_flags = ifp->if_flags;
5772
5773 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5774 rc = ENETRESET;
5775 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5776 ixgbe_set_promisc(adapter);
5777
5778 /* Set up VLAN support and filter */
5779 ixgbe_setup_vlan_hw_support(adapter);
5780
5781 IXGBE_CORE_UNLOCK(adapter);
5782
5783 return rc;
5784 }
5785
5786 /************************************************************************
5787 * ixgbe_ioctl - Ioctl entry point
5788 *
5789 * Called when the user wants to configure the interface.
5790 *
5791 * return 0 on success, positive on failure
5792 ************************************************************************/
5793 static int
5794 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
5795 {
5796 struct adapter *adapter = ifp->if_softc;
5797 struct ixgbe_hw *hw = &adapter->hw;
5798 struct ifcapreq *ifcr = data;
5799 struct ifreq *ifr = data;
5800 int error = 0;
5801 int l4csum_en;
5802 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
5803 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
5804
5805 switch (command) {
5806 case SIOCSIFFLAGS:
5807 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5808 break;
5809 case SIOCADDMULTI:
5810 case SIOCDELMULTI:
5811 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
5812 break;
5813 case SIOCSIFMEDIA:
5814 case SIOCGIFMEDIA:
5815 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5816 break;
5817 case SIOCSIFCAP:
5818 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5819 break;
5820 case SIOCSIFMTU:
5821 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5822 break;
5823 #ifdef __NetBSD__
5824 case SIOCINITIFADDR:
5825 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
5826 break;
5827 case SIOCGIFFLAGS:
5828 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
5829 break;
5830 case SIOCGIFAFLAG_IN:
5831 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
5832 break;
5833 case SIOCGIFADDR:
5834 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
5835 break;
5836 case SIOCGIFMTU:
5837 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
5838 break;
5839 case SIOCGIFCAP:
5840 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
5841 break;
5842 case SIOCGETHERCAP:
5843 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
5844 break;
5845 case SIOCGLIFADDR:
5846 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
5847 break;
5848 case SIOCZIFDATA:
5849 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
5850 hw->mac.ops.clear_hw_cntrs(hw);
5851 ixgbe_clear_evcnt(adapter);
5852 break;
5853 case SIOCAIFADDR:
5854 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
5855 break;
5856 #endif
5857 default:
5858 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
5859 break;
5860 }
5861
5862 switch (command) {
5863 case SIOCSIFMEDIA:
5864 case SIOCGIFMEDIA:
5865 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
5866 case SIOCGI2C:
5867 {
5868 struct ixgbe_i2c_req i2c;
5869
5870 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5871 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5872 if (error != 0)
5873 break;
5874 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5875 error = EINVAL;
5876 break;
5877 }
5878 if (i2c.len > sizeof(i2c.data)) {
5879 error = EINVAL;
5880 break;
5881 }
5882
5883 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
5884 i2c.dev_addr, i2c.data);
5885 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5886 break;
5887 }
5888 case SIOCSIFCAP:
5889 /* Layer-4 Rx checksum offload has to be turned on and
5890 * off as a unit.
5891 */
5892 l4csum_en = ifcr->ifcr_capenable & l4csum;
5893 if (l4csum_en != l4csum && l4csum_en != 0)
5894 return EINVAL;
5895 /*FALLTHROUGH*/
5896 case SIOCADDMULTI:
5897 case SIOCDELMULTI:
5898 case SIOCSIFFLAGS:
5899 case SIOCSIFMTU:
5900 default:
5901 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
5902 return error;
5903 if ((ifp->if_flags & IFF_RUNNING) == 0)
5904 ;
5905 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
5906 IXGBE_CORE_LOCK(adapter);
5907 if ((ifp->if_flags & IFF_RUNNING) != 0)
5908 ixgbe_init_locked(adapter);
5909 ixgbe_recalculate_max_frame(adapter);
5910 IXGBE_CORE_UNLOCK(adapter);
5911 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
5912 /*
5913 * Multicast list has changed; set the hardware filter
5914 * accordingly.
5915 */
5916 IXGBE_CORE_LOCK(adapter);
5917 ixgbe_disable_intr(adapter);
5918 ixgbe_set_multi(adapter);
5919 ixgbe_enable_intr(adapter);
5920 IXGBE_CORE_UNLOCK(adapter);
5921 }
5922 return 0;
5923 }
5924
5925 return error;
5926 } /* ixgbe_ioctl */
5927
5928 /************************************************************************
5929 * ixgbe_check_fan_failure
5930 ************************************************************************/
5931 static void
5932 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
5933 {
5934 u32 mask;
5935
5936 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
5937 IXGBE_ESDP_SDP1;
5938
5939 if (reg & mask)
5940 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5941 } /* ixgbe_check_fan_failure */
5942
5943 /************************************************************************
5944 * ixgbe_handle_que
5945 ************************************************************************/
5946 static void
5947 ixgbe_handle_que(void *context)
5948 {
5949 struct ix_queue *que = context;
5950 struct adapter *adapter = que->adapter;
5951 struct tx_ring *txr = que->txr;
5952 struct ifnet *ifp = adapter->ifp;
5953 bool more = false;
5954
5955 que->handleq.ev_count++;
5956
5957 if (ifp->if_flags & IFF_RUNNING) {
5958 more = ixgbe_rxeof(que);
5959 IXGBE_TX_LOCK(txr);
5960 more |= ixgbe_txeof(txr);
5961 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5962 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
5963 ixgbe_mq_start_locked(ifp, txr);
5964 /* Only for queue 0 */
5965 /* NetBSD still needs this for CBQ */
5966 if ((&adapter->queues[0] == que)
5967 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
5968 ixgbe_legacy_start_locked(ifp, txr);
5969 IXGBE_TX_UNLOCK(txr);
5970 }
5971
5972 if (more) {
5973 que->req.ev_count++;
5974 ixgbe_sched_handle_que(adapter, que);
5975 } else if (que->res != NULL) {
5976 /* Re-enable this interrupt */
5977 ixgbe_enable_queue(adapter, que->msix);
5978 } else
5979 ixgbe_enable_intr(adapter);
5980
5981 return;
5982 } /* ixgbe_handle_que */
5983
5984 /************************************************************************
5985 * ixgbe_handle_que_work
5986 ************************************************************************/
5987 static void
5988 ixgbe_handle_que_work(struct work *wk, void *context)
5989 {
5990 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
5991
5992 /*
5993 * "enqueued flag" is not required here.
5994 * See ixgbe_msix_que().
5995 */
5996 ixgbe_handle_que(que);
5997 }
5998
5999 /************************************************************************
6000 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6001 ************************************************************************/
6002 static int
6003 ixgbe_allocate_legacy(struct adapter *adapter,
6004 const struct pci_attach_args *pa)
6005 {
6006 device_t dev = adapter->dev;
6007 struct ix_queue *que = adapter->queues;
6008 struct tx_ring *txr = adapter->tx_rings;
6009 int counts[PCI_INTR_TYPE_SIZE];
6010 pci_intr_type_t intr_type, max_type;
6011 char intrbuf[PCI_INTRSTR_LEN];
6012 const char *intrstr = NULL;
6013
6014 /* We allocate a single interrupt resource */
6015 max_type = PCI_INTR_TYPE_MSI;
6016 counts[PCI_INTR_TYPE_MSIX] = 0;
6017 counts[PCI_INTR_TYPE_MSI] =
6018 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6019 /* Check not feat_en but feat_cap to fallback to INTx */
6020 counts[PCI_INTR_TYPE_INTX] =
6021 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6022
6023 alloc_retry:
6024 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6025 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6026 return ENXIO;
6027 }
6028 adapter->osdep.nintrs = 1;
6029 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6030 intrbuf, sizeof(intrbuf));
6031 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6032 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6033 device_xname(dev));
6034 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6035 if (adapter->osdep.ihs[0] == NULL) {
6036 aprint_error_dev(dev,"unable to establish %s\n",
6037 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6038 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6039 adapter->osdep.intrs = NULL;
6040 switch (intr_type) {
6041 case PCI_INTR_TYPE_MSI:
6042 /* The next try is for INTx: Disable MSI */
6043 max_type = PCI_INTR_TYPE_INTX;
6044 counts[PCI_INTR_TYPE_INTX] = 1;
6045 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6046 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6047 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6048 goto alloc_retry;
6049 } else
6050 break;
6051 case PCI_INTR_TYPE_INTX:
6052 default:
6053 /* See below */
6054 break;
6055 }
6056 }
6057 if (intr_type == PCI_INTR_TYPE_INTX) {
6058 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6059 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6060 }
6061 if (adapter->osdep.ihs[0] == NULL) {
6062 aprint_error_dev(dev,
6063 "couldn't establish interrupt%s%s\n",
6064 intrstr ? " at " : "", intrstr ? intrstr : "");
6065 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6066 adapter->osdep.intrs = NULL;
6067 return ENXIO;
6068 }
6069 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6070 /*
6071 * Try allocating a fast interrupt and the associated deferred
6072 * processing contexts.
6073 */
6074 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6075 txr->txr_si =
6076 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6077 ixgbe_deferred_mq_start, txr);
6078 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6079 ixgbe_handle_que, que);
6080
6081 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6082 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6083 aprint_error_dev(dev,
6084 "could not establish software interrupts\n");
6085
6086 return ENXIO;
6087 }
6088 /* For simplicity in the handlers */
6089 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6090
6091 return (0);
6092 } /* ixgbe_allocate_legacy */
6093
6094 /************************************************************************
6095 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6096 ************************************************************************/
6097 static int
6098 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6099 {
6100 device_t dev = adapter->dev;
6101 struct ix_queue *que = adapter->queues;
6102 struct tx_ring *txr = adapter->tx_rings;
6103 pci_chipset_tag_t pc;
6104 char intrbuf[PCI_INTRSTR_LEN];
6105 char intr_xname[32];
6106 char wqname[MAXCOMLEN];
6107 const char *intrstr = NULL;
6108 int error, vector = 0;
6109 int cpu_id = 0;
6110 kcpuset_t *affinity;
6111 #ifdef RSS
6112 unsigned int rss_buckets = 0;
6113 kcpuset_t cpu_mask;
6114 #endif
6115
6116 pc = adapter->osdep.pc;
6117 #ifdef RSS
6118 /*
6119 * If we're doing RSS, the number of queues needs to
6120 * match the number of RSS buckets that are configured.
6121 *
6122 * + If there's more queues than RSS buckets, we'll end
6123 * up with queues that get no traffic.
6124 *
6125 * + If there's more RSS buckets than queues, we'll end
6126 * up having multiple RSS buckets map to the same queue,
6127 * so there'll be some contention.
6128 */
6129 rss_buckets = rss_getnumbuckets();
6130 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6131 (adapter->num_queues != rss_buckets)) {
6132 device_printf(dev,
6133 "%s: number of queues (%d) != number of RSS buckets (%d)"
6134 "; performance will be impacted.\n",
6135 __func__, adapter->num_queues, rss_buckets);
6136 }
6137 #endif
6138
6139 adapter->osdep.nintrs = adapter->num_queues + 1;
6140 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6141 adapter->osdep.nintrs) != 0) {
6142 aprint_error_dev(dev,
6143 "failed to allocate MSI-X interrupt\n");
6144 return (ENXIO);
6145 }
6146
6147 kcpuset_create(&affinity, false);
6148 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6149 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6150 device_xname(dev), i);
6151 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6152 sizeof(intrbuf));
6153 #ifdef IXGBE_MPSAFE
6154 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6155 true);
6156 #endif
6157 /* Set the handler function */
6158 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6159 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6160 intr_xname);
6161 if (que->res == NULL) {
6162 aprint_error_dev(dev,
6163 "Failed to register QUE handler\n");
6164 error = ENXIO;
6165 goto err_out;
6166 }
6167 que->msix = vector;
6168 adapter->active_queues |= (u64)(1 << que->msix);
6169
6170 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6171 #ifdef RSS
6172 /*
6173 * The queue ID is used as the RSS layer bucket ID.
6174 * We look up the queue ID -> RSS CPU ID and select
6175 * that.
6176 */
6177 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6178 CPU_SETOF(cpu_id, &cpu_mask);
6179 #endif
6180 } else {
6181 /*
6182 * Bind the MSI-X vector, and thus the
6183 * rings to the corresponding CPU.
6184 *
6185 * This just happens to match the default RSS
6186 * round-robin bucket -> queue -> CPU allocation.
6187 */
6188 if (adapter->num_queues > 1)
6189 cpu_id = i;
6190 }
6191 /* Round-robin affinity */
6192 kcpuset_zero(affinity);
6193 kcpuset_set(affinity, cpu_id % ncpu);
6194 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6195 NULL);
6196 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6197 intrstr);
6198 if (error == 0) {
6199 #if 1 /* def IXGBE_DEBUG */
6200 #ifdef RSS
6201 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6202 cpu_id % ncpu);
6203 #else
6204 aprint_normal(", bound queue %d to cpu %d", i,
6205 cpu_id % ncpu);
6206 #endif
6207 #endif /* IXGBE_DEBUG */
6208 }
6209 aprint_normal("\n");
6210
6211 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6212 txr->txr_si = softint_establish(
6213 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6214 ixgbe_deferred_mq_start, txr);
6215 if (txr->txr_si == NULL) {
6216 aprint_error_dev(dev,
6217 "couldn't establish software interrupt\n");
6218 error = ENXIO;
6219 goto err_out;
6220 }
6221 }
6222 que->que_si
6223 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6224 ixgbe_handle_que, que);
6225 if (que->que_si == NULL) {
6226 aprint_error_dev(dev,
6227 "couldn't establish software interrupt\n");
6228 error = ENXIO;
6229 goto err_out;
6230 }
6231 }
6232 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6233 error = workqueue_create(&adapter->txr_wq, wqname,
6234 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6235 IXGBE_WORKQUEUE_FLAGS);
6236 if (error) {
6237 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6238 goto err_out;
6239 }
6240 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6241
6242 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6243 error = workqueue_create(&adapter->que_wq, wqname,
6244 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6245 IXGBE_WORKQUEUE_FLAGS);
6246 if (error) {
6247 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6248 goto err_out;
6249 }
6250
6251 /* and Link */
6252 cpu_id++;
6253 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6254 adapter->vector = vector;
6255 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6256 sizeof(intrbuf));
6257 #ifdef IXGBE_MPSAFE
6258 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6259 true);
6260 #endif
6261 /* Set the link handler function */
6262 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6263 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6264 intr_xname);
6265 if (adapter->osdep.ihs[vector] == NULL) {
6266 adapter->res = NULL;
6267 aprint_error_dev(dev, "Failed to register LINK handler\n");
6268 error = ENXIO;
6269 goto err_out;
6270 }
6271 /* Round-robin affinity */
6272 kcpuset_zero(affinity);
6273 kcpuset_set(affinity, cpu_id % ncpu);
6274 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6275 NULL);
6276
6277 aprint_normal_dev(dev,
6278 "for link, interrupting at %s", intrstr);
6279 if (error == 0)
6280 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6281 else
6282 aprint_normal("\n");
6283
6284 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6285 adapter->mbx_si =
6286 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6287 ixgbe_handle_mbx, adapter);
6288 if (adapter->mbx_si == NULL) {
6289 aprint_error_dev(dev,
6290 "could not establish software interrupts\n");
6291
6292 error = ENXIO;
6293 goto err_out;
6294 }
6295 }
6296
6297 kcpuset_destroy(affinity);
6298 aprint_normal_dev(dev,
6299 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6300
6301 return (0);
6302
6303 err_out:
6304 kcpuset_destroy(affinity);
6305 ixgbe_free_softint(adapter);
6306 ixgbe_free_pciintr_resources(adapter);
6307 return (error);
6308 } /* ixgbe_allocate_msix */
6309
6310 /************************************************************************
6311 * ixgbe_configure_interrupts
6312 *
6313 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6314 * This will also depend on user settings.
6315 ************************************************************************/
6316 static int
6317 ixgbe_configure_interrupts(struct adapter *adapter)
6318 {
6319 device_t dev = adapter->dev;
6320 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6321 int want, queues, msgs;
6322
6323 /* Default to 1 queue if MSI-X setup fails */
6324 adapter->num_queues = 1;
6325
6326 /* Override by tuneable */
6327 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6328 goto msi;
6329
6330 /*
6331 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6332 * interrupt slot.
6333 */
6334 if (ncpu == 1)
6335 goto msi;
6336
6337 /* First try MSI-X */
6338 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6339 msgs = MIN(msgs, IXG_MAX_NINTR);
6340 if (msgs < 2)
6341 goto msi;
6342
6343 adapter->msix_mem = (void *)1; /* XXX */
6344
6345 /* Figure out a reasonable auto config value */
6346 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6347
6348 #ifdef RSS
6349 /* If we're doing RSS, clamp at the number of RSS buckets */
6350 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6351 queues = min(queues, rss_getnumbuckets());
6352 #endif
6353 if (ixgbe_num_queues > queues) {
6354 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6355 ixgbe_num_queues = queues;
6356 }
6357
6358 if (ixgbe_num_queues != 0)
6359 queues = ixgbe_num_queues;
6360 else
6361 queues = min(queues,
6362 min(mac->max_tx_queues, mac->max_rx_queues));
6363
6364 /* reflect correct sysctl value */
6365 ixgbe_num_queues = queues;
6366
6367 /*
6368 * Want one vector (RX/TX pair) per queue
6369 * plus an additional for Link.
6370 */
6371 want = queues + 1;
6372 if (msgs >= want)
6373 msgs = want;
6374 else {
6375 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6376 "%d vectors but %d queues wanted!\n",
6377 msgs, want);
6378 goto msi;
6379 }
6380 adapter->num_queues = queues;
6381 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6382 return (0);
6383
6384 /*
6385 * MSI-X allocation failed or provided us with
6386 * less vectors than needed. Free MSI-X resources
6387 * and we'll try enabling MSI.
6388 */
6389 msi:
6390 /* Without MSI-X, some features are no longer supported */
6391 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6392 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6393 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6394 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6395
6396 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6397 adapter->msix_mem = NULL; /* XXX */
6398 if (msgs > 1)
6399 msgs = 1;
6400 if (msgs != 0) {
6401 msgs = 1;
6402 adapter->feat_en |= IXGBE_FEATURE_MSI;
6403 return (0);
6404 }
6405
6406 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6407 aprint_error_dev(dev,
6408 "Device does not support legacy interrupts.\n");
6409 return 1;
6410 }
6411
6412 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6413
6414 return (0);
6415 } /* ixgbe_configure_interrupts */
6416
6417
6418 /************************************************************************
6419 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6420 *
6421 * Done outside of interrupt context since the driver might sleep
6422 ************************************************************************/
6423 static void
6424 ixgbe_handle_link(void *context)
6425 {
6426 struct adapter *adapter = context;
6427 struct ixgbe_hw *hw = &adapter->hw;
6428
6429 IXGBE_CORE_LOCK(adapter);
6430 ++adapter->link_sicount.ev_count;
6431 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6432 ixgbe_update_link_status(adapter);
6433
6434 /* Re-enable link interrupts */
6435 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6436
6437 IXGBE_CORE_UNLOCK(adapter);
6438 } /* ixgbe_handle_link */
6439
6440 /************************************************************************
6441 * ixgbe_rearm_queues
6442 ************************************************************************/
6443 static void
6444 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6445 {
6446 u32 mask;
6447
6448 switch (adapter->hw.mac.type) {
6449 case ixgbe_mac_82598EB:
6450 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6451 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6452 break;
6453 case ixgbe_mac_82599EB:
6454 case ixgbe_mac_X540:
6455 case ixgbe_mac_X550:
6456 case ixgbe_mac_X550EM_x:
6457 case ixgbe_mac_X550EM_a:
6458 mask = (queues & 0xFFFFFFFF);
6459 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6460 mask = (queues >> 32);
6461 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6462 break;
6463 default:
6464 break;
6465 }
6466 } /* ixgbe_rearm_queues */
6467