ixgbe.c revision 1.128.2.3 1 /* $NetBSD: ixgbe.c,v 1.128.2.3 2018/03/30 06:20:15 pgoyette Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
/************************************************************************
 * Driver version
 *
 *   Reported in the attach banner alongside the branding string.
 ************************************************************************/
char ixgbe_driver_version[] = "3.2.12-k";
85
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   Covers the 82598/82599/X540/X550/X550EM families.  All entries use
 *   string index 0 (the single generic branding string below).
 ************************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 / X550EM family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
144
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixgbe_vendor_info_array entries;
 *   every entry above uses index 0.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
151
/************************************************************************
 * Function prototypes
 *
 *   Forward declarations for all file-local (static) routines.
 ************************************************************************/

/* autoconf(9) entry points and power management */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);

/* ifnet / ethercom interface */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static void	ixgbe_ifstop(struct ifnet *, int);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_stop(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);

/* PCI / interrupt resource management */
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_softint(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);

/* periodic timer and hardware configuration */
static void	ixgbe_local_timer(void *);
static void	ixgbe_local_timer1(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

/* TX/RX ring initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* interrupt, statistics and filter maintenance */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_promisc(struct adapter *);
static void	ixgbe_set_multi(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct ix_queue *, uint32_t);

/* VLAN hardware offload */
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* sysctl / statistics plumbing */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_link(void *);

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

/* PCI ID table lookup used by probe/attach */
static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
268
/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
/* autoconf(9) attachment for the "ixg" driver: probe/attach/detach. */
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus module glue; retained for reference, unused on NetBSD. */
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
286
/*
 * TUNABLE PARAMETERS:
 *
 * NOTE: SYSCTL_INT and TUNABLE_INT are defined to empty macros below, so
 * the FreeBSD-style sysctl/tunable declarations that follow are no-ops on
 * NetBSD and are kept only to document the variables' meanings.
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
/* NOTE(review): declared int but initialized with a bool constant; the
 * value is copied into hw->allow_unsupported_sfp at attach time. */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * MP-safety flags for callouts, softints and workqueues; selected at
 * compile time by the NET_MPSAFE kernel option.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
412
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the RSS hash key
 *   registers (RSSRK) and the hash-type selection register (MRQC) so
 *   that received flows are spread across the configured RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses a different queue-index stride in RETA */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family extends the redirection table to 512 entries */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* Wrap the queue index round-robin over all queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate 4 entries, then flush one 32-bit register.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 live in RETA, 128+ in ERETA */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit key words) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash config into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in SR-IOV mode bits */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
529
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round-up addend so a buffer size converts to whole SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring  *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	int             i, j;
	u32             bufsz, fctrl, srrctl, rxcsum;
	u32             hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 also needs pause-frame discard/pass-MAC-control */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Convert the mbuf RX buffer size to SRRCTL BSIZEPKT units (1 KB) */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue setup */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location for queue 0...7 are different between
		 * 82598 and newer.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Enable packet-split header types (82599 and newer only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixgbe_initialize_receive_units */
674
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the DMA TX engine (82599 and newer) */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
747
748 /************************************************************************
749 * ixgbe_attach - Device initialization routine
750 *
751 * Called when the driver is being loaded.
752 * Identifies the type of hardware, allocates all resources
753 * and initializes the hardware.
754 *
755 * return 0 on success, positive on failure
756 ************************************************************************/
757 static void
758 ixgbe_attach(device_t parent, device_t dev, void *aux)
759 {
760 struct adapter *adapter;
761 struct ixgbe_hw *hw;
762 int error = -1;
763 u32 ctrl_ext;
764 u16 high, low, nvmreg;
765 pcireg_t id, subid;
766 ixgbe_vendor_info_t *ent;
767 struct pci_attach_args *pa = aux;
768 const char *str;
769 char buf[256];
770
771 INIT_DEBUGOUT("ixgbe_attach: begin");
772
773 /* Allocate, clear, and link in our adapter structure */
774 adapter = device_private(dev);
775 adapter->hw.back = adapter;
776 adapter->dev = dev;
777 hw = &adapter->hw;
778 adapter->osdep.pc = pa->pa_pc;
779 adapter->osdep.tag = pa->pa_tag;
780 if (pci_dma64_available(pa))
781 adapter->osdep.dmat = pa->pa_dmat64;
782 else
783 adapter->osdep.dmat = pa->pa_dmat;
784 adapter->osdep.attached = false;
785
786 ent = ixgbe_lookup(pa);
787
788 KASSERT(ent != NULL);
789
790 aprint_normal(": %s, Version - %s\n",
791 ixgbe_strings[ent->index], ixgbe_driver_version);
792
793 /* Core Lock Init*/
794 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
795
796 /* Set up the timer callout */
797 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
798
799 /* Determine hardware revision */
800 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
801 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
802
803 hw->vendor_id = PCI_VENDOR(id);
804 hw->device_id = PCI_PRODUCT(id);
805 hw->revision_id =
806 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
807 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
808 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
809
810 /*
811 * Make sure BUSMASTER is set
812 */
813 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
814
815 /* Do base PCI setup - map BAR0 */
816 if (ixgbe_allocate_pci_resources(adapter, pa)) {
817 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
818 error = ENXIO;
819 goto err_out;
820 }
821
822 /* let hardware know driver is loaded */
823 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
824 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
825 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
826
827 /*
828 * Initialize the shared code
829 */
830 if (ixgbe_init_shared_code(hw)) {
831 aprint_error_dev(dev, "Unable to initialize the shared code\n");
832 error = ENXIO;
833 goto err_out;
834 }
835
836 switch (hw->mac.type) {
837 case ixgbe_mac_82598EB:
838 str = "82598EB";
839 break;
840 case ixgbe_mac_82599EB:
841 str = "82599EB";
842 break;
843 case ixgbe_mac_X540:
844 str = "X540";
845 break;
846 case ixgbe_mac_X550:
847 str = "X550";
848 break;
849 case ixgbe_mac_X550EM_x:
850 str = "X550EM";
851 break;
852 case ixgbe_mac_X550EM_a:
853 str = "X550EM A";
854 break;
855 default:
856 str = "Unknown";
857 break;
858 }
859 aprint_normal_dev(dev, "device %s\n", str);
860
861 if (hw->mbx.ops.init_params)
862 hw->mbx.ops.init_params(hw);
863
864 hw->allow_unsupported_sfp = allow_unsupported_sfp;
865
866 /* Pick up the 82599 settings */
867 if (hw->mac.type != ixgbe_mac_82598EB) {
868 hw->phy.smart_speed = ixgbe_smart_speed;
869 adapter->num_segs = IXGBE_82599_SCATTER;
870 } else
871 adapter->num_segs = IXGBE_82598_SCATTER;
872
873 hw->mac.ops.set_lan_id(hw);
874 ixgbe_init_device_features(adapter);
875
876 if (ixgbe_configure_interrupts(adapter)) {
877 error = ENXIO;
878 goto err_out;
879 }
880
881 /* Allocate multicast array memory. */
882 adapter->mta = malloc(sizeof(*adapter->mta) *
883 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
884 if (adapter->mta == NULL) {
885 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
886 error = ENOMEM;
887 goto err_out;
888 }
889
890 /* Enable WoL (if supported) */
891 ixgbe_check_wol_support(adapter);
892
893 /* Verify adapter fan is still functional (if applicable) */
894 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
895 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
896 ixgbe_check_fan_failure(adapter, esdp, FALSE);
897 }
898
899 /* Ensure SW/FW semaphore is free */
900 ixgbe_init_swfw_semaphore(hw);
901
902 /* Enable EEE power saving */
903 if (adapter->feat_en & IXGBE_FEATURE_EEE)
904 hw->mac.ops.setup_eee(hw, TRUE);
905
906 /* Set an initial default flow control value */
907 hw->fc.requested_mode = ixgbe_flow_control;
908
909 /* Sysctls for limiting the amount of work done in the taskqueues */
910 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
911 "max number of rx packets to process",
912 &adapter->rx_process_limit, ixgbe_rx_process_limit);
913
914 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
915 "max number of tx packets to process",
916 &adapter->tx_process_limit, ixgbe_tx_process_limit);
917
918 /* Do descriptor calc and sanity checks */
919 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
920 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
921 aprint_error_dev(dev, "TXD config issue, using default!\n");
922 adapter->num_tx_desc = DEFAULT_TXD;
923 } else
924 adapter->num_tx_desc = ixgbe_txd;
925
926 /*
927 * With many RX rings it is easy to exceed the
928 * system mbuf allocation. Tuning nmbclusters
929 * can alleviate this.
930 */
931 if (nmbclusters > 0) {
932 int s;
933 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
934 if (s > nmbclusters) {
935 aprint_error_dev(dev, "RX Descriptors exceed "
936 "system mbuf max, using default instead!\n");
937 ixgbe_rxd = DEFAULT_RXD;
938 }
939 }
940
941 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
942 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
943 aprint_error_dev(dev, "RXD config issue, using default!\n");
944 adapter->num_rx_desc = DEFAULT_RXD;
945 } else
946 adapter->num_rx_desc = ixgbe_rxd;
947
948 /* Allocate our TX/RX Queues */
949 if (ixgbe_allocate_queues(adapter)) {
950 error = ENOMEM;
951 goto err_out;
952 }
953
954 hw->phy.reset_if_overtemp = TRUE;
955 error = ixgbe_reset_hw(hw);
956 hw->phy.reset_if_overtemp = FALSE;
957 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
958 /*
959 * No optics in this port, set up
960 * so the timer routine will probe
961 * for later insertion.
962 */
963 adapter->sfp_probe = TRUE;
964 error = IXGBE_SUCCESS;
965 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
966 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
967 error = EIO;
968 goto err_late;
969 } else if (error) {
970 aprint_error_dev(dev, "Hardware initialization failed\n");
971 error = EIO;
972 goto err_late;
973 }
974
975 /* Make sure we have a good EEPROM before we read from it */
976 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
977 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
978 error = EIO;
979 goto err_late;
980 }
981
982 aprint_normal("%s:", device_xname(dev));
983 /* NVM Image Version */
984 switch (hw->mac.type) {
985 case ixgbe_mac_X540:
986 case ixgbe_mac_X550EM_a:
987 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
988 if (nvmreg == 0xffff)
989 break;
990 high = (nvmreg >> 12) & 0x0f;
991 low = (nvmreg >> 4) & 0xff;
992 id = nvmreg & 0x0f;
993 aprint_normal(" NVM Image Version %u.", high);
994 if (hw->mac.type == ixgbe_mac_X540)
995 str = "%x";
996 else
997 str = "%02x";
998 aprint_normal(str, low);
999 aprint_normal(" ID 0x%x,", id);
1000 break;
1001 case ixgbe_mac_X550EM_x:
1002 case ixgbe_mac_X550:
1003 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1004 if (nvmreg == 0xffff)
1005 break;
1006 high = (nvmreg >> 12) & 0x0f;
1007 low = nvmreg & 0xff;
1008 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1009 break;
1010 default:
1011 break;
1012 }
1013
1014 /* PHY firmware revision */
1015 switch (hw->mac.type) {
1016 case ixgbe_mac_X540:
1017 case ixgbe_mac_X550:
1018 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1019 if (nvmreg == 0xffff)
1020 break;
1021 high = (nvmreg >> 12) & 0x0f;
1022 low = (nvmreg >> 4) & 0xff;
1023 id = nvmreg & 0x000f;
1024 aprint_normal(" PHY FW Revision %u.", high);
1025 if (hw->mac.type == ixgbe_mac_X540)
1026 str = "%x";
1027 else
1028 str = "%02x";
1029 aprint_normal(str, low);
1030 aprint_normal(" ID 0x%x,", id);
1031 break;
1032 default:
1033 break;
1034 }
1035
1036 /* NVM Map version & OEM NVM Image version */
1037 switch (hw->mac.type) {
1038 case ixgbe_mac_X550:
1039 case ixgbe_mac_X550EM_x:
1040 case ixgbe_mac_X550EM_a:
1041 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1042 if (nvmreg != 0xffff) {
1043 high = (nvmreg >> 12) & 0x0f;
1044 low = nvmreg & 0x00ff;
1045 aprint_normal(" NVM Map version %u.%02x,", high, low);
1046 }
1047 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1048 if (nvmreg != 0xffff) {
1049 high = (nvmreg >> 12) & 0x0f;
1050 low = nvmreg & 0x00ff;
1051 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1052 low);
1053 }
1054 break;
1055 default:
1056 break;
1057 }
1058
1059 /* Print the ETrackID */
1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1061 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1062 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1063
1064 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1065 error = ixgbe_allocate_msix(adapter, pa);
1066 if (error) {
1067 /* Free allocated queue structures first */
1068 ixgbe_free_transmit_structures(adapter);
1069 ixgbe_free_receive_structures(adapter);
1070 free(adapter->queues, M_DEVBUF);
1071
1072 /* Fallback to legacy interrupt */
1073 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1074 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1075 adapter->feat_en |= IXGBE_FEATURE_MSI;
1076 adapter->num_queues = 1;
1077
1078 /* Allocate our TX/RX Queues again */
1079 if (ixgbe_allocate_queues(adapter)) {
1080 error = ENOMEM;
1081 goto err_out;
1082 }
1083 }
1084 }
1085 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1086 error = ixgbe_allocate_legacy(adapter, pa);
1087 if (error)
1088 goto err_late;
1089
1090 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1091 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1092 ixgbe_handle_link, adapter);
1093 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1094 ixgbe_handle_mod, adapter);
1095 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1096 ixgbe_handle_msf, adapter);
1097 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1098 ixgbe_handle_phy, adapter);
1099 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1100 adapter->fdir_si =
1101 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1102 ixgbe_reinit_fdir, adapter);
1103 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1104 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1105 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1106 && (adapter->fdir_si == NULL))) {
1107 aprint_error_dev(dev,
1108 "could not establish software interrupts ()\n");
1109 goto err_out;
1110 }
1111
1112 error = ixgbe_start_hw(hw);
1113 switch (error) {
1114 case IXGBE_ERR_EEPROM_VERSION:
1115 aprint_error_dev(dev, "This device is a pre-production adapter/"
1116 "LOM. Please be aware there may be issues associated "
1117 "with your hardware.\nIf you are experiencing problems "
1118 "please contact your Intel or hardware representative "
1119 "who provided you with this hardware.\n");
1120 break;
1121 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1122 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1123 error = EIO;
1124 goto err_late;
1125 case IXGBE_ERR_SFP_NOT_PRESENT:
1126 aprint_error_dev(dev, "No SFP+ Module found\n");
1127 /* falls thru */
1128 default:
1129 break;
1130 }
1131
1132 /* Setup OS specific network interface */
1133 if (ixgbe_setup_interface(dev, adapter) != 0)
1134 goto err_late;
1135
1136 /*
1137 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1138 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1139 */
1140 if (hw->phy.media_type == ixgbe_media_type_copper) {
1141 uint16_t id1, id2;
1142 int oui, model, rev;
1143 const char *descr;
1144
1145 id1 = hw->phy.id >> 16;
1146 id2 = hw->phy.id & 0xffff;
1147 oui = MII_OUI(id1, id2);
1148 model = MII_MODEL(id2);
1149 rev = MII_REV(id2);
1150 if ((descr = mii_get_descr(oui, model)) != NULL)
1151 aprint_normal_dev(dev,
1152 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1153 descr, oui, model, rev);
1154 else
1155 aprint_normal_dev(dev,
1156 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1157 oui, model, rev);
1158 }
1159
1160 /* Enable the optics for 82599 SFP+ fiber */
1161 ixgbe_enable_tx_laser(hw);
1162
1163 /* Enable power to the phy. */
1164 ixgbe_set_phy_power(hw, TRUE);
1165
1166 /* Initialize statistics */
1167 ixgbe_update_stats_counters(adapter);
1168
1169 /* Check PCIE slot type/speed/width */
1170 ixgbe_get_slot_info(adapter);
1171
1172 /*
1173 * Do time init and sysctl init here, but
1174 * only on the first port of a bypass adapter.
1175 */
1176 ixgbe_bypass_init(adapter);
1177
1178 /* Set an initial dmac value */
1179 adapter->dmac = 0;
1180 /* Set initial advertised speeds (if applicable) */
1181 adapter->advertise = ixgbe_get_advertise(adapter);
1182
1183 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1184 ixgbe_define_iov_schemas(dev, &error);
1185
1186 /* Add sysctls */
1187 ixgbe_add_device_sysctls(adapter);
1188 ixgbe_add_hw_stats(adapter);
1189
1190 /* For Netmap */
1191 adapter->init_locked = ixgbe_init_locked;
1192 adapter->stop_locked = ixgbe_stop;
1193
1194 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1195 ixgbe_netmap_attach(adapter);
1196
1197 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1198 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1199 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1200 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1201
1202 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1203 pmf_class_network_register(dev, adapter->ifp);
1204 else
1205 aprint_error_dev(dev, "couldn't establish power handler\n");
1206
1207 INIT_DEBUGOUT("ixgbe_attach: end");
1208 adapter->osdep.attached = true;
1209
1210 return;
1211
1212 err_late:
1213 ixgbe_free_transmit_structures(adapter);
1214 ixgbe_free_receive_structures(adapter);
1215 free(adapter->queues, M_DEVBUF);
1216 err_out:
1217 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1218 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1219 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1220 ixgbe_free_softint(adapter);
1221 ixgbe_free_pci_resources(adapter);
1222 if (adapter->mta != NULL)
1223 free(adapter->mta, M_DEVBUF);
1224 IXGBE_CORE_LOCK_DESTROY(adapter);
1225
1226 return;
1227 } /* ixgbe_attach */
1228
1229 /************************************************************************
1230 * ixgbe_check_wol_support
1231 *
1232 * Checks whether the adapter's ports are capable of
1233 * Wake On LAN by reading the adapter's NVM.
1234 *
1235 * Sets each port's hw->wol_enabled value depending
1236 * on the value read here.
1237 ************************************************************************/
1238 static void
1239 ixgbe_check_wol_support(struct adapter *adapter)
1240 {
1241 struct ixgbe_hw *hw = &adapter->hw;
1242 u16 dev_caps = 0;
1243
1244 /* Find out WoL support for port */
1245 adapter->wol_support = hw->wol_enabled = 0;
1246 ixgbe_get_device_caps(hw, &dev_caps);
1247 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1248 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1249 hw->bus.func == 0))
1250 adapter->wol_support = hw->wol_enabled = 1;
1251
1252 /* Save initial wake up filter configuration */
1253 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1254
1255 return;
1256 } /* ixgbe_check_wol_support */
1257
1258 /************************************************************************
1259 * ixgbe_setup_interface
1260 *
1261 * Setup networking device structure and register an interface.
1262 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; bind it to this softc. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Declare the driver's stack entry points MP-safe. */
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	/* Legacy start is installed unconditionally as the if_start hook. */
	ifp->if_start = ixgbe_legacy_start;
	/* Leave a little slack below the descriptor ring size. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NetBSD attach protocol: if_initialize(), then ether_ifattach(),
	 * then if_register().  Keep this ordering.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	/* Frame size derived from the current MTU plus Ethernet overhead. */
	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6
			     | IFCAP_LRO;
	/* All interface capabilities start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1365
1366 /************************************************************************
1367 * ixgbe_add_media_types
1368 ************************************************************************/
1369 static void
1370 ixgbe_add_media_types(struct adapter *adapter)
1371 {
1372 struct ixgbe_hw *hw = &adapter->hw;
1373 device_t dev = adapter->dev;
1374 u64 layer;
1375
1376 layer = adapter->phy_layer;
1377
1378 #define ADD(mm, dd) \
1379 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1380
1381 /* Media types with matching NetBSD media defines */
1382 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1383 ADD(IFM_10G_T | IFM_FDX, 0);
1384 }
1385 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1386 ADD(IFM_1000_T | IFM_FDX, 0);
1387 }
1388 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1389 ADD(IFM_100_TX | IFM_FDX, 0);
1390 }
1391 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1392 ADD(IFM_10_T | IFM_FDX, 0);
1393 }
1394
1395 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1396 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1397 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1398 }
1399
1400 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1401 ADD(IFM_10G_LR | IFM_FDX, 0);
1402 if (hw->phy.multispeed_fiber) {
1403 ADD(IFM_1000_LX | IFM_FDX, 0);
1404 }
1405 }
1406 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1407 ADD(IFM_10G_SR | IFM_FDX, 0);
1408 if (hw->phy.multispeed_fiber) {
1409 ADD(IFM_1000_SX | IFM_FDX, 0);
1410 }
1411 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1412 ADD(IFM_1000_SX | IFM_FDX, 0);
1413 }
1414 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1415 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1416 }
1417
1418 #ifdef IFM_ETH_XTYPE
1419 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1420 ADD(IFM_10G_KR | IFM_FDX, 0);
1421 }
1422 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1423 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1424 }
1425 #else
1426 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1427 device_printf(dev, "Media supported: 10GbaseKR\n");
1428 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1429 ADD(IFM_10G_SR | IFM_FDX, 0);
1430 }
1431 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1432 device_printf(dev, "Media supported: 10GbaseKX4\n");
1433 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1434 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1435 }
1436 #endif
1437 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1438 ADD(IFM_1000_KX | IFM_FDX, 0);
1439 }
1440 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1441 ADD(IFM_2500_KX | IFM_FDX, 0);
1442 }
1443 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1444 ADD(IFM_2500_T | IFM_FDX, 0);
1445 }
1446 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1447 ADD(IFM_5000_T | IFM_FDX, 0);
1448 }
1449 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1450 device_printf(dev, "Media supported: 1000baseBX\n");
1451 /* XXX no ifmedia_set? */
1452
1453 ADD(IFM_AUTO, 0);
1454
1455 #undef ADD
1456 } /* ixgbe_add_media_types */
1457
1458 /************************************************************************
1459 * ixgbe_is_sfp
1460 ************************************************************************/
1461 static inline bool
1462 ixgbe_is_sfp(struct ixgbe_hw *hw)
1463 {
1464 switch (hw->mac.type) {
1465 case ixgbe_mac_82598EB:
1466 if (hw->phy.type == ixgbe_phy_nl)
1467 return TRUE;
1468 return FALSE;
1469 case ixgbe_mac_82599EB:
1470 switch (hw->mac.ops.get_media_type(hw)) {
1471 case ixgbe_media_type_fiber:
1472 case ixgbe_media_type_fiber_qsfp:
1473 return TRUE;
1474 default:
1475 return FALSE;
1476 }
1477 case ixgbe_mac_X550EM_x:
1478 case ixgbe_mac_X550EM_a:
1479 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1480 return TRUE;
1481 return FALSE;
1482 default:
1483 return FALSE;
1484 }
1485 } /* ixgbe_is_sfp */
1486
1487 /************************************************************************
1488 * ixgbe_config_link
1489 ************************************************************************/
1490 static void
1491 ixgbe_config_link(struct adapter *adapter)
1492 {
1493 struct ixgbe_hw *hw = &adapter->hw;
1494 u32 autoneg, err = 0;
1495 bool sfp, negotiate = false;
1496
1497 sfp = ixgbe_is_sfp(hw);
1498
1499 if (sfp) {
1500 if (hw->phy.multispeed_fiber) {
1501 hw->mac.ops.setup_sfp(hw);
1502 ixgbe_enable_tx_laser(hw);
1503 kpreempt_disable();
1504 softint_schedule(adapter->msf_si);
1505 kpreempt_enable();
1506 } else {
1507 kpreempt_disable();
1508 softint_schedule(adapter->mod_si);
1509 kpreempt_enable();
1510 }
1511 } else {
1512 if (hw->mac.ops.check_link)
1513 err = ixgbe_check_link(hw, &adapter->link_speed,
1514 &adapter->link_up, FALSE);
1515 if (err)
1516 goto out;
1517 autoneg = hw->phy.autoneg_advertised;
1518 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1519 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1520 &negotiate);
1521 if (err)
1522 goto out;
1523 if (hw->mac.ops.setup_link)
1524 err = hw->mac.ops.setup_link(hw, autoneg,
1525 adapter->link_up);
1526 }
1527 out:
1528
1529 return;
1530 } /* ixgbe_config_link */
1531
1532 /************************************************************************
1533 * ixgbe_update_stats_counters - Update board statistics counters.
1534 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/*
	 * NOTE(review): these hardware counters are accumulated ("+=")
	 * into evcnt's, which implies the registers are clear-on-read;
	 * each register must therefore be read exactly once per call.
	 * Confirm against the Intel datasheet before reordering reads.
	 */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC register only exists on X550-class hardware. */
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/*
	 * Per-queue RX/TX packet and RX drop counters.  When fewer
	 * queues are configured than hardware counters exist, fold the
	 * extra counters into the configured queues (j wraps via modulo).
	 */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		int j = i % adapter->num_queues;
		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	/* Missed-packet counters, folded the same way. */
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		/* RNBC (receive no buffers) exists on 82598 only. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx is never incremented in this function
	 * (it stays 0), so this subtraction is currently a no-op; verify
	 * against upstream, which folds per-queue misses into missed_rx.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level (global) XON/XOFF pause frame counters. */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	/*
	 * Octet counters: 82599 and later have 64-bit low/high register
	 * pairs; 82598 keeps its counter in the "high" register only.
	 * Transmitted pause frames are subtracted from good-octets TX
	 * (one minimum-size frame each).
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet-size histogram counters. */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, adjusted for the pause frames counted above. */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	/* Error, management, and remaining size-histogram counters. */
	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1671
1672 /************************************************************************
1673 * ixgbe_add_hw_stats
1674 *
1675 * Add sysctl variables, one per statistic, to the system.
1676 ************************************************************************/
1677 static void
1678 ixgbe_add_hw_stats(struct adapter *adapter)
1679 {
1680 device_t dev = adapter->dev;
1681 const struct sysctlnode *rnode, *cnode;
1682 struct sysctllog **log = &adapter->sysctllog;
1683 struct tx_ring *txr = adapter->tx_rings;
1684 struct rx_ring *rxr = adapter->rx_rings;
1685 struct ixgbe_hw *hw = &adapter->hw;
1686 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1687 const char *xname = device_xname(dev);
1688
1689 /* Driver Statistics */
1690 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1691 NULL, xname, "Driver tx dma soft fail EFBIG");
1692 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1693 NULL, xname, "m_defrag() failed");
1694 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1695 NULL, xname, "Driver tx dma hard fail EFBIG");
1696 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1697 NULL, xname, "Driver tx dma hard fail EINVAL");
1698 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1699 NULL, xname, "Driver tx dma hard fail other");
1700 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1701 NULL, xname, "Driver tx dma soft fail EAGAIN");
1702 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1703 NULL, xname, "Driver tx dma soft fail ENOMEM");
1704 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1705 NULL, xname, "Watchdog timeouts");
1706 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1707 NULL, xname, "TSO errors");
1708 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1709 NULL, xname, "Link MSI-X IRQ Handled");
1710 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1711 NULL, xname, "Link softint");
1712 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1713 NULL, xname, "module softint");
1714 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1715 NULL, xname, "multimode softint");
1716 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1717 NULL, xname, "external PHY softint");
1718
1719 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1720 #ifdef LRO
1721 struct lro_ctrl *lro = &rxr->lro;
1722 #endif /* LRO */
1723
1724 snprintf(adapter->queues[i].evnamebuf,
1725 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1726 xname, i);
1727 snprintf(adapter->queues[i].namebuf,
1728 sizeof(adapter->queues[i].namebuf), "q%d", i);
1729
1730 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1731 aprint_error_dev(dev, "could not create sysctl root\n");
1732 break;
1733 }
1734
1735 if (sysctl_createv(log, 0, &rnode, &rnode,
1736 0, CTLTYPE_NODE,
1737 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1738 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1739 break;
1740
1741 if (sysctl_createv(log, 0, &rnode, &cnode,
1742 CTLFLAG_READWRITE, CTLTYPE_INT,
1743 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1744 ixgbe_sysctl_interrupt_rate_handler, 0,
1745 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1746 break;
1747
1748 if (sysctl_createv(log, 0, &rnode, &cnode,
1749 CTLFLAG_READONLY, CTLTYPE_INT,
1750 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1751 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1752 0, CTL_CREATE, CTL_EOL) != 0)
1753 break;
1754
1755 if (sysctl_createv(log, 0, &rnode, &cnode,
1756 CTLFLAG_READONLY, CTLTYPE_INT,
1757 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1758 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1759 0, CTL_CREATE, CTL_EOL) != 0)
1760 break;
1761
1762 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1763 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1764 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1765 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1766 "Handled queue in softint");
1767 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1768 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1769 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1770 NULL, adapter->queues[i].evnamebuf, "TSO");
1771 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1772 NULL, adapter->queues[i].evnamebuf,
1773 "Queue No Descriptor Available");
1774 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1775 NULL, adapter->queues[i].evnamebuf,
1776 "Queue Packets Transmitted");
1777 #ifndef IXGBE_LEGACY_TX
1778 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1779 NULL, adapter->queues[i].evnamebuf,
1780 "Packets dropped in pcq");
1781 #endif
1782
1783 if (sysctl_createv(log, 0, &rnode, &cnode,
1784 CTLFLAG_READONLY,
1785 CTLTYPE_INT,
1786 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1787 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1788 CTL_CREATE, CTL_EOL) != 0)
1789 break;
1790
1791 if (sysctl_createv(log, 0, &rnode, &cnode,
1792 CTLFLAG_READONLY,
1793 CTLTYPE_INT,
1794 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1795 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1796 CTL_CREATE, CTL_EOL) != 0)
1797 break;
1798
1799 if (i < __arraycount(stats->mpc)) {
1800 evcnt_attach_dynamic(&stats->mpc[i],
1801 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1802 "RX Missed Packet Count");
1803 if (hw->mac.type == ixgbe_mac_82598EB)
1804 evcnt_attach_dynamic(&stats->rnbc[i],
1805 EVCNT_TYPE_MISC, NULL,
1806 adapter->queues[i].evnamebuf,
1807 "Receive No Buffers");
1808 }
1809 if (i < __arraycount(stats->pxontxc)) {
1810 evcnt_attach_dynamic(&stats->pxontxc[i],
1811 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1812 "pxontxc");
1813 evcnt_attach_dynamic(&stats->pxonrxc[i],
1814 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1815 "pxonrxc");
1816 evcnt_attach_dynamic(&stats->pxofftxc[i],
1817 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1818 "pxofftxc");
1819 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1820 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1821 "pxoffrxc");
1822 evcnt_attach_dynamic(&stats->pxon2offc[i],
1823 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1824 "pxon2offc");
1825 }
1826 if (i < __arraycount(stats->qprc)) {
1827 evcnt_attach_dynamic(&stats->qprc[i],
1828 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1829 "qprc");
1830 evcnt_attach_dynamic(&stats->qptc[i],
1831 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1832 "qptc");
1833 evcnt_attach_dynamic(&stats->qbrc[i],
1834 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1835 "qbrc");
1836 evcnt_attach_dynamic(&stats->qbtc[i],
1837 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1838 "qbtc");
1839 evcnt_attach_dynamic(&stats->qprdc[i],
1840 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1841 "qprdc");
1842 }
1843
1844 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1845 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1846 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1847 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1848 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1849 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1850 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1851 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1852 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1853 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1854 #ifdef LRO
1855 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1856 CTLFLAG_RD, &lro->lro_queued, 0,
1857 "LRO Queued");
1858 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1859 CTLFLAG_RD, &lro->lro_flushed, 0,
1860 "LRO Flushed");
1861 #endif /* LRO */
1862 }
1863
1864 /* MAC stats get their own sub node */
1865
1866 snprintf(stats->namebuf,
1867 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1868
1869 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1870 stats->namebuf, "rx csum offload - IP");
1871 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1872 stats->namebuf, "rx csum offload - L4");
1873 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1874 stats->namebuf, "rx csum offload - IP bad");
1875 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1876 stats->namebuf, "rx csum offload - L4 bad");
1877 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1878 stats->namebuf, "Interrupt conditions zero");
1879 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1880 stats->namebuf, "Legacy interrupts");
1881
1882 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1883 stats->namebuf, "CRC Errors");
1884 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1885 stats->namebuf, "Illegal Byte Errors");
1886 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1887 stats->namebuf, "Byte Errors");
1888 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1889 stats->namebuf, "MAC Short Packets Discarded");
1890 if (hw->mac.type >= ixgbe_mac_X550)
1891 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1892 stats->namebuf, "Bad SFD");
1893 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1894 stats->namebuf, "Total Packets Missed");
1895 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1896 stats->namebuf, "MAC Local Faults");
1897 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1898 stats->namebuf, "MAC Remote Faults");
1899 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1900 stats->namebuf, "Receive Length Errors");
1901 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1902 stats->namebuf, "Link XON Transmitted");
1903 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1904 stats->namebuf, "Link XON Received");
1905 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1906 stats->namebuf, "Link XOFF Transmitted");
1907 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1908 stats->namebuf, "Link XOFF Received");
1909
1910 /* Packet Reception Stats */
1911 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1912 stats->namebuf, "Total Octets Received");
1913 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1914 stats->namebuf, "Good Octets Received");
1915 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1916 stats->namebuf, "Total Packets Received");
1917 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1918 stats->namebuf, "Good Packets Received");
1919 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1920 stats->namebuf, "Multicast Packets Received");
1921 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1922 stats->namebuf, "Broadcast Packets Received");
1923 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1924 stats->namebuf, "64 byte frames received ");
1925 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1926 stats->namebuf, "65-127 byte frames received");
1927 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1928 stats->namebuf, "128-255 byte frames received");
1929 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1930 stats->namebuf, "256-511 byte frames received");
1931 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1932 stats->namebuf, "512-1023 byte frames received");
1933 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1934 stats->namebuf, "1023-1522 byte frames received");
1935 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1936 stats->namebuf, "Receive Undersized");
1937 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1938 stats->namebuf, "Fragmented Packets Received ");
1939 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "Oversized Packets Received");
1941 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "Received Jabber");
1943 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "Management Packets Received");
1945 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "Management Packets Dropped");
1947 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Checksum Errors");
1949
1950 /* Packet Transmission Stats */
1951 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "Good Octets Transmitted");
1953 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Total Packets Transmitted");
1955 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "Good Packets Transmitted");
1957 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "Broadcast Packets Transmitted");
1959 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "Multicast Packets Transmitted");
1961 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "Management Packets Transmitted");
1963 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "64 byte frames transmitted ");
1965 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "65-127 byte frames transmitted");
1967 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "128-255 byte frames transmitted");
1969 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "256-511 byte frames transmitted");
1971 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "512-1023 byte frames transmitted");
1973 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "1024-1522 byte frames transmitted");
1975 } /* ixgbe_add_hw_stats */
1976
1977 static void
1978 ixgbe_clear_evcnt(struct adapter *adapter)
1979 {
1980 struct tx_ring *txr = adapter->tx_rings;
1981 struct rx_ring *rxr = adapter->rx_rings;
1982 struct ixgbe_hw *hw = &adapter->hw;
1983 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1984
1985 adapter->efbig_tx_dma_setup.ev_count = 0;
1986 adapter->mbuf_defrag_failed.ev_count = 0;
1987 adapter->efbig2_tx_dma_setup.ev_count = 0;
1988 adapter->einval_tx_dma_setup.ev_count = 0;
1989 adapter->other_tx_dma_setup.ev_count = 0;
1990 adapter->eagain_tx_dma_setup.ev_count = 0;
1991 adapter->enomem_tx_dma_setup.ev_count = 0;
1992 adapter->tso_err.ev_count = 0;
1993 adapter->watchdog_events.ev_count = 0;
1994 adapter->link_irq.ev_count = 0;
1995 adapter->link_sicount.ev_count = 0;
1996 adapter->mod_sicount.ev_count = 0;
1997 adapter->msf_sicount.ev_count = 0;
1998 adapter->phy_sicount.ev_count = 0;
1999
2000 txr = adapter->tx_rings;
2001 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2002 adapter->queues[i].irqs.ev_count = 0;
2003 adapter->queues[i].handleq.ev_count = 0;
2004 adapter->queues[i].req.ev_count = 0;
2005 txr->no_desc_avail.ev_count = 0;
2006 txr->total_packets.ev_count = 0;
2007 txr->tso_tx.ev_count = 0;
2008 #ifndef IXGBE_LEGACY_TX
2009 txr->pcq_drops.ev_count = 0;
2010 #endif
2011 txr->q_efbig_tx_dma_setup = 0;
2012 txr->q_mbuf_defrag_failed = 0;
2013 txr->q_efbig2_tx_dma_setup = 0;
2014 txr->q_einval_tx_dma_setup = 0;
2015 txr->q_other_tx_dma_setup = 0;
2016 txr->q_eagain_tx_dma_setup = 0;
2017 txr->q_enomem_tx_dma_setup = 0;
2018 txr->q_tso_err = 0;
2019
2020 if (i < __arraycount(stats->mpc)) {
2021 stats->mpc[i].ev_count = 0;
2022 if (hw->mac.type == ixgbe_mac_82598EB)
2023 stats->rnbc[i].ev_count = 0;
2024 }
2025 if (i < __arraycount(stats->pxontxc)) {
2026 stats->pxontxc[i].ev_count = 0;
2027 stats->pxonrxc[i].ev_count = 0;
2028 stats->pxofftxc[i].ev_count = 0;
2029 stats->pxoffrxc[i].ev_count = 0;
2030 stats->pxon2offc[i].ev_count = 0;
2031 }
2032 if (i < __arraycount(stats->qprc)) {
2033 stats->qprc[i].ev_count = 0;
2034 stats->qptc[i].ev_count = 0;
2035 stats->qbrc[i].ev_count = 0;
2036 stats->qbtc[i].ev_count = 0;
2037 stats->qprdc[i].ev_count = 0;
2038 }
2039
2040 rxr->rx_packets.ev_count = 0;
2041 rxr->rx_bytes.ev_count = 0;
2042 rxr->rx_copies.ev_count = 0;
2043 rxr->no_jmbuf.ev_count = 0;
2044 rxr->rx_discarded.ev_count = 0;
2045 }
2046 stats->ipcs.ev_count = 0;
2047 stats->l4cs.ev_count = 0;
2048 stats->ipcs_bad.ev_count = 0;
2049 stats->l4cs_bad.ev_count = 0;
2050 stats->intzero.ev_count = 0;
2051 stats->legint.ev_count = 0;
2052 stats->crcerrs.ev_count = 0;
2053 stats->illerrc.ev_count = 0;
2054 stats->errbc.ev_count = 0;
2055 stats->mspdc.ev_count = 0;
2056 stats->mbsdc.ev_count = 0;
2057 stats->mpctotal.ev_count = 0;
2058 stats->mlfc.ev_count = 0;
2059 stats->mrfc.ev_count = 0;
2060 stats->rlec.ev_count = 0;
2061 stats->lxontxc.ev_count = 0;
2062 stats->lxonrxc.ev_count = 0;
2063 stats->lxofftxc.ev_count = 0;
2064 stats->lxoffrxc.ev_count = 0;
2065
2066 /* Packet Reception Stats */
2067 stats->tor.ev_count = 0;
2068 stats->gorc.ev_count = 0;
2069 stats->tpr.ev_count = 0;
2070 stats->gprc.ev_count = 0;
2071 stats->mprc.ev_count = 0;
2072 stats->bprc.ev_count = 0;
2073 stats->prc64.ev_count = 0;
2074 stats->prc127.ev_count = 0;
2075 stats->prc255.ev_count = 0;
2076 stats->prc511.ev_count = 0;
2077 stats->prc1023.ev_count = 0;
2078 stats->prc1522.ev_count = 0;
2079 stats->ruc.ev_count = 0;
2080 stats->rfc.ev_count = 0;
2081 stats->roc.ev_count = 0;
2082 stats->rjc.ev_count = 0;
2083 stats->mngprc.ev_count = 0;
2084 stats->mngpdc.ev_count = 0;
2085 stats->xec.ev_count = 0;
2086
2087 /* Packet Transmission Stats */
2088 stats->gotc.ev_count = 0;
2089 stats->tpt.ev_count = 0;
2090 stats->gptc.ev_count = 0;
2091 stats->bptc.ev_count = 0;
2092 stats->mptc.ev_count = 0;
2093 stats->mngptc.ev_count = 0;
2094 stats->ptc64.ev_count = 0;
2095 stats->ptc127.ev_count = 0;
2096 stats->ptc255.ev_count = 0;
2097 stats->ptc511.ev_count = 0;
2098 stats->ptc1023.ev_count = 0;
2099 stats->ptc1522.ev_count = 0;
2100 }
2101
2102 /************************************************************************
2103 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2104 *
2105 * Retrieves the TDH value from the hardware
2106 ************************************************************************/
2107 static int
2108 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2109 {
2110 struct sysctlnode node = *rnode;
2111 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2112 uint32_t val;
2113
2114 if (!txr)
2115 return (0);
2116
2117 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2118 node.sysctl_data = &val;
2119 return sysctl_lookup(SYSCTLFN_CALL(&node));
2120 } /* ixgbe_sysctl_tdh_handler */
2121
2122 /************************************************************************
2123 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2124 *
2125 * Retrieves the TDT value from the hardware
2126 ************************************************************************/
2127 static int
2128 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2129 {
2130 struct sysctlnode node = *rnode;
2131 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2132 uint32_t val;
2133
2134 if (!txr)
2135 return (0);
2136
2137 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2138 node.sysctl_data = &val;
2139 return sysctl_lookup(SYSCTLFN_CALL(&node));
2140 } /* ixgbe_sysctl_tdt_handler */
2141
2142 /************************************************************************
2143 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2144 *
2145 * Retrieves the RDH value from the hardware
2146 ************************************************************************/
2147 static int
2148 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2149 {
2150 struct sysctlnode node = *rnode;
2151 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2152 uint32_t val;
2153
2154 if (!rxr)
2155 return (0);
2156
2157 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2158 node.sysctl_data = &val;
2159 return sysctl_lookup(SYSCTLFN_CALL(&node));
2160 } /* ixgbe_sysctl_rdh_handler */
2161
2162 /************************************************************************
2163 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2164 *
2165 * Retrieves the RDT value from the hardware
2166 ************************************************************************/
2167 static int
2168 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2169 {
2170 struct sysctlnode node = *rnode;
2171 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2172 uint32_t val;
2173
2174 if (!rxr)
2175 return (0);
2176
2177 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2178 node.sysctl_data = &val;
2179 return sysctl_lookup(SYSCTLFN_CALL(&node));
2180 } /* ixgbe_sysctl_rdt_handler */
2181
2182 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixgbe_register_vlan
 *
 * Run via vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This
 * just creates the entry in the soft version of the
 * VFTA, init will repopulate the real table.
 *
 * NOTE: currently compiled out (#if 0 above) pending a vlan(4)
 * overhaul on NetBSD.
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each shadow VFTA word covers 32 VLAN IDs: word = vtag/32, bit = vtag%32 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated shadow table out to the hardware */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2210
/************************************************************************
 * ixgbe_unregister_vlan
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 *
 * NOTE: currently compiled out (#if 0 above) pending a vlan(4)
 * overhaul on NetBSD.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit mapping as ixgbe_register_vlan */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2236 #endif
2237
2238 static void
2239 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2240 {
2241 struct ethercom *ec = &adapter->osdep.ec;
2242 struct ixgbe_hw *hw = &adapter->hw;
2243 struct rx_ring *rxr;
2244 int i;
2245 u32 ctrl;
2246
2247
2248 /*
2249 * We get here thru init_locked, meaning
2250 * a soft reset, this has already cleared
2251 * the VFTA and other state, so if there
2252 * have been no vlan's registered do nothing.
2253 */
2254 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2255 return;
2256
2257 /* Setup the queues for vlans */
2258 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2259 for (i = 0; i < adapter->num_queues; i++) {
2260 rxr = &adapter->rx_rings[i];
2261 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2262 if (hw->mac.type != ixgbe_mac_82598EB) {
2263 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2264 ctrl |= IXGBE_RXDCTL_VME;
2265 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2266 }
2267 rxr->vtag_strip = TRUE;
2268 }
2269 }
2270
2271 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2272 return;
2273 /*
2274 * A soft reset zero's out the VFTA, so
2275 * we need to repopulate it now.
2276 */
2277 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2278 if (adapter->shadow_vfta[i] != 0)
2279 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2280 adapter->shadow_vfta[i]);
2281
2282 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2283 /* Enable the Filter Table if enabled */
2284 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2285 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2286 ctrl |= IXGBE_VLNCTRL_VFE;
2287 }
2288 if (hw->mac.type == ixgbe_mac_82598EB)
2289 ctrl |= IXGBE_VLNCTRL_VME;
2290 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2291 } /* ixgbe_setup_vlan_hw_support */
2292
/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 * Prints the negotiated PCIe speed/width and warns when the slot
 * provides less bandwidth than the adapter can use.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
//	struct ixgbe_mac_info *mac = &hw->mac;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 *
	 * NOTE(review): this assumes the physical slot is exactly two
	 * device levels up from the function device — confirm this holds
	 * for all QSFP/SFP quad-port board topologies.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/*
	 * ...and read the Link Status Register: it occupies the upper
	 * 16 bits of the 32-bit Link Control/Status register (LCSR).
	 */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	if (bus_info_valid) {
		/* Most adapters need x8 Gen1 or x4 Gen2 for full line rate */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		/* The SFP quad-port needs x8 Gen3 */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2401
2402 /************************************************************************
2403 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2404 ************************************************************************/
2405 static inline void
2406 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2407 {
2408 struct ixgbe_hw *hw = &adapter->hw;
2409 struct ix_queue *que = &adapter->queues[vector];
2410 u64 queue = (u64)(1ULL << vector);
2411 u32 mask;
2412
2413 mutex_enter(&que->dc_mtx);
2414 if (que->disabled_count > 0 && --que->disabled_count > 0)
2415 goto out;
2416
2417 if (hw->mac.type == ixgbe_mac_82598EB) {
2418 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2419 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2420 } else {
2421 mask = (queue & 0xFFFFFFFF);
2422 if (mask)
2423 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2424 mask = (queue >> 32);
2425 if (mask)
2426 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2427 }
2428 out:
2429 mutex_exit(&que->dc_mtx);
2430 } /* ixgbe_enable_queue */
2431
2432 /************************************************************************
2433 * ixgbe_disable_queue_internal
2434 ************************************************************************/
2435 static inline void
2436 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2437 {
2438 struct ixgbe_hw *hw = &adapter->hw;
2439 struct ix_queue *que = &adapter->queues[vector];
2440 u64 queue = (u64)(1ULL << vector);
2441 u32 mask;
2442
2443 mutex_enter(&que->dc_mtx);
2444
2445 if (que->disabled_count > 0) {
2446 if (nestok)
2447 que->disabled_count++;
2448 goto out;
2449 }
2450 que->disabled_count++;
2451
2452 if (hw->mac.type == ixgbe_mac_82598EB) {
2453 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2454 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2455 } else {
2456 mask = (queue & 0xFFFFFFFF);
2457 if (mask)
2458 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2459 mask = (queue >> 32);
2460 if (mask)
2461 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2462 }
2463 out:
2464 mutex_exit(&que->dc_mtx);
2465 } /* ixgbe_disable_queue_internal */
2466
2467 /************************************************************************
2468 * ixgbe_disable_queue
2469 ************************************************************************/
2470 static inline void
2471 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2472 {
2473
2474 ixgbe_disable_queue_internal(adapter, vector, true);
2475 } /* ixgbe_disable_queue */
2476
2477 /************************************************************************
2478 * ixgbe_sched_handle_que - schedule deferred packet processing
2479 ************************************************************************/
2480 static inline void
2481 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2482 {
2483
2484 if (adapter->txrx_use_workqueue) {
2485 /*
2486 * adapter->que_wq is bound to each CPU instead of
2487 * each NIC queue to reduce workqueue kthread. As we
2488 * should consider about interrupt affinity in this
2489 * function, the workqueue kthread must be WQ_PERCPU.
2490 * If create WQ_PERCPU workqueue kthread for each NIC
2491 * queue, that number of created workqueue kthread is
2492 * (number of used NIC queue) * (number of CPUs) =
2493 * (number of CPUs) ^ 2 most often.
2494 *
2495 * The same NIC queue's interrupts are avoided by
2496 * masking the queue's interrupt. And different
2497 * NIC queue's interrupts use different struct work
2498 * (que->wq_cookie). So, "enqueued flag" to avoid
2499 * twice workqueue_enqueue() is not required .
2500 */
2501 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2502 } else {
2503 softint_schedule(que->que_si);
2504 }
2505 }
2506
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler: masks the queue interrupt, reaps TX
 *   completions, updates the adaptive interrupt moderation (AIM)
 *   setting, then either schedules deferred processing or re-enables
 *   the interrupt.  Returns 1 when the interrupt was handled, 0 if
 *   ignored (interface not running).
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until processing completes (nestable) */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 *   the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Use average bytes/packet over the interval as the load proxy */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* more is always true on NetBSD: RX is processed in softint/wq */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2602
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 * Called whenever the user queries the status of
 * the interface using ifconfig.
 *
 * Maps the adapter's current PHY layer(s) and negotiated link speed to
 * an ifmedia word, and also reports the flow-control pause state.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link_active/link_speed before reporting them. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and stop; the speed mapping is moot. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T / BASE-TX): one subtype per negotiated speed. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper (passive or active). */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber; 1G on an LR module is reported as 1000BASE-LX. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber; 1G on an SR/SX module is 1000BASE-SX. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Backplane KR/KX4/KX fall back to SR/CX4
	 * subtypes when IFM_ETH_XTYPE is not available.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2758
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
 *
 * Translates the requested ifmedia subtype into an ixgbe_link_speed
 * mask, asks the MAC to set up the link, and records the selection in
 * adapter->advertise.  Returns 0 on success, EINVAL for an unsupported
 * media word, ENODEV for backplane media or when the supported speeds
 * cannot be determined.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane (KR/KX4/KX) links are not user-settable. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware reports it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the explicit selection in the advertise bitmap used by
	 * the advertise_speed sysctl:
	 * bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M,
	 * bit 4 = 2.5G, bit 5 = 5G.  Zero means "autonegotiate".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2861
/************************************************************************
 * ixgbe_set_promisc
 *
 * Program the FCTRL register's unicast (UPE) and multicast (MPE)
 * promiscuous bits from the interface's IFF_PROMISC/IFF_ALLMULTI
 * flags and the current number of joined multicast groups.
 * Caller must hold the core lock.
 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	/*
	 * Count joined multicast groups (capped at the hardware filter
	 * limit).  IFF_ALLMULTI forces the cap so MPE stays set below.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* All groups fit in the filter table: no need for multicast promisc. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Promisc implies both unicast and multicast promiscuous mode. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2904
/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 * Handles all "other" (non-queue) interrupt causes: link state change,
 * flow director reinit, ECC errors, over-temperature, VF mailbox, SFP
 * module insertion/removal, fan failure and external PHY events.  Real
 * work is deferred to softints; this routine only acks causes and
 * schedules.  Always returns 1 (interrupt claimed).
 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* Causes below only exist on 82599 and later MACs. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				/*
				 * NOTE(review): this early return skips the
				 * EIMS_OTHER re-enable at the bottom of the
				 * function -- confirm that is intended.
				 */
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via GPI SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599: multispeed fiber needs its own handler. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3029
3030 static void
3031 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
3032 {
3033 struct adapter *adapter = que->adapter;
3034
3035 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3036 itr |= itr << 16;
3037 else
3038 itr |= IXGBE_EITR_CNT_WDIS;
3039
3040 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
3041 itr);
3042 }
3043
3044
3045 /************************************************************************
3046 * ixgbe_sysctl_interrupt_rate_handler
3047 ************************************************************************/
3048 static int
3049 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3050 {
3051 struct sysctlnode node = *rnode;
3052 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3053 struct adapter *adapter = que->adapter;
3054 uint32_t reg, usec, rate;
3055 int error;
3056
3057 if (que == NULL)
3058 return 0;
3059 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3060 usec = ((reg & 0x0FF8) >> 3);
3061 if (usec > 0)
3062 rate = 500000 / usec;
3063 else
3064 rate = 0;
3065 node.sysctl_data = &rate;
3066 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3067 if (error || newp == NULL)
3068 return error;
3069 reg &= ~0xfff; /* default, no limitation */
3070 if (rate > 0 && rate < 500000) {
3071 if (rate < 1000)
3072 rate = 1000;
3073 reg |= ((4000000/rate) & 0xff8);
3074 /*
3075 * When RSC is used, ITR interval must be larger than
3076 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3077 * The minimum value is always greater than 2us on 100M
3078 * (and 10M?(not documented)), but it's not on 1G and higher.
3079 */
3080 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3081 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3082 if ((adapter->num_queues > 1)
3083 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3084 return EINVAL;
3085 }
3086 ixgbe_max_interrupt_rate = rate;
3087 } else
3088 ixgbe_max_interrupt_rate = 0;
3089 ixgbe_eitr_write(que, reg);
3090
3091 return (0);
3092 } /* ixgbe_sysctl_interrupt_rate_handler */
3093
3094 const struct sysctlnode *
3095 ixgbe_sysctl_instance(struct adapter *adapter)
3096 {
3097 const char *dvname;
3098 struct sysctllog **log;
3099 int rc;
3100 const struct sysctlnode *rnode;
3101
3102 if (adapter->sysctltop != NULL)
3103 return adapter->sysctltop;
3104
3105 log = &adapter->sysctllog;
3106 dvname = device_xname(adapter->dev);
3107
3108 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3109 0, CTLTYPE_NODE, dvname,
3110 SYSCTL_DESCR("ixgbe information and settings"),
3111 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3112 goto err;
3113
3114 return rnode;
3115 err:
3116 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3117 return NULL;
3118 }
3119
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 * Register all per-device sysctl nodes under hw.<devname>: read-only
 * descriptor/queue counts, tunables common to all MACs, and feature-
 * gated nodes (DMA coalescing on X550+, WoL and external-PHY nodes on
 * X552/X557-AT, EEE when the capability is present).  Failures are
 * reported but non-fatal; registration simply continues.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only configuration summary nodes. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the per-device AIM flag from the global tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the per-device workqueue flag from the global tunable. */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* External-PHY children live under their own "phy" node. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3251
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 * Map the device's register window (BAR0) into kernel virtual space.
 * Only 32/64-bit memory BARs are accepted.  The prefetchable flag is
 * stripped before mapping so register accesses are not prefetched.
 * Returns 0 on success, ENXIO on any mapping failure.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3292
3293 static void
3294 ixgbe_free_softint(struct adapter *adapter)
3295 {
3296 struct ix_queue *que = adapter->queues;
3297 struct tx_ring *txr = adapter->tx_rings;
3298 int i;
3299
3300 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3301 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3302 if (txr->txr_si != NULL)
3303 softint_disestablish(txr->txr_si);
3304 }
3305 if (que->que_si != NULL)
3306 softint_disestablish(que->que_si);
3307 }
3308 if (adapter->txr_wq != NULL)
3309 workqueue_destroy(adapter->txr_wq);
3310 if (adapter->txr_wq_enqueued != NULL)
3311 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3312 if (adapter->que_wq != NULL)
3313 workqueue_destroy(adapter->que_wq);
3314
3315 /* Drain the Link queue */
3316 if (adapter->link_si != NULL) {
3317 softint_disestablish(adapter->link_si);
3318 adapter->link_si = NULL;
3319 }
3320 if (adapter->mod_si != NULL) {
3321 softint_disestablish(adapter->mod_si);
3322 adapter->mod_si = NULL;
3323 }
3324 if (adapter->msf_si != NULL) {
3325 softint_disestablish(adapter->msf_si);
3326 adapter->msf_si = NULL;
3327 }
3328 if (adapter->phy_si != NULL) {
3329 softint_disestablish(adapter->phy_si);
3330 adapter->phy_si = NULL;
3331 }
3332 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3333 if (adapter->fdir_si != NULL) {
3334 softint_disestablish(adapter->fdir_si);
3335 adapter->fdir_si = NULL;
3336 }
3337 }
3338 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3339 if (adapter->mbx_si != NULL) {
3340 softint_disestablish(adapter->mbx_si);
3341 adapter->mbx_si = NULL;
3342 }
3343 }
3344 } /* ixgbe_free_softint */
3345
/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * Teardown order: refuse if SR-IOV/VLANs are in use, stop the
 * interface, detach from ether/pmf, enter low-power mode, free
 * softints, tell firmware the driver is unloading, halt callouts,
 * detach netmap, unmap PCI resources, detach the ifnet, tear down
 * sysctls and event counters, and finally free ring/queue memory.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Adapter-wide event counters. */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue/ring counters; hw stats arrays may be shorter than
	 * num_queues, hence the __arraycount bounds checks. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			/* rnbc is only attached on 82598. */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* Checksum-offload and interrupt statistics. */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	/* MAC error and flow-control statistics. */
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	/* mbsdc is only attached on X550 and newer. */
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Ring structures, queue locks, and filter table memory. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3538
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU and/or WoL
 *
 * Stops the adapter (via the special LPLU flow on X550EM baseT, plain
 * ixgbe_stop() elsewhere), then either powers the PHY down with wake
 * registers cleared (WoL disabled) or programs WUS/WUFC/WUC for
 * wakeup (WoL enabled).  Caller must hold the core lock.
 * Returns 0 on success or the enter_lplu error code.
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY up across the stop so LPLU can be entered. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3598
/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 *
 * Disabled: would place the adapter in low-power mode at system
 * shutdown, as the FreeBSD driver does via its shutdown method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3618
3619 /************************************************************************
3620 * ixgbe_suspend
3621 *
3622 * From D0 to D3
3623 ************************************************************************/
3624 static bool
3625 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3626 {
3627 struct adapter *adapter = device_private(dev);
3628 int error = 0;
3629
3630 INIT_DEBUGOUT("ixgbe_suspend: begin");
3631
3632 IXGBE_CORE_LOCK(adapter);
3633
3634 error = ixgbe_setup_low_power_mode(adapter);
3635
3636 IXGBE_CORE_UNLOCK(adapter);
3637
3638 return (error);
3639 } /* ixgbe_suspend */
3640
3641 /************************************************************************
3642 * ixgbe_resume
3643 *
3644 * From D3 to D0
3645 ************************************************************************/
3646 static bool
3647 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3648 {
3649 struct adapter *adapter = device_private(dev);
3650 struct ifnet *ifp = adapter->ifp;
3651 struct ixgbe_hw *hw = &adapter->hw;
3652 u32 wus;
3653
3654 INIT_DEBUGOUT("ixgbe_resume: begin");
3655
3656 IXGBE_CORE_LOCK(adapter);
3657
3658 /* Read & clear WUS register */
3659 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3660 if (wus)
3661 device_printf(dev, "Woken up by (WUS): %#010x\n",
3662 IXGBE_READ_REG(hw, IXGBE_WUS));
3663 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3664 /* And clear WUFC until next low-power transition */
3665 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3666
3667 /*
3668 * Required after D3->D0 transition;
3669 * will re-advertise all previous advertised speeds
3670 */
3671 if (ifp->if_flags & IFF_UP)
3672 ixgbe_init_locked(adapter);
3673
3674 IXGBE_CORE_UNLOCK(adapter);
3675
3676 return true;
3677 } /* ixgbe_resume */
3678
/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty in this NetBSD port; if_hwassist is a
	 * FreeBSD concept.  NOTE(review): presumably offload capability
	 * handling happens via if_capenable elsewhere -- confirm.
	 */
}
3691
/************************************************************************
 * ixgbe_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init
 * entry point in network interface structure. It is also
 * used by the driver as a hw/sw initialization routine to
 * get to a consistent state.
 *
 * Caller must hold the adapter core lock.  Returns nothing;
 * failures are reported via device_printf() and leave the
 * adapter stopped.
 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	int		err = 0;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the MAC and the periodic timer before reconfiguring */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	/* NOTE(review): no VLAN-tag allowance added here -- confirm intended */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* Program RAR[0] again with the (possibly changed) LAA, mark valid */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * Poll for the enable bit to latch.  NOTE(review): 'j' is
		 * not reset per queue, bounding the total wait across all
		 * queues to ~10ms -- confirm this is intentional (later
		 * queues may not be polled at all).
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic link-state/watchdog timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
3932
3933 /************************************************************************
3934 * ixgbe_init
3935 ************************************************************************/
3936 static int
3937 ixgbe_init(struct ifnet *ifp)
3938 {
3939 struct adapter *adapter = ifp->if_softc;
3940
3941 IXGBE_CORE_LOCK(adapter);
3942 ixgbe_init_locked(adapter);
3943 IXGBE_CORE_UNLOCK(adapter);
3944
3945 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
3946 } /* ixgbe_init */
3947
3948 /************************************************************************
3949 * ixgbe_set_ivar
3950 *
3951 * Setup the correct IVAR register for a particular MSI-X interrupt
3952 * (yes this is all very magic and confusing :)
3953 * - entry is the register array entry
3954 * - vector is the MSI-X vector for this queue
3955 * - type is RX/TX/MISC
3956 ************************************************************************/
3957 static void
3958 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3959 {
3960 struct ixgbe_hw *hw = &adapter->hw;
3961 u32 ivar, index;
3962
3963 vector |= IXGBE_IVAR_ALLOC_VAL;
3964
3965 switch (hw->mac.type) {
3966 case ixgbe_mac_82598EB:
3967 if (type == -1)
3968 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3969 else
3970 entry += (type * 64);
3971 index = (entry >> 2) & 0x1F;
3972 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3973 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3974 ivar |= (vector << (8 * (entry & 0x3)));
3975 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3976 break;
3977 case ixgbe_mac_82599EB:
3978 case ixgbe_mac_X540:
3979 case ixgbe_mac_X550:
3980 case ixgbe_mac_X550EM_x:
3981 case ixgbe_mac_X550EM_a:
3982 if (type == -1) { /* MISC IVAR */
3983 index = (entry & 1) * 8;
3984 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3985 ivar &= ~(0xFF << index);
3986 ivar |= (vector << index);
3987 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3988 } else { /* RX/TX IVARS */
3989 index = (16 * (entry & 1)) + (8 * type);
3990 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3991 ivar &= ~(0xFF << index);
3992 ivar |= (vector << index);
3993 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3994 }
3995 break;
3996 default:
3997 break;
3998 }
3999 } /* ixgbe_set_ivar */
4000
4001 /************************************************************************
4002 * ixgbe_configure_ivars
4003 ************************************************************************/
4004 static void
4005 ixgbe_configure_ivars(struct adapter *adapter)
4006 {
4007 struct ix_queue *que = adapter->queues;
4008 u32 newitr;
4009
4010 if (ixgbe_max_interrupt_rate > 0)
4011 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4012 else {
4013 /*
4014 * Disable DMA coalescing if interrupt moderation is
4015 * disabled.
4016 */
4017 adapter->dmac = 0;
4018 newitr = 0;
4019 }
4020
4021 for (int i = 0; i < adapter->num_queues; i++, que++) {
4022 struct rx_ring *rxr = &adapter->rx_rings[i];
4023 struct tx_ring *txr = &adapter->tx_rings[i];
4024 /* First the RX queue entry */
4025 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4026 /* ... and the TX */
4027 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4028 /* Set an Initial EITR value */
4029 ixgbe_eitr_write(que, newitr);
4030 /*
4031 * To eliminate influence of the previous state.
4032 * At this point, Tx/Rx interrupt handler
4033 * (ixgbe_msix_que()) cannot be called, so both
4034 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4035 */
4036 que->eitr_setting = 0;
4037 }
4038
4039 /* For the Link interrupt */
4040 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4041 } /* ixgbe_configure_ivars */
4042
4043 /************************************************************************
4044 * ixgbe_config_gpie
4045 ************************************************************************/
4046 static void
4047 ixgbe_config_gpie(struct adapter *adapter)
4048 {
4049 struct ixgbe_hw *hw = &adapter->hw;
4050 u32 gpie;
4051
4052 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4053
4054 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4055 /* Enable Enhanced MSI-X mode */
4056 gpie |= IXGBE_GPIE_MSIX_MODE
4057 | IXGBE_GPIE_EIAME
4058 | IXGBE_GPIE_PBA_SUPPORT
4059 | IXGBE_GPIE_OCD;
4060 }
4061
4062 /* Fan Failure Interrupt */
4063 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4064 gpie |= IXGBE_SDP1_GPIEN;
4065
4066 /* Thermal Sensor Interrupt */
4067 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4068 gpie |= IXGBE_SDP0_GPIEN_X540;
4069
4070 /* Link detection */
4071 switch (hw->mac.type) {
4072 case ixgbe_mac_82599EB:
4073 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4074 break;
4075 case ixgbe_mac_X550EM_x:
4076 case ixgbe_mac_X550EM_a:
4077 gpie |= IXGBE_SDP0_GPIEN_X540;
4078 break;
4079 default:
4080 break;
4081 }
4082
4083 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4084
4085 } /* ixgbe_config_gpie */
4086
4087 /************************************************************************
4088 * ixgbe_config_delay_values
4089 *
4090 * Requires adapter->max_frame_size to be set.
4091 ************************************************************************/
4092 static void
4093 ixgbe_config_delay_values(struct adapter *adapter)
4094 {
4095 struct ixgbe_hw *hw = &adapter->hw;
4096 u32 rxpb, frame, size, tmp;
4097
4098 frame = adapter->max_frame_size;
4099
4100 /* Calculate High Water */
4101 switch (hw->mac.type) {
4102 case ixgbe_mac_X540:
4103 case ixgbe_mac_X550:
4104 case ixgbe_mac_X550EM_x:
4105 case ixgbe_mac_X550EM_a:
4106 tmp = IXGBE_DV_X540(frame, frame);
4107 break;
4108 default:
4109 tmp = IXGBE_DV(frame, frame);
4110 break;
4111 }
4112 size = IXGBE_BT2KB(tmp);
4113 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4114 hw->fc.high_water[0] = rxpb - size;
4115
4116 /* Now calculate Low Water */
4117 switch (hw->mac.type) {
4118 case ixgbe_mac_X540:
4119 case ixgbe_mac_X550:
4120 case ixgbe_mac_X550EM_x:
4121 case ixgbe_mac_X550EM_a:
4122 tmp = IXGBE_LOW_DV_X540(frame);
4123 break;
4124 default:
4125 tmp = IXGBE_LOW_DV(frame);
4126 break;
4127 }
4128 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4129
4130 hw->fc.pause_time = IXGBE_FC_PAUSE;
4131 hw->fc.send_xon = TRUE;
4132 } /* ixgbe_config_delay_values */
4133
4134 /************************************************************************
4135 * ixgbe_set_multi - Multicast Update
4136 *
4137 * Called whenever multicast address list is updated.
4138 ************************************************************************/
4139 static void
4140 ixgbe_set_multi(struct adapter *adapter)
4141 {
4142 struct ixgbe_mc_addr *mta;
4143 struct ifnet *ifp = adapter->ifp;
4144 u8 *update_ptr;
4145 int mcnt = 0;
4146 u32 fctrl;
4147 struct ethercom *ec = &adapter->osdep.ec;
4148 struct ether_multi *enm;
4149 struct ether_multistep step;
4150
4151 KASSERT(mutex_owned(&adapter->core_mtx));
4152 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
4153
4154 mta = adapter->mta;
4155 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4156
4157 ifp->if_flags &= ~IFF_ALLMULTI;
4158 ETHER_LOCK(ec);
4159 ETHER_FIRST_MULTI(step, ec, enm);
4160 while (enm != NULL) {
4161 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4162 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4163 ETHER_ADDR_LEN) != 0)) {
4164 ifp->if_flags |= IFF_ALLMULTI;
4165 break;
4166 }
4167 bcopy(enm->enm_addrlo,
4168 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4169 mta[mcnt].vmdq = adapter->pool;
4170 mcnt++;
4171 ETHER_NEXT_MULTI(step, enm);
4172 }
4173 ETHER_UNLOCK(ec);
4174
4175 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4176 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4177 if (ifp->if_flags & IFF_PROMISC)
4178 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4179 else if (ifp->if_flags & IFF_ALLMULTI) {
4180 fctrl |= IXGBE_FCTRL_MPE;
4181 }
4182
4183 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4184
4185 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4186 update_ptr = (u8 *)mta;
4187 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4188 ixgbe_mc_array_itr, TRUE);
4189 }
4190
4191 } /* ixgbe_set_multi */
4192
4193 /************************************************************************
4194 * ixgbe_mc_array_itr
4195 *
4196 * An iterator function needed by the multicast shared code.
4197 * It feeds the shared code routine the addresses in the
4198 * array of ixgbe_set_multi() one by one.
4199 ************************************************************************/
4200 static u8 *
4201 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4202 {
4203 struct ixgbe_mc_addr *mta;
4204
4205 mta = (struct ixgbe_mc_addr *)*update_ptr;
4206 *vmdq = mta->vmdq;
4207
4208 *update_ptr = (u8*)(mta + 1);
4209
4210 return (mta->addr);
4211 } /* ixgbe_mc_array_itr */
4212
/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 * Callout entry point: takes the core lock and runs the real work in
 * ixgbe_local_timer1() (link status, statistics, watchdog check).
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4228
/*
 * Core-locked body of the periodic timer: probes hot-plugged optics,
 * refreshes link state and statistics, aggregates per-queue TX error
 * counters into the adapter event counters, and runs the per-queue
 * hang detection / watchdog.  Re-arms itself every hz ticks unless it
 * resets the adapter.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters: sum each error class over all queues */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			/* Only rearm queues that are not explicitly disabled */
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}

out:
	/* Re-arm the periodic timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, count, and reinitialize the adapter */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4331
4332 /************************************************************************
4333 * ixgbe_sfp_probe
4334 *
4335 * Determine if a port had optics inserted.
4336 ************************************************************************/
4337 static bool
4338 ixgbe_sfp_probe(struct adapter *adapter)
4339 {
4340 struct ixgbe_hw *hw = &adapter->hw;
4341 device_t dev = adapter->dev;
4342 bool result = FALSE;
4343
4344 if ((hw->phy.type == ixgbe_phy_nl) &&
4345 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4346 s32 ret = hw->phy.ops.identify_sfp(hw);
4347 if (ret)
4348 goto out;
4349 ret = hw->phy.ops.reset(hw);
4350 adapter->sfp_probe = FALSE;
4351 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4352 device_printf(dev,"Unsupported SFP+ module detected!");
4353 device_printf(dev,
4354 "Reload driver with supported module.\n");
4355 goto out;
4356 } else
4357 device_printf(dev, "SFP+ module detected!\n");
4358 /* We now have supported optics */
4359 result = TRUE;
4360 }
4361 out:
4362
4363 return (result);
4364 } /* ixgbe_sfp_probe */
4365
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 * Softint run when a module insertion/removal interrupt fires:
 * identifies the new module, runs the MAC's SFP setup, and then
 * schedules the multispeed-fiber tasklet to renegotiate the link.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * Devices with the crosstalk errata can raise spurious module
	 * interrupts; check the cage-presence SDP pin and bail out if
	 * no module is actually seated.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Let the MSF tasklet bring the link up on the new module */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4412
4413
/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 * Re-reads the supported physical layer, (re)negotiates link speed,
 * and rebuilds the ifmedia list to match the current module.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	/* If nothing is advertised, fall back to the hardware's capabilities */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4442
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 *
 * Dispatches LASI (Link Alarm Status Interrupt) handling to the PHY
 * ops and logs over-temperature or other failures.
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	++adapter->phy_sicount.ev_count;
	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */
4463
4464 static void
4465 ixgbe_ifstop(struct ifnet *ifp, int disable)
4466 {
4467 struct adapter *adapter = ifp->if_softc;
4468
4469 IXGBE_CORE_LOCK(adapter);
4470 ixgbe_stop(adapter);
4471 IXGBE_CORE_UNLOCK(adapter);
4472 }
4473
/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 *
 * Caller must hold the adapter core lock.
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the periodic timer first */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so ixgbe_stop_adapter() runs its full path */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4515
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 * The real check of the hardware only happens with
 * a link interrupt.
 *
 * Caller must hold the adapter core lock.  On an up/down edge this
 * notifies the stack, refreshes flow control and DMA coalescing, and
 * pings the VFs when SR-IOV is enabled.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down->up transition */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up->down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}
} /* ixgbe_update_link_status */
4602
4603 /************************************************************************
4604 * ixgbe_config_dmac - Configure DMA Coalescing
4605 ************************************************************************/
4606 static void
4607 ixgbe_config_dmac(struct adapter *adapter)
4608 {
4609 struct ixgbe_hw *hw = &adapter->hw;
4610 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4611
4612 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4613 return;
4614
4615 if (dcfg->watchdog_timer ^ adapter->dmac ||
4616 dcfg->link_speed ^ adapter->link_speed) {
4617 dcfg->watchdog_timer = adapter->dmac;
4618 dcfg->fcoe_en = false;
4619 dcfg->link_speed = adapter->link_speed;
4620 dcfg->num_tcs = 1;
4621
4622 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4623 dcfg->watchdog_timer, dcfg->link_speed);
4624
4625 hw->mac.ops.dmac_config(hw);
4626 }
4627 } /* ixgbe_config_dmac */
4628
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Unmask device interrupts: build the miscellaneous-cause EIMS mask
 *   appropriate for this MAC and the enabled feature set, configure
 *   MSI-X auto-clear, then enable every queue vector.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start with everything except the per-queue bits (enabled below) */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific "other cause" bits */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		/* External PHYs signal through SDP0 on these parts */
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
4713
/************************************************************************
 * ixgbe_disable_intr_internal
 *
 *   Mask all device interrupts: the miscellaneous causes first, then
 *   MSI-X auto-clear, then each queue vector.  'nestok' is passed
 *   through to ixgbe_disable_queue_internal() (semantics defined
 *   there).
 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* MSI-X: also turn off auto-clear so nothing re-arms behind us */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4734
/************************************************************************
 * ixgbe_disable_intr
 *
 *   Disable all interrupts; nested disables are tolerated
 *   (nestok = true).
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4744
/************************************************************************
 * ixgbe_ensure_disabled_intr
 *
 *   Same as ixgbe_disable_intr() but with nestok = false; see
 *   ixgbe_disable_queue_internal() for the distinction.
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4754
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single-vector (INTx/MSI) handler: services queue work and all
 *   "other" causes (link, SFP module/MSF, external PHY, fan failure).
 *   Returns 0 when EICR read back zero (interrupt not ours), else 1.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR also acknowledges/clears the cause bits */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Shared line fired for someone else; re-arm and bail */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* TX completions are cheap enough to reap here */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: defer to the mod softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed-fiber: defer to the msf softint */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Deferred RX work pending: hand off instead of re-enabling */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4843
/************************************************************************
 * ixgbe_free_pciintr_resources
 *
 *   Disestablish all interrupt handlers (queue vectors first, then the
 *   link/legacy vector) and release the allocated PCI interrupts.
 *   Safe to call when some or all handlers were never established.
 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally give the interrupt vectors themselves back */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}

	return;
} /* ixgbe_free_pciintr_resources */
4884
4885 /************************************************************************
4886 * ixgbe_free_pci_resources
4887 ************************************************************************/
4888 static void
4889 ixgbe_free_pci_resources(struct adapter *adapter)
4890 {
4891
4892 ixgbe_free_pciintr_resources(adapter);
4893
4894 if (adapter->osdep.mem_size != 0) {
4895 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
4896 adapter->osdep.mem_bus_space_handle,
4897 adapter->osdep.mem_size);
4898 }
4899
4900 return;
4901 } /* ixgbe_free_pci_resources */
4902
/************************************************************************
 * ixgbe_set_sysctl_value
 *
 *   Create a read/write integer sysctl leaf named 'name' under this
 *   adapter's sysctl subtree and initialize the backing variable
 *   '*limit' to 'value'.  'limit' must remain valid for the lifetime
 *   of the node; creation failures are logged but not fatal (the
 *   variable is still initialized).
 ************************************************************************/
static void
ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
	const char *description, int *limit, int value)
{
	device_t dev = adapter->dev;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    name, SYSCTL_DESCR(description),
	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
	*limit = value;
} /* ixgbe_set_sysctl_value */
4926
/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 *
 *   Reads export the current mode; writes are validated and applied by
 *   ixgbe_set_flowcntl().
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error, fc;

	/* Export the current mode; sysctl_lookup() copies in any new one */
	fc = adapter->hw.fc.current_mode;
	node.sysctl_data = &fc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	/* Don't bother if it's not changed */
	if (fc == adapter->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(adapter, fc);
} /* ixgbe_sysctl_flowcntl */
4951
4952 /************************************************************************
4953 * ixgbe_set_flowcntl - Set flow control
4954 *
4955 * Flow control values:
4956 * 0 - off
4957 * 1 - rx pause
4958 * 2 - tx pause
4959 * 3 - full
4960 ************************************************************************/
4961 static int
4962 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4963 {
4964 switch (fc) {
4965 case ixgbe_fc_rx_pause:
4966 case ixgbe_fc_tx_pause:
4967 case ixgbe_fc_full:
4968 adapter->hw.fc.requested_mode = fc;
4969 if (adapter->num_queues > 1)
4970 ixgbe_disable_rx_drop(adapter);
4971 break;
4972 case ixgbe_fc_none:
4973 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4974 if (adapter->num_queues > 1)
4975 ixgbe_enable_rx_drop(adapter);
4976 break;
4977 default:
4978 return (EINVAL);
4979 }
4980
4981 #if 0 /* XXX NetBSD */
4982 /* Don't autoneg if forcing a value */
4983 adapter->hw.fc.disable_fc_autoneg = TRUE;
4984 #endif
4985 ixgbe_fc_enable(&adapter->hw);
4986
4987 return (0);
4988 } /* ixgbe_set_flowcntl */
4989
4990 /************************************************************************
4991 * ixgbe_enable_rx_drop
4992 *
4993 * Enable the hardware to drop packets when the buffer is
4994 * full. This is useful with multiqueue, so that no single
4995 * queue being full stalls the entire RX engine. We only
4996 * enable this when Multiqueue is enabled AND Flow Control
4997 * is disabled.
4998 ************************************************************************/
4999 static void
5000 ixgbe_enable_rx_drop(struct adapter *adapter)
5001 {
5002 struct ixgbe_hw *hw = &adapter->hw;
5003 struct rx_ring *rxr;
5004 u32 srrctl;
5005
5006 for (int i = 0; i < adapter->num_queues; i++) {
5007 rxr = &adapter->rx_rings[i];
5008 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5009 srrctl |= IXGBE_SRRCTL_DROP_EN;
5010 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5011 }
5012
5013 /* enable drop for each vf */
5014 for (int i = 0; i < adapter->num_vfs; i++) {
5015 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5016 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5017 IXGBE_QDE_ENABLE));
5018 }
5019 } /* ixgbe_enable_rx_drop */
5020
5021 /************************************************************************
5022 * ixgbe_disable_rx_drop
5023 ************************************************************************/
5024 static void
5025 ixgbe_disable_rx_drop(struct adapter *adapter)
5026 {
5027 struct ixgbe_hw *hw = &adapter->hw;
5028 struct rx_ring *rxr;
5029 u32 srrctl;
5030
5031 for (int i = 0; i < adapter->num_queues; i++) {
5032 rxr = &adapter->rx_rings[i];
5033 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5034 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5035 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5036 }
5037
5038 /* disable drop for each vf */
5039 for (int i = 0; i < adapter->num_vfs; i++) {
5040 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5041 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5042 }
5043 } /* ixgbe_disable_rx_drop */
5044
/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 *
 *   Reads export the cached flag mask; writes are validated and
 *   applied by ixgbe_set_advertise().
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0, advertise;

	/* Export the cached value; sysctl_lookup() copies in any new one */
	advertise = adapter->advertise;
	node.sysctl_data = &advertise;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	return ixgbe_set_advertise(adapter, advertise);
} /* ixgbe_sysctl_advertise */
5065
5066 /************************************************************************
5067 * ixgbe_set_advertise - Control advertised link speed
5068 *
5069 * Flags:
5070 * 0x00 - Default (all capable link speed)
5071 * 0x01 - advertise 100 Mb
5072 * 0x02 - advertise 1G
5073 * 0x04 - advertise 10G
5074 * 0x08 - advertise 10 Mb
5075 * 0x10 - advertise 2.5G
5076 * 0x20 - advertise 5G
5077 ************************************************************************/
5078 static int
5079 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5080 {
5081 device_t dev;
5082 struct ixgbe_hw *hw;
5083 ixgbe_link_speed speed = 0;
5084 ixgbe_link_speed link_caps = 0;
5085 s32 err = IXGBE_NOT_IMPLEMENTED;
5086 bool negotiate = FALSE;
5087
5088 /* Checks to validate new value */
5089 if (adapter->advertise == advertise) /* no change */
5090 return (0);
5091
5092 dev = adapter->dev;
5093 hw = &adapter->hw;
5094
5095 /* No speed changes for backplane media */
5096 if (hw->phy.media_type == ixgbe_media_type_backplane)
5097 return (ENODEV);
5098
5099 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5100 (hw->phy.multispeed_fiber))) {
5101 device_printf(dev,
5102 "Advertised speed can only be set on copper or "
5103 "multispeed fiber media types.\n");
5104 return (EINVAL);
5105 }
5106
5107 if (advertise < 0x0 || advertise > 0x2f) {
5108 device_printf(dev,
5109 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5110 return (EINVAL);
5111 }
5112
5113 if (hw->mac.ops.get_link_capabilities) {
5114 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5115 &negotiate);
5116 if (err != IXGBE_SUCCESS) {
5117 device_printf(dev, "Unable to determine supported advertise speeds\n");
5118 return (ENODEV);
5119 }
5120 }
5121
5122 /* Set new value and report new advertised mode */
5123 if (advertise & 0x1) {
5124 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5125 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5126 return (EINVAL);
5127 }
5128 speed |= IXGBE_LINK_SPEED_100_FULL;
5129 }
5130 if (advertise & 0x2) {
5131 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5132 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5133 return (EINVAL);
5134 }
5135 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5136 }
5137 if (advertise & 0x4) {
5138 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5139 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5140 return (EINVAL);
5141 }
5142 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5143 }
5144 if (advertise & 0x8) {
5145 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5146 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5147 return (EINVAL);
5148 }
5149 speed |= IXGBE_LINK_SPEED_10_FULL;
5150 }
5151 if (advertise & 0x10) {
5152 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5153 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5154 return (EINVAL);
5155 }
5156 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5157 }
5158 if (advertise & 0x20) {
5159 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5160 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5161 return (EINVAL);
5162 }
5163 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5164 }
5165 if (advertise == 0)
5166 speed = link_caps; /* All capable link speed */
5167
5168 hw->mac.autotry_restart = TRUE;
5169 hw->mac.ops.setup_link(hw, speed, TRUE);
5170 adapter->advertise = advertise;
5171
5172 return (0);
5173 } /* ixgbe_set_advertise */
5174
5175 /************************************************************************
5176 * ixgbe_get_advertise - Get current advertised speed settings
5177 *
5178 * Formatted for sysctl usage.
5179 * Flags:
5180 * 0x01 - advertise 100 Mb
5181 * 0x02 - advertise 1G
5182 * 0x04 - advertise 10G
5183 * 0x08 - advertise 10 Mb (yes, Mb)
5184 * 0x10 - advertise 2.5G
5185 * 0x20 - advertise 5G
5186 ************************************************************************/
5187 static int
5188 ixgbe_get_advertise(struct adapter *adapter)
5189 {
5190 struct ixgbe_hw *hw = &adapter->hw;
5191 int speed;
5192 ixgbe_link_speed link_caps = 0;
5193 s32 err;
5194 bool negotiate = FALSE;
5195
5196 /*
5197 * Advertised speed means nothing unless it's copper or
5198 * multi-speed fiber
5199 */
5200 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5201 !(hw->phy.multispeed_fiber))
5202 return (0);
5203
5204 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5205 if (err != IXGBE_SUCCESS)
5206 return (0);
5207
5208 speed =
5209 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5210 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5211 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5212 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5213 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5214 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5215
5216 return speed;
5217 } /* ixgbe_get_advertise */
5218
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 * Control values:
 *   0/1 - off / on (use default value of 1000)
 *
 * Legal timer values are:
 *   50,100,250,500,1000,2000,5000,10000
 *
 * Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet *ifp = adapter->ifp;
	int	       error;
	int	       newval;

	/* Export the current value; sysctl_lookup() copies in any new one */
	newval = adapter->dmac;
	node.sysctl_data = &newval;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
5276
5277 #ifdef IXGBE_DEBUG
5278 /************************************************************************
5279 * ixgbe_sysctl_power_state
5280 *
5281 * Sysctl to test power states
5282 * Values:
5283 * 0 - set device to D0
5284 * 3 - set device to D3
5285 * (none) - get current device power state
5286 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this path is compiled out and still uses FreeBSD
	 * interfaces (req->newp, DEVICE_SUSPEND/DEVICE_RESUME,
	 * pci_get_powerstate); it needs a NetBSD conversion before
	 * 'notyet' can ever be defined.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev =	adapter->dev;
	int	 curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state control not implemented on NetBSD; report success */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5319 #endif
5320
/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 * Sysctl to enable/disable the WoL capability,
 * if supported by the adapter.
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool new_wol_enabled;
	int error = 0;

	/*
	 * NOTE(review): sysctl_lookup() copies through node.sysctl_data,
	 * which here points at a bool.  Confirm the node was registered
	 * with a bool-sized type; an int-typed node would overrun this
	 * variable on the stack.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware that cannot do it */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
5355
5356 /************************************************************************
5357 * ixgbe_sysctl_wufc - Wake Up Filter Control
5358 *
5359 * Sysctl to enable/disable the types of packets that the
5360 * adapter will wake up on upon receipt.
5361 * Flags:
5362 * 0x1 - Link Status Change
5363 * 0x2 - Magic Packet
5364 * 0x4 - Direct Exact
5365 * 0x8 - Directed Multicast
5366 * 0x10 - Broadcast
5367 * 0x20 - ARP/IPv4 Request Packet
5368 * 0x40 - Direct IPv4 Packet
5369 * 0x80 - Direct IPv6 Packet
5370 *
5371 * Settings not listed above will cause the sysctl to return an error.
5372 ************************************************************************/
5373 static int
5374 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5375 {
5376 struct sysctlnode node = *rnode;
5377 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5378 int error = 0;
5379 u32 new_wufc;
5380
5381 new_wufc = adapter->wufc;
5382 node.sysctl_data = &new_wufc;
5383 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5384 if ((error) || (newp == NULL))
5385 return (error);
5386 if (new_wufc == adapter->wufc)
5387 return (0);
5388
5389 if (new_wufc & 0xffffff00)
5390 return (EINVAL);
5391
5392 new_wufc &= 0xff;
5393 new_wufc |= (0xffffff & adapter->wufc);
5394 adapter->wufc = new_wufc;
5395
5396 return (0);
5397 } /* ixgbe_sysctl_wufc */
5398
5399 #ifdef IXGBE_DEBUG
5400 /************************************************************************
5401 * ixgbe_sysctl_print_rss_config
5402 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): compiled out; the body still uses FreeBSD sbuf
	 * and sysctl (req) interfaces and needs a NetBSD conversion
	 * before 'notyet' can ever be defined.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries past 32 live in the extended ERETA bank */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5456 #endif /* IXGBE_DEBUG */
5457
5458 /************************************************************************
5459 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5460 *
5461 * For X552/X557-AT devices using an external PHY
5462 ************************************************************************/
5463 static int
5464 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5465 {
5466 struct sysctlnode node = *rnode;
5467 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5468 struct ixgbe_hw *hw = &adapter->hw;
5469 int val;
5470 u16 reg;
5471 int error;
5472
5473 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5474 device_printf(adapter->dev,
5475 "Device has no supported external thermal sensor.\n");
5476 return (ENODEV);
5477 }
5478
5479 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5480 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5481 device_printf(adapter->dev,
5482 "Error reading from PHY's current temperature register\n");
5483 return (EAGAIN);
5484 }
5485
5486 node.sysctl_data = &val;
5487
5488 /* Shift temp for output */
5489 val = reg >> 8;
5490
5491 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5492 if ((error) || (newp == NULL))
5493 return (error);
5494
5495 return (0);
5496 } /* ixgbe_sysctl_phy_temp */
5497
5498 /************************************************************************
5499 * ixgbe_sysctl_phy_overtemp_occurred
5500 *
5501 * Reports (directly from the PHY) whether the current PHY
5502 * temperature is over the overtemp threshold.
5503 ************************************************************************/
5504 static int
5505 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5506 {
5507 struct sysctlnode node = *rnode;
5508 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5509 struct ixgbe_hw *hw = &adapter->hw;
5510 int val, error;
5511 u16 reg;
5512
5513 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5514 device_printf(adapter->dev,
5515 "Device has no supported external thermal sensor.\n");
5516 return (ENODEV);
5517 }
5518
5519 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5520 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5521 device_printf(adapter->dev,
5522 "Error reading from PHY's temperature status register\n");
5523 return (EAGAIN);
5524 }
5525
5526 node.sysctl_data = &val;
5527
5528 /* Get occurrence bit */
5529 val = !!(reg & 0x4000);
5530
5531 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5532 if ((error) || (newp == NULL))
5533 return (error);
5534
5535 return (0);
5536 } /* ixgbe_sysctl_phy_overtemp_occurred */
5537
/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0 - disable EEE
 *   1 - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	int curr_eee, new_eee, error = 0;
	s32 retval;

	/* Export current state; sysctl_lookup() copies in any new one */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
5594
5595 /************************************************************************
5596 * ixgbe_init_device_features
5597 ************************************************************************/
5598 static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/*
	 * Start with the capabilities common to every supported MAC.
	 * MAC-specific bits are added/removed in the switch below, and
	 * the actually-enabled set (feat_en) is derived from feat_cap
	 * afterwards.
	 */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Fan-failure detection exists only on the 82598AT part */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass only on function 0 of the bypass SKU */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* NOTE(review): EEE only advertised for the KR device here */
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a does not support legacy (INTx) interrupts */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass only on function 0 of the bypass SKU */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
5700
5701 /************************************************************************
5702 * ixgbe_probe - Device identification routine
5703 *
5704 * Determines if the driver should be loaded on
5705 * adapter based on its PCI vendor/device ID.
5706 *
5707 * return BUS_PROBE_DEFAULT on success, positive on failure
5708 ************************************************************************/
5709 static int
5710 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5711 {
5712 const struct pci_attach_args *pa = aux;
5713
5714 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5715 }
5716
5717 static ixgbe_vendor_info_t *
5718 ixgbe_lookup(const struct pci_attach_args *pa)
5719 {
5720 ixgbe_vendor_info_t *ent;
5721 pcireg_t subid;
5722
5723 INIT_DEBUGOUT("ixgbe_lookup: begin");
5724
5725 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5726 return NULL;
5727
5728 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5729
5730 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5731 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5732 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5733 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5734 (ent->subvendor_id == 0)) &&
5735 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5736 (ent->subdevice_id == 0))) {
5737 ++ixgbe_total_ports;
5738 return ent;
5739 }
5740 }
5741 return NULL;
5742 }
5743
5744 static int
5745 ixgbe_ifflags_cb(struct ethercom *ec)
5746 {
5747 struct ifnet *ifp = &ec->ec_if;
5748 struct adapter *adapter = ifp->if_softc;
5749 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
5750
5751 IXGBE_CORE_LOCK(adapter);
5752
5753 if (change != 0)
5754 adapter->if_flags = ifp->if_flags;
5755
5756 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5757 rc = ENETRESET;
5758 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5759 ixgbe_set_promisc(adapter);
5760
5761 /* Set up VLAN support and filter */
5762 ixgbe_setup_vlan_hw_support(adapter);
5763
5764 IXGBE_CORE_UNLOCK(adapter);
5765
5766 return rc;
5767 }
5768
5769 /************************************************************************
5770 * ixgbe_ioctl - Ioctl entry point
5771 *
5772 * Called when the user wants to configure the interface.
5773 *
5774 * return 0 on success, positive on failure
5775 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capability bits that must toggle as a unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/*
	 * First switch: debug tracing for each known command.  The only
	 * command with a side effect here is SIOCZIFDATA, which also
	 * clears the hardware and software counters.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both the MAC's hardware counters and our evcnts */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command handling. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		/* NOTE(review): only one byte is read regardless of i2c.len */
		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code do the work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: apply the change to running hardware, if up. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
5910
5911 /************************************************************************
5912 * ixgbe_check_fan_failure
5913 ************************************************************************/
5914 static void
5915 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
5916 {
5917 u32 mask;
5918
5919 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
5920 IXGBE_ESDP_SDP1;
5921
5922 if (reg & mask)
5923 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5924 } /* ixgbe_check_fan_failure */
5925
5926 /************************************************************************
5927 * ixgbe_handle_que
5928 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Rx first, then Tx cleanup under the per-ring Tx lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Kick the multiqueue transmit path if it has work queued */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule ourselves rather than
		 * re-enabling the interrupt. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* MSI-X: re-enable only this queue's vector */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* Legacy/MSI: re-enable the shared interrupt */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
5966
5967 /************************************************************************
5968 * ixgbe_handle_que_work
5969 ************************************************************************/
5970 static void
5971 ixgbe_handle_que_work(struct work *wk, void *context)
5972 {
5973 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
5974
5975 /*
5976 * "enqueued flag" is not required here.
5977 * See ixgbe_msix_que().
5978 */
5979 ixgbe_handle_que(que);
5980 }
5981
5982 /************************************************************************
5983 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
5984 ************************************************************************/
5985 static int
5986 ixgbe_allocate_legacy(struct adapter *adapter,
5987 const struct pci_attach_args *pa)
5988 {
5989 device_t dev = adapter->dev;
5990 struct ix_queue *que = adapter->queues;
5991 struct tx_ring *txr = adapter->tx_rings;
5992 int counts[PCI_INTR_TYPE_SIZE];
5993 pci_intr_type_t intr_type, max_type;
5994 char intrbuf[PCI_INTRSTR_LEN];
5995 const char *intrstr = NULL;
5996
5997 /* We allocate a single interrupt resource */
5998 max_type = PCI_INTR_TYPE_MSI;
5999 counts[PCI_INTR_TYPE_MSIX] = 0;
6000 counts[PCI_INTR_TYPE_MSI] =
6001 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6002 /* Check not feat_en but feat_cap to fallback to INTx */
6003 counts[PCI_INTR_TYPE_INTX] =
6004 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6005
6006 alloc_retry:
6007 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6008 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6009 return ENXIO;
6010 }
6011 adapter->osdep.nintrs = 1;
6012 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6013 intrbuf, sizeof(intrbuf));
6014 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6015 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6016 device_xname(dev));
6017 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6018 if (adapter->osdep.ihs[0] == NULL) {
6019 aprint_error_dev(dev,"unable to establish %s\n",
6020 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6021 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6022 adapter->osdep.intrs = NULL;
6023 switch (intr_type) {
6024 case PCI_INTR_TYPE_MSI:
6025 /* The next try is for INTx: Disable MSI */
6026 max_type = PCI_INTR_TYPE_INTX;
6027 counts[PCI_INTR_TYPE_INTX] = 1;
6028 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6029 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6030 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6031 goto alloc_retry;
6032 } else
6033 break;
6034 case PCI_INTR_TYPE_INTX:
6035 default:
6036 /* See below */
6037 break;
6038 }
6039 }
6040 if (intr_type == PCI_INTR_TYPE_INTX) {
6041 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6042 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6043 }
6044 if (adapter->osdep.ihs[0] == NULL) {
6045 aprint_error_dev(dev,
6046 "couldn't establish interrupt%s%s\n",
6047 intrstr ? " at " : "", intrstr ? intrstr : "");
6048 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6049 adapter->osdep.intrs = NULL;
6050 return ENXIO;
6051 }
6052 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6053 /*
6054 * Try allocating a fast interrupt and the associated deferred
6055 * processing contexts.
6056 */
6057 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6058 txr->txr_si =
6059 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6060 ixgbe_deferred_mq_start, txr);
6061 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6062 ixgbe_handle_que, que);
6063
6064 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6065 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6066 aprint_error_dev(dev,
6067 "could not establish software interrupts\n");
6068
6069 return ENXIO;
6070 }
6071 /* For simplicity in the handlers */
6072 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6073
6074 return (0);
6075 } /* ixgbe_allocate_legacy */
6076
6077 /************************************************************************
6078 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6079 ************************************************************************/
6080 static int
6081 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6082 {
6083 device_t dev = adapter->dev;
6084 struct ix_queue *que = adapter->queues;
6085 struct tx_ring *txr = adapter->tx_rings;
6086 pci_chipset_tag_t pc;
6087 char intrbuf[PCI_INTRSTR_LEN];
6088 char intr_xname[32];
6089 char wqname[MAXCOMLEN];
6090 const char *intrstr = NULL;
6091 int error, vector = 0;
6092 int cpu_id = 0;
6093 kcpuset_t *affinity;
6094 #ifdef RSS
6095 unsigned int rss_buckets = 0;
6096 kcpuset_t cpu_mask;
6097 #endif
6098
6099 pc = adapter->osdep.pc;
6100 #ifdef RSS
6101 /*
6102 * If we're doing RSS, the number of queues needs to
6103 * match the number of RSS buckets that are configured.
6104 *
6105 * + If there's more queues than RSS buckets, we'll end
6106 * up with queues that get no traffic.
6107 *
6108 * + If there's more RSS buckets than queues, we'll end
6109 * up having multiple RSS buckets map to the same queue,
6110 * so there'll be some contention.
6111 */
6112 rss_buckets = rss_getnumbuckets();
6113 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6114 (adapter->num_queues != rss_buckets)) {
6115 device_printf(dev,
6116 "%s: number of queues (%d) != number of RSS buckets (%d)"
6117 "; performance will be impacted.\n",
6118 __func__, adapter->num_queues, rss_buckets);
6119 }
6120 #endif
6121
6122 adapter->osdep.nintrs = adapter->num_queues + 1;
6123 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6124 adapter->osdep.nintrs) != 0) {
6125 aprint_error_dev(dev,
6126 "failed to allocate MSI-X interrupt\n");
6127 return (ENXIO);
6128 }
6129
6130 kcpuset_create(&affinity, false);
6131 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6132 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6133 device_xname(dev), i);
6134 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6135 sizeof(intrbuf));
6136 #ifdef IXGBE_MPSAFE
6137 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6138 true);
6139 #endif
6140 /* Set the handler function */
6141 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6142 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6143 intr_xname);
6144 if (que->res == NULL) {
6145 aprint_error_dev(dev,
6146 "Failed to register QUE handler\n");
6147 error = ENXIO;
6148 goto err_out;
6149 }
6150 que->msix = vector;
6151 adapter->active_queues |= (u64)(1 << que->msix);
6152
6153 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6154 #ifdef RSS
6155 /*
6156 * The queue ID is used as the RSS layer bucket ID.
6157 * We look up the queue ID -> RSS CPU ID and select
6158 * that.
6159 */
6160 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6161 CPU_SETOF(cpu_id, &cpu_mask);
6162 #endif
6163 } else {
6164 /*
6165 * Bind the MSI-X vector, and thus the
6166 * rings to the corresponding CPU.
6167 *
6168 * This just happens to match the default RSS
6169 * round-robin bucket -> queue -> CPU allocation.
6170 */
6171 if (adapter->num_queues > 1)
6172 cpu_id = i;
6173 }
6174 /* Round-robin affinity */
6175 kcpuset_zero(affinity);
6176 kcpuset_set(affinity, cpu_id % ncpu);
6177 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6178 NULL);
6179 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6180 intrstr);
6181 if (error == 0) {
6182 #if 1 /* def IXGBE_DEBUG */
6183 #ifdef RSS
6184 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6185 cpu_id % ncpu);
6186 #else
6187 aprint_normal(", bound queue %d to cpu %d", i,
6188 cpu_id % ncpu);
6189 #endif
6190 #endif /* IXGBE_DEBUG */
6191 }
6192 aprint_normal("\n");
6193
6194 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6195 txr->txr_si = softint_establish(
6196 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6197 ixgbe_deferred_mq_start, txr);
6198 if (txr->txr_si == NULL) {
6199 aprint_error_dev(dev,
6200 "couldn't establish software interrupt\n");
6201 error = ENXIO;
6202 goto err_out;
6203 }
6204 }
6205 que->que_si
6206 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6207 ixgbe_handle_que, que);
6208 if (que->que_si == NULL) {
6209 aprint_error_dev(dev,
6210 "couldn't establish software interrupt\n");
6211 error = ENXIO;
6212 goto err_out;
6213 }
6214 }
6215 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6216 error = workqueue_create(&adapter->txr_wq, wqname,
6217 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6218 IXGBE_WORKQUEUE_FLAGS);
6219 if (error) {
6220 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6221 goto err_out;
6222 }
6223 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6224
6225 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6226 error = workqueue_create(&adapter->que_wq, wqname,
6227 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6228 IXGBE_WORKQUEUE_FLAGS);
6229 if (error) {
6230 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6231 goto err_out;
6232 }
6233
6234 /* and Link */
6235 cpu_id++;
6236 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6237 adapter->vector = vector;
6238 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6239 sizeof(intrbuf));
6240 #ifdef IXGBE_MPSAFE
6241 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6242 true);
6243 #endif
6244 /* Set the link handler function */
6245 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6246 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6247 intr_xname);
6248 if (adapter->osdep.ihs[vector] == NULL) {
6249 adapter->res = NULL;
6250 aprint_error_dev(dev, "Failed to register LINK handler\n");
6251 error = ENXIO;
6252 goto err_out;
6253 }
6254 /* Round-robin affinity */
6255 kcpuset_zero(affinity);
6256 kcpuset_set(affinity, cpu_id % ncpu);
6257 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6258 NULL);
6259
6260 aprint_normal_dev(dev,
6261 "for link, interrupting at %s", intrstr);
6262 if (error == 0)
6263 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6264 else
6265 aprint_normal("\n");
6266
6267 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6268 adapter->mbx_si =
6269 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6270 ixgbe_handle_mbx, adapter);
6271 if (adapter->mbx_si == NULL) {
6272 aprint_error_dev(dev,
6273 "could not establish software interrupts\n");
6274
6275 error = ENXIO;
6276 goto err_out;
6277 }
6278 }
6279
6280 kcpuset_destroy(affinity);
6281 aprint_normal_dev(dev,
6282 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6283
6284 return (0);
6285
6286 err_out:
6287 kcpuset_destroy(affinity);
6288 ixgbe_free_softint(adapter);
6289 ixgbe_free_pciintr_resources(adapter);
6290 return (error);
6291 } /* ixgbe_allocate_msix */
6292
6293 /************************************************************************
6294 * ixgbe_configure_interrupts
6295 *
6296 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6297 * This will also depend on user settings.
6298 ************************************************************************/
6299 static int
6300 ixgbe_configure_interrupts(struct adapter *adapter)
6301 {
6302 device_t dev = adapter->dev;
6303 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6304 int want, queues, msgs;
6305
6306 /* Default to 1 queue if MSI-X setup fails */
6307 adapter->num_queues = 1;
6308
6309 /* Override by tuneable */
6310 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6311 goto msi;
6312
6313 /*
6314 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6315 * interrupt slot.
6316 */
6317 if (ncpu == 1)
6318 goto msi;
6319
6320 /* First try MSI-X */
6321 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6322 msgs = MIN(msgs, IXG_MAX_NINTR);
6323 if (msgs < 2)
6324 goto msi;
6325
6326 adapter->msix_mem = (void *)1; /* XXX */
6327
6328 /* Figure out a reasonable auto config value */
6329 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6330
6331 #ifdef RSS
6332 /* If we're doing RSS, clamp at the number of RSS buckets */
6333 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6334 queues = min(queues, rss_getnumbuckets());
6335 #endif
6336 if (ixgbe_num_queues > queues) {
6337 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6338 ixgbe_num_queues = queues;
6339 }
6340
6341 if (ixgbe_num_queues != 0)
6342 queues = ixgbe_num_queues;
6343 else
6344 queues = min(queues,
6345 min(mac->max_tx_queues, mac->max_rx_queues));
6346
6347 /* reflect correct sysctl value */
6348 ixgbe_num_queues = queues;
6349
6350 /*
6351 * Want one vector (RX/TX pair) per queue
6352 * plus an additional for Link.
6353 */
6354 want = queues + 1;
6355 if (msgs >= want)
6356 msgs = want;
6357 else {
6358 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6359 "%d vectors but %d queues wanted!\n",
6360 msgs, want);
6361 goto msi;
6362 }
6363 adapter->num_queues = queues;
6364 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6365 return (0);
6366
6367 /*
6368 * MSI-X allocation failed or provided us with
6369 * less vectors than needed. Free MSI-X resources
6370 * and we'll try enabling MSI.
6371 */
6372 msi:
6373 /* Without MSI-X, some features are no longer supported */
6374 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6375 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6376 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6377 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6378
6379 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6380 adapter->msix_mem = NULL; /* XXX */
6381 if (msgs > 1)
6382 msgs = 1;
6383 if (msgs != 0) {
6384 msgs = 1;
6385 adapter->feat_en |= IXGBE_FEATURE_MSI;
6386 return (0);
6387 }
6388
6389 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6390 aprint_error_dev(dev,
6391 "Device does not support legacy interrupts.\n");
6392 return 1;
6393 }
6394
6395 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6396
6397 return (0);
6398 } /* ixgbe_configure_interrupts */
6399
6400
6401 /************************************************************************
6402 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6403 *
6404 * Done outside of interrupt context since the driver might sleep
6405 ************************************************************************/
6406 static void
6407 ixgbe_handle_link(void *context)
6408 {
6409 struct adapter *adapter = context;
6410 struct ixgbe_hw *hw = &adapter->hw;
6411
6412 IXGBE_CORE_LOCK(adapter);
6413 ++adapter->link_sicount.ev_count;
6414 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6415 ixgbe_update_link_status(adapter);
6416
6417 /* Re-enable link interrupts */
6418 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6419
6420 IXGBE_CORE_UNLOCK(adapter);
6421 } /* ixgbe_handle_link */
6422
6423 /************************************************************************
6424 * ixgbe_rearm_queues
6425 ************************************************************************/
6426 static void
6427 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6428 {
6429 u32 mask;
6430
6431 switch (adapter->hw.mac.type) {
6432 case ixgbe_mac_82598EB:
6433 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6434 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6435 break;
6436 case ixgbe_mac_82599EB:
6437 case ixgbe_mac_X540:
6438 case ixgbe_mac_X550:
6439 case ixgbe_mac_X550EM_x:
6440 case ixgbe_mac_X550EM_a:
6441 mask = (queues & 0xFFFFFFFF);
6442 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6443 mask = (queues >> 32);
6444 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6445 break;
6446 default:
6447 break;
6448 }
6449 } /* ixgbe_rearm_queues */
6450