ixgbe.c revision 1.157 1 /* $NetBSD: ixgbe.c,v 1.157 2018/05/30 08:35:26 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 char ixgbe_driver_version[] = "4.0.1-k";
84
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static int ixgbe_setup_interface(device_t, struct adapter *);
188 static void ixgbe_config_gpie(struct adapter *);
189 static void ixgbe_config_dmac(struct adapter *);
190 static void ixgbe_config_delay_values(struct adapter *);
191 static void ixgbe_config_link(struct adapter *);
192 static void ixgbe_check_wol_support(struct adapter *);
193 static int ixgbe_setup_low_power_mode(struct adapter *);
194 static void ixgbe_rearm_queues(struct adapter *, u64);
195
196 static void ixgbe_initialize_transmit_units(struct adapter *);
197 static void ixgbe_initialize_receive_units(struct adapter *);
198 static void ixgbe_enable_rx_drop(struct adapter *);
199 static void ixgbe_disable_rx_drop(struct adapter *);
200 static void ixgbe_initialize_rss_mapping(struct adapter *);
201
202 static void ixgbe_enable_intr(struct adapter *);
203 static void ixgbe_disable_intr(struct adapter *);
204 static void ixgbe_update_stats_counters(struct adapter *);
205 static void ixgbe_set_promisc(struct adapter *);
206 static void ixgbe_set_multi(struct adapter *);
207 static void ixgbe_update_link_status(struct adapter *);
208 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
209 static void ixgbe_configure_ivars(struct adapter *);
210 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
211 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
212
213 static void ixgbe_setup_vlan_hw_support(struct adapter *);
214 #if 0
215 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
216 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
217 #endif
218
219 static void ixgbe_add_device_sysctls(struct adapter *);
220 static void ixgbe_add_hw_stats(struct adapter *);
221 static void ixgbe_clear_evcnt(struct adapter *);
222 static int ixgbe_set_flowcntl(struct adapter *, int);
223 static int ixgbe_set_advertise(struct adapter *, int);
224 static int ixgbe_get_advertise(struct adapter *);
225
226 /* Sysctl handlers */
227 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
228 const char *, int *, int);
229 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
230 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
231 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
232 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
235 #ifdef IXGBE_DEBUG
236 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
237 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
238 #endif
239 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
247
248 /* Support for pluggable optic modules */
249 static bool ixgbe_sfp_probe(struct adapter *);
250
251 /* Legacy (single vector) interrupt handler */
252 static int ixgbe_legacy_irq(void *);
253
254 /* The MSI/MSI-X Interrupt handlers */
255 static int ixgbe_msix_que(void *);
256 static int ixgbe_msix_link(void *);
257
258 /* Software interrupts for deferred work */
259 static void ixgbe_handle_que(void *);
260 static void ixgbe_handle_link(void *);
261 static void ixgbe_handle_msf(void *);
262 static void ixgbe_handle_mod(void *);
263 static void ixgbe_handle_phy(void *);
264
265 /* Workqueue handler for deferred work */
266 static void ixgbe_handle_que_work(struct work *, void *);
267
268 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
269
270 /************************************************************************
271 * NetBSD Device Interface Entry Points
272 ************************************************************************/
273 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
274 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
275 DVF_DETACH_SHUTDOWN);
276
277 #if 0
278 devclass_t ix_devclass;
279 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
280
281 MODULE_DEPEND(ix, pci, 1, 1, 1);
282 MODULE_DEPEND(ix, ether, 1, 1, 1);
283 #ifdef DEV_NETMAP
284 MODULE_DEPEND(ix, netmap, 1, 1, 1);
285 #endif
286 #endif
287
288 /*
289 * TUNEABLE PARAMETERS:
290 */
291
292 /*
293 * AIM: Adaptive Interrupt Moderation
294 * which means that the interrupt rate
295 * is varied over time based on the
296 * traffic for that interrupt vector
297 */
298 static bool ixgbe_enable_aim = true;
299 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
300 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
301 "Enable adaptive interrupt moderation");
302
303 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
304 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
305 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
306
307 /* How many packets rxeof tries to clean at a time */
308 static int ixgbe_rx_process_limit = 256;
309 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
310 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
311
312 /* How many packets txeof tries to clean at a time */
313 static int ixgbe_tx_process_limit = 256;
314 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
315 &ixgbe_tx_process_limit, 0,
316 "Maximum number of sent packets to process at a time, -1 means unlimited");
317
318 /* Flow control setting, default to full */
319 static int ixgbe_flow_control = ixgbe_fc_full;
320 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
321 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
322
 323 /* Whether packet processing uses workqueue or softint */
324 static bool ixgbe_txrx_workqueue = false;
325
326 /*
327 * Smart speed setting, default to on
328 * this only works as a compile option
329 * right now as its during attach, set
330 * this to 'ixgbe_smart_speed_off' to
331 * disable.
332 */
333 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
334
335 /*
336 * MSI-X should be the default for best performance,
337 * but this allows it to be forced off for testing.
338 */
339 static int ixgbe_enable_msix = 1;
340 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
341 "Enable MSI-X interrupts");
342
343 /*
344 * Number of Queues, can be set to 0,
345 * it then autoconfigures based on the
346 * number of cpus with a max of 8. This
 347 * can be overridden manually here.
348 */
349 static int ixgbe_num_queues = 0;
350 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
351 "Number of queues to configure, 0 indicates autoconfigure");
352
353 /*
354 * Number of TX descriptors per ring,
355 * setting higher than RX as this seems
356 * the better performing choice.
357 */
358 static int ixgbe_txd = PERFORM_TXD;
359 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
360 "Number of transmit descriptors per queue");
361
362 /* Number of RX descriptors per ring */
363 static int ixgbe_rxd = PERFORM_RXD;
364 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
365 "Number of receive descriptors per queue");
366
367 /*
368 * Defining this on will allow the use
369 * of unsupported SFP+ modules, note that
370 * doing so you are on your own :)
371 */
372 static int allow_unsupported_sfp = false;
373 #define TUNABLE_INT(__x, __y)
374 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
375
376 /*
377 * Not sure if Flow Director is fully baked,
378 * so we'll default to turning it off.
379 */
380 static int ixgbe_enable_fdir = 0;
381 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
382 "Enable Flow Director");
383
384 /* Legacy Transmit (single queue) */
385 static int ixgbe_enable_legacy_tx = 0;
386 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
387 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
388
389 /* Receive-Side Scaling */
390 static int ixgbe_enable_rss = 1;
391 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
392 "Enable Receive-Side Scaling (RSS)");
393
394 /* Keep running tab on them for sanity check */
395 static int ixgbe_total_ports;
396
397 #if 0
398 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
399 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
400 #endif
401
402 #ifdef NET_MPSAFE
403 #define IXGBE_MPSAFE 1
404 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
405 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
406 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
407 #else
408 #define IXGBE_CALLOUT_FLAGS 0
409 #define IXGBE_SOFTINFT_FLAGS 0
410 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
411 #endif
412 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
413
414 /************************************************************************
415 * ixgbe_initialize_rss_mapping
416 ************************************************************************/
417 static void
418 ixgbe_initialize_rss_mapping(struct adapter *adapter)
419 {
420 struct ixgbe_hw *hw = &adapter->hw;
421 u32 reta = 0, mrqc, rss_key[10];
422 int queue_id, table_size, index_mult;
423 int i, j;
424 u32 rss_hash_config;
425
426 /* force use default RSS key. */
427 #ifdef __NetBSD__
428 rss_getkey((uint8_t *) &rss_key);
429 #else
430 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
431 /* Fetch the configured RSS key */
432 rss_getkey((uint8_t *) &rss_key);
433 } else {
434 /* set up random bits */
435 cprng_fast(&rss_key, sizeof(rss_key));
436 }
437 #endif
438
439 /* Set multiplier for RETA setup and table size based on MAC */
440 index_mult = 0x1;
441 table_size = 128;
442 switch (adapter->hw.mac.type) {
443 case ixgbe_mac_82598EB:
444 index_mult = 0x11;
445 break;
446 case ixgbe_mac_X550:
447 case ixgbe_mac_X550EM_x:
448 case ixgbe_mac_X550EM_a:
449 table_size = 512;
450 break;
451 default:
452 break;
453 }
454
455 /* Set up the redirection table */
456 for (i = 0, j = 0; i < table_size; i++, j++) {
457 if (j == adapter->num_queues)
458 j = 0;
459
460 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
461 /*
462 * Fetch the RSS bucket id for the given indirection
463 * entry. Cap it at the number of configured buckets
464 * (which is num_queues.)
465 */
466 queue_id = rss_get_indirection_to_bucket(i);
467 queue_id = queue_id % adapter->num_queues;
468 } else
469 queue_id = (j * index_mult);
470
471 /*
472 * The low 8 bits are for hash value (n+0);
473 * The next 8 bits are for hash value (n+1), etc.
474 */
475 reta = reta >> 8;
476 reta = reta | (((uint32_t) queue_id) << 24);
477 if ((i & 3) == 3) {
478 if (i < 128)
479 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
480 else
481 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
482 reta);
483 reta = 0;
484 }
485 }
486
487 /* Now fill our hash function seeds */
488 for (i = 0; i < 10; i++)
489 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
490
491 /* Perform hash on these packet types */
492 if (adapter->feat_en & IXGBE_FEATURE_RSS)
493 rss_hash_config = rss_gethashconfig();
494 else {
495 /*
496 * Disable UDP - IP fragments aren't currently being handled
497 * and so we end up with a mix of 2-tuple and 4-tuple
498 * traffic.
499 */
500 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
501 | RSS_HASHTYPE_RSS_TCP_IPV4
502 | RSS_HASHTYPE_RSS_IPV6
503 | RSS_HASHTYPE_RSS_TCP_IPV6
504 | RSS_HASHTYPE_RSS_IPV6_EX
505 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
506 }
507
508 mrqc = IXGBE_MRQC_RSSEN;
509 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
510 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
511 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
512 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
513 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
514 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
515 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
516 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
517 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
518 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
519 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
520 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
521 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
522 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
523 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
524 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
525 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
526 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
527 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
528 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
529 } /* ixgbe_initialize_rss_mapping */
530
531 /************************************************************************
532 * ixgbe_initialize_receive_units - Setup receive registers and features.
533 ************************************************************************/
534 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
535
536 static void
537 ixgbe_initialize_receive_units(struct adapter *adapter)
538 {
539 struct rx_ring *rxr = adapter->rx_rings;
540 struct ixgbe_hw *hw = &adapter->hw;
541 struct ifnet *ifp = adapter->ifp;
542 int i, j;
543 u32 bufsz, fctrl, srrctl, rxcsum;
544 u32 hlreg;
545
546 /*
547 * Make sure receives are disabled while
548 * setting up the descriptor ring
549 */
550 ixgbe_disable_rx(hw);
551
552 /* Enable broadcasts */
553 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
554 fctrl |= IXGBE_FCTRL_BAM;
555 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
556 fctrl |= IXGBE_FCTRL_DPF;
557 fctrl |= IXGBE_FCTRL_PMCF;
558 }
559 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
560
561 /* Set for Jumbo Frames? */
562 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
563 if (ifp->if_mtu > ETHERMTU)
564 hlreg |= IXGBE_HLREG0_JUMBOEN;
565 else
566 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
567
568 #ifdef DEV_NETMAP
569 /* CRC stripping is conditional in Netmap */
570 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
571 (ifp->if_capenable & IFCAP_NETMAP) &&
572 !ix_crcstrip)
573 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
574 else
575 #endif /* DEV_NETMAP */
576 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
577
578 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
579
580 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
581 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
582
583 for (i = 0; i < adapter->num_queues; i++, rxr++) {
584 u64 rdba = rxr->rxdma.dma_paddr;
585 u32 reg;
586 int regnum = i / 4; /* 1 register per 4 queues */
587 int regshift = i % 4; /* 4 bits per 1 queue */
588 j = rxr->me;
589
590 /* Setup the Base and Length of the Rx Descriptor Ring */
591 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
592 (rdba & 0x00000000ffffffffULL));
593 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
594 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
595 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
596
597 /* Set up the SRRCTL register */
598 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
599 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
600 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
601 srrctl |= bufsz;
602 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
603
604 /* Set RQSMR (Receive Queue Statistic Mapping) register */
605 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
606 reg &= ~(0x000000ff << (regshift * 8));
607 reg |= i << (regshift * 8);
608 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
609
610 /*
611 * Set DROP_EN iff we have no flow control and >1 queue.
612 * Note that srrctl was cleared shortly before during reset,
613 * so we do not need to clear the bit, but do it just in case
614 * this code is moved elsewhere.
615 */
616 if (adapter->num_queues > 1 &&
617 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
618 srrctl |= IXGBE_SRRCTL_DROP_EN;
619 } else {
620 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
621 }
622
623 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
624
625 /* Setup the HW Rx Head and Tail Descriptor Pointers */
626 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
627 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
628
629 /* Set the driver rx tail address */
630 rxr->tail = IXGBE_RDT(rxr->me);
631 }
632
633 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
634 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
635 | IXGBE_PSRTYPE_UDPHDR
636 | IXGBE_PSRTYPE_IPV4HDR
637 | IXGBE_PSRTYPE_IPV6HDR;
638 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
639 }
640
641 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
642
643 ixgbe_initialize_rss_mapping(adapter);
644
645 if (adapter->num_queues > 1) {
646 /* RSS and RX IPP Checksum are mutually exclusive */
647 rxcsum |= IXGBE_RXCSUM_PCSD;
648 }
649
650 if (ifp->if_capenable & IFCAP_RXCSUM)
651 rxcsum |= IXGBE_RXCSUM_PCSD;
652
653 /* This is useful for calculating UDP/IP fragment checksums */
654 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
655 rxcsum |= IXGBE_RXCSUM_IPPCSE;
656
657 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
658
659 } /* ixgbe_initialize_receive_units */
660
661 /************************************************************************
662 * ixgbe_initialize_transmit_units - Enable transmit units.
663 ************************************************************************/
664 static void
665 ixgbe_initialize_transmit_units(struct adapter *adapter)
666 {
667 struct tx_ring *txr = adapter->tx_rings;
668 struct ixgbe_hw *hw = &adapter->hw;
669 int i;
670
671 /* Setup the Base and Length of the Tx Descriptor Ring */
672 for (i = 0; i < adapter->num_queues; i++, txr++) {
673 u64 tdba = txr->txdma.dma_paddr;
674 u32 txctrl = 0;
675 u32 tqsmreg, reg;
676 int regnum = i / 4; /* 1 register per 4 queues */
677 int regshift = i % 4; /* 4 bits per 1 queue */
678 int j = txr->me;
679
680 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
681 (tdba & 0x00000000ffffffffULL));
682 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
683 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
684 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
685
686 /*
687 * Set TQSMR (Transmit Queue Statistic Mapping) register.
688 * Register location is different between 82598 and others.
689 */
690 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
691 tqsmreg = IXGBE_TQSMR(regnum);
692 else
693 tqsmreg = IXGBE_TQSM(regnum);
694 reg = IXGBE_READ_REG(hw, tqsmreg);
695 reg &= ~(0x000000ff << (regshift * 8));
696 reg |= i << (regshift * 8);
697 IXGBE_WRITE_REG(hw, tqsmreg, reg);
698
699 /* Setup the HW Tx Head and Tail descriptor pointers */
700 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
701 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
702
703 /* Cache the tail address */
704 txr->tail = IXGBE_TDT(j);
705
706 txr->txr_no_space = false;
707
708 /* Disable Head Writeback */
709 /*
710 * Note: for X550 series devices, these registers are actually
711 * prefixed with TPH_ isntead of DCA_, but the addresses and
712 * fields remain the same.
713 */
714 switch (hw->mac.type) {
715 case ixgbe_mac_82598EB:
716 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
717 break;
718 default:
719 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
720 break;
721 }
722 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
723 switch (hw->mac.type) {
724 case ixgbe_mac_82598EB:
725 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
726 break;
727 default:
728 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
729 break;
730 }
731
732 }
733
734 if (hw->mac.type != ixgbe_mac_82598EB) {
735 u32 dmatxctl, rttdcs;
736
737 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
738 dmatxctl |= IXGBE_DMATXCTL_TE;
739 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
740 /* Disable arbiter to set MTQC */
741 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
742 rttdcs |= IXGBE_RTTDCS_ARBDIS;
743 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
744 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
745 ixgbe_get_mtqc(adapter->iov_mode));
746 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
747 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
748 }
749
750 return;
751 } /* ixgbe_initialize_transmit_units */
752
753 /************************************************************************
754 * ixgbe_attach - Device initialization routine
755 *
756 * Called when the driver is being loaded.
757 * Identifies the type of hardware, allocates all resources
758 * and initializes the hardware.
759 *
760 * return 0 on success, positive on failure
761 ************************************************************************/
762 static void
763 ixgbe_attach(device_t parent, device_t dev, void *aux)
764 {
765 struct adapter *adapter;
766 struct ixgbe_hw *hw;
767 int error = -1;
768 u32 ctrl_ext;
769 u16 high, low, nvmreg;
770 pcireg_t id, subid;
771 ixgbe_vendor_info_t *ent;
772 struct pci_attach_args *pa = aux;
773 const char *str;
774 char buf[256];
775
776 INIT_DEBUGOUT("ixgbe_attach: begin");
777
778 /* Allocate, clear, and link in our adapter structure */
779 adapter = device_private(dev);
780 adapter->hw.back = adapter;
781 adapter->dev = dev;
782 hw = &adapter->hw;
783 adapter->osdep.pc = pa->pa_pc;
784 adapter->osdep.tag = pa->pa_tag;
785 if (pci_dma64_available(pa))
786 adapter->osdep.dmat = pa->pa_dmat64;
787 else
788 adapter->osdep.dmat = pa->pa_dmat;
789 adapter->osdep.attached = false;
790
791 ent = ixgbe_lookup(pa);
792
793 KASSERT(ent != NULL);
794
795 aprint_normal(": %s, Version - %s\n",
796 ixgbe_strings[ent->index], ixgbe_driver_version);
797
798 /* Core Lock Init*/
799 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
800
801 /* Set up the timer callout */
802 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
803
804 /* Determine hardware revision */
805 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
806 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
807
808 hw->vendor_id = PCI_VENDOR(id);
809 hw->device_id = PCI_PRODUCT(id);
810 hw->revision_id =
811 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
812 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
813 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
814
815 /*
816 * Make sure BUSMASTER is set
817 */
818 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
819
820 /* Do base PCI setup - map BAR0 */
821 if (ixgbe_allocate_pci_resources(adapter, pa)) {
822 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
823 error = ENXIO;
824 goto err_out;
825 }
826
827 /* let hardware know driver is loaded */
828 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
829 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
830 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
831
832 /*
833 * Initialize the shared code
834 */
835 if (ixgbe_init_shared_code(hw) != 0) {
836 aprint_error_dev(dev, "Unable to initialize the shared code\n");
837 error = ENXIO;
838 goto err_out;
839 }
840
841 switch (hw->mac.type) {
842 case ixgbe_mac_82598EB:
843 str = "82598EB";
844 break;
845 case ixgbe_mac_82599EB:
846 str = "82599EB";
847 break;
848 case ixgbe_mac_X540:
849 str = "X540";
850 break;
851 case ixgbe_mac_X550:
852 str = "X550";
853 break;
854 case ixgbe_mac_X550EM_x:
855 str = "X550EM";
856 break;
857 case ixgbe_mac_X550EM_a:
858 str = "X550EM A";
859 break;
860 default:
861 str = "Unknown";
862 break;
863 }
864 aprint_normal_dev(dev, "device %s\n", str);
865
866 if (hw->mbx.ops.init_params)
867 hw->mbx.ops.init_params(hw);
868
869 hw->allow_unsupported_sfp = allow_unsupported_sfp;
870
871 /* Pick up the 82599 settings */
872 if (hw->mac.type != ixgbe_mac_82598EB) {
873 hw->phy.smart_speed = ixgbe_smart_speed;
874 adapter->num_segs = IXGBE_82599_SCATTER;
875 } else
876 adapter->num_segs = IXGBE_82598_SCATTER;
877
878 hw->mac.ops.set_lan_id(hw);
879 ixgbe_init_device_features(adapter);
880
881 if (ixgbe_configure_interrupts(adapter)) {
882 error = ENXIO;
883 goto err_out;
884 }
885
886 /* Allocate multicast array memory. */
887 adapter->mta = malloc(sizeof(*adapter->mta) *
888 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
889 if (adapter->mta == NULL) {
890 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
891 error = ENOMEM;
892 goto err_out;
893 }
894
895 /* Enable WoL (if supported) */
896 ixgbe_check_wol_support(adapter);
897
898 /* Verify adapter fan is still functional (if applicable) */
899 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
900 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
901 ixgbe_check_fan_failure(adapter, esdp, FALSE);
902 }
903
904 /* Ensure SW/FW semaphore is free */
905 ixgbe_init_swfw_semaphore(hw);
906
907 /* Enable EEE power saving */
908 if (adapter->feat_en & IXGBE_FEATURE_EEE)
909 hw->mac.ops.setup_eee(hw, TRUE);
910
911 /* Set an initial default flow control value */
912 hw->fc.requested_mode = ixgbe_flow_control;
913
914 /* Sysctls for limiting the amount of work done in the taskqueues */
915 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
916 "max number of rx packets to process",
917 &adapter->rx_process_limit, ixgbe_rx_process_limit);
918
919 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
920 "max number of tx packets to process",
921 &adapter->tx_process_limit, ixgbe_tx_process_limit);
922
923 /* Do descriptor calc and sanity checks */
924 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
925 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
926 aprint_error_dev(dev, "TXD config issue, using default!\n");
927 adapter->num_tx_desc = DEFAULT_TXD;
928 } else
929 adapter->num_tx_desc = ixgbe_txd;
930
931 /*
932 * With many RX rings it is easy to exceed the
933 * system mbuf allocation. Tuning nmbclusters
934 * can alleviate this.
935 */
936 if (nmbclusters > 0) {
937 int s;
938 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
939 if (s > nmbclusters) {
940 aprint_error_dev(dev, "RX Descriptors exceed "
941 "system mbuf max, using default instead!\n");
942 ixgbe_rxd = DEFAULT_RXD;
943 }
944 }
945
946 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
947 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
948 aprint_error_dev(dev, "RXD config issue, using default!\n");
949 adapter->num_rx_desc = DEFAULT_RXD;
950 } else
951 adapter->num_rx_desc = ixgbe_rxd;
952
953 /* Allocate our TX/RX Queues */
954 if (ixgbe_allocate_queues(adapter)) {
955 error = ENOMEM;
956 goto err_out;
957 }
958
959 hw->phy.reset_if_overtemp = TRUE;
960 error = ixgbe_reset_hw(hw);
961 hw->phy.reset_if_overtemp = FALSE;
962 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
963 /*
964 * No optics in this port, set up
965 * so the timer routine will probe
966 * for later insertion.
967 */
968 adapter->sfp_probe = TRUE;
969 error = IXGBE_SUCCESS;
970 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
971 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
972 error = EIO;
973 goto err_late;
974 } else if (error) {
975 aprint_error_dev(dev, "Hardware initialization failed\n");
976 error = EIO;
977 goto err_late;
978 }
979
980 /* Make sure we have a good EEPROM before we read from it */
981 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
982 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
983 error = EIO;
984 goto err_late;
985 }
986
987 aprint_normal("%s:", device_xname(dev));
988 /* NVM Image Version */
989 switch (hw->mac.type) {
990 case ixgbe_mac_X540:
991 case ixgbe_mac_X550EM_a:
992 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
993 if (nvmreg == 0xffff)
994 break;
995 high = (nvmreg >> 12) & 0x0f;
996 low = (nvmreg >> 4) & 0xff;
997 id = nvmreg & 0x0f;
998 aprint_normal(" NVM Image Version %u.", high);
999 if (hw->mac.type == ixgbe_mac_X540)
1000 str = "%x";
1001 else
1002 str = "%02x";
1003 aprint_normal(str, low);
1004 aprint_normal(" ID 0x%x,", id);
1005 break;
1006 case ixgbe_mac_X550EM_x:
1007 case ixgbe_mac_X550:
1008 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1009 if (nvmreg == 0xffff)
1010 break;
1011 high = (nvmreg >> 12) & 0x0f;
1012 low = nvmreg & 0xff;
1013 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1014 break;
1015 default:
1016 break;
1017 }
1018
1019 /* PHY firmware revision */
1020 switch (hw->mac.type) {
1021 case ixgbe_mac_X540:
1022 case ixgbe_mac_X550:
1023 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1024 if (nvmreg == 0xffff)
1025 break;
1026 high = (nvmreg >> 12) & 0x0f;
1027 low = (nvmreg >> 4) & 0xff;
1028 id = nvmreg & 0x000f;
1029 aprint_normal(" PHY FW Revision %u.", high);
1030 if (hw->mac.type == ixgbe_mac_X540)
1031 str = "%x";
1032 else
1033 str = "%02x";
1034 aprint_normal(str, low);
1035 aprint_normal(" ID 0x%x,", id);
1036 break;
1037 default:
1038 break;
1039 }
1040
1041 /* NVM Map version & OEM NVM Image version */
1042 switch (hw->mac.type) {
1043 case ixgbe_mac_X550:
1044 case ixgbe_mac_X550EM_x:
1045 case ixgbe_mac_X550EM_a:
1046 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1047 if (nvmreg != 0xffff) {
1048 high = (nvmreg >> 12) & 0x0f;
1049 low = nvmreg & 0x00ff;
1050 aprint_normal(" NVM Map version %u.%02x,", high, low);
1051 }
1052 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1053 if (nvmreg != 0xffff) {
1054 high = (nvmreg >> 12) & 0x0f;
1055 low = nvmreg & 0x00ff;
1056 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1057 low);
1058 }
1059 break;
1060 default:
1061 break;
1062 }
1063
1064 /* Print the ETrackID */
1065 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1066 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1067 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1068
1069 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1070 error = ixgbe_allocate_msix(adapter, pa);
1071 if (error) {
1072 /* Free allocated queue structures first */
1073 ixgbe_free_transmit_structures(adapter);
1074 ixgbe_free_receive_structures(adapter);
1075 free(adapter->queues, M_DEVBUF);
1076
1077 /* Fallback to legacy interrupt */
1078 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1079 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1080 adapter->feat_en |= IXGBE_FEATURE_MSI;
1081 adapter->num_queues = 1;
1082
1083 /* Allocate our TX/RX Queues again */
1084 if (ixgbe_allocate_queues(adapter)) {
1085 error = ENOMEM;
1086 goto err_out;
1087 }
1088 }
1089 }
1090 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1091 error = ixgbe_allocate_legacy(adapter, pa);
1092 if (error)
1093 goto err_late;
1094
1095 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1096 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1097 ixgbe_handle_link, adapter);
1098 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1099 ixgbe_handle_mod, adapter);
1100 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_handle_msf, adapter);
1102 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1103 ixgbe_handle_phy, adapter);
1104 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1105 adapter->fdir_si =
1106 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_reinit_fdir, adapter);
1108 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1109 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1110 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1111 && (adapter->fdir_si == NULL))) {
1112 aprint_error_dev(dev,
1113 "could not establish software interrupts ()\n");
1114 goto err_out;
1115 }
1116
1117 error = ixgbe_start_hw(hw);
1118 switch (error) {
1119 case IXGBE_ERR_EEPROM_VERSION:
1120 aprint_error_dev(dev, "This device is a pre-production adapter/"
1121 "LOM. Please be aware there may be issues associated "
1122 "with your hardware.\nIf you are experiencing problems "
1123 "please contact your Intel or hardware representative "
1124 "who provided you with this hardware.\n");
1125 break;
1126 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1127 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1128 error = EIO;
1129 goto err_late;
1130 case IXGBE_ERR_SFP_NOT_PRESENT:
1131 aprint_error_dev(dev, "No SFP+ Module found\n");
1132 /* falls thru */
1133 default:
1134 break;
1135 }
1136
1137 /* Setup OS specific network interface */
1138 if (ixgbe_setup_interface(dev, adapter) != 0)
1139 goto err_late;
1140
1141 /*
1142 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1143 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1144 */
1145 if (hw->phy.media_type == ixgbe_media_type_copper) {
1146 uint16_t id1, id2;
1147 int oui, model, rev;
1148 const char *descr;
1149
1150 id1 = hw->phy.id >> 16;
1151 id2 = hw->phy.id & 0xffff;
1152 oui = MII_OUI(id1, id2);
1153 model = MII_MODEL(id2);
1154 rev = MII_REV(id2);
1155 if ((descr = mii_get_descr(oui, model)) != NULL)
1156 aprint_normal_dev(dev,
1157 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1158 descr, oui, model, rev);
1159 else
1160 aprint_normal_dev(dev,
1161 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1162 oui, model, rev);
1163 }
1164
1165 /* Enable the optics for 82599 SFP+ fiber */
1166 ixgbe_enable_tx_laser(hw);
1167
1168 /* Enable power to the phy. */
1169 ixgbe_set_phy_power(hw, TRUE);
1170
1171 /* Initialize statistics */
1172 ixgbe_update_stats_counters(adapter);
1173
1174 /* Check PCIE slot type/speed/width */
1175 ixgbe_get_slot_info(adapter);
1176
1177 /*
1178 * Do time init and sysctl init here, but
1179 * only on the first port of a bypass adapter.
1180 */
1181 ixgbe_bypass_init(adapter);
1182
1183 /* Set an initial dmac value */
1184 adapter->dmac = 0;
1185 /* Set initial advertised speeds (if applicable) */
1186 adapter->advertise = ixgbe_get_advertise(adapter);
1187
1188 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1189 ixgbe_define_iov_schemas(dev, &error);
1190
1191 /* Add sysctls */
1192 ixgbe_add_device_sysctls(adapter);
1193 ixgbe_add_hw_stats(adapter);
1194
1195 /* For Netmap */
1196 adapter->init_locked = ixgbe_init_locked;
1197 adapter->stop_locked = ixgbe_stop;
1198
1199 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1200 ixgbe_netmap_attach(adapter);
1201
1202 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1203 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1204 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1205 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1206
1207 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1208 pmf_class_network_register(dev, adapter->ifp);
1209 else
1210 aprint_error_dev(dev, "couldn't establish power handler\n");
1211
1212 INIT_DEBUGOUT("ixgbe_attach: end");
1213 adapter->osdep.attached = true;
1214
1215 return;
1216
1217 err_late:
1218 ixgbe_free_transmit_structures(adapter);
1219 ixgbe_free_receive_structures(adapter);
1220 free(adapter->queues, M_DEVBUF);
1221 err_out:
1222 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1223 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1224 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1225 ixgbe_free_softint(adapter);
1226 ixgbe_free_pci_resources(adapter);
1227 if (adapter->mta != NULL)
1228 free(adapter->mta, M_DEVBUF);
1229 IXGBE_CORE_LOCK_DESTROY(adapter);
1230
1231 return;
1232 } /* ixgbe_attach */
1233
1234 /************************************************************************
1235 * ixgbe_check_wol_support
1236 *
1237 * Checks whether the adapter's ports are capable of
1238 * Wake On LAN by reading the adapter's NVM.
1239 *
1240 * Sets each port's hw->wol_enabled value depending
1241 * on the value read here.
1242 ************************************************************************/
1243 static void
1244 ixgbe_check_wol_support(struct adapter *adapter)
1245 {
1246 struct ixgbe_hw *hw = &adapter->hw;
1247 u16 dev_caps = 0;
1248
1249 /* Find out WoL support for port */
1250 adapter->wol_support = hw->wol_enabled = 0;
1251 ixgbe_get_device_caps(hw, &dev_caps);
1252 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1253 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1254 hw->bus.func == 0))
1255 adapter->wol_support = hw->wol_enabled = 1;
1256
1257 /* Save initial wake up filter configuration */
1258 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1259
1260 return;
1261 } /* ixgbe_check_wol_support */
1262
1263 /************************************************************************
1264 * ixgbe_setup_interface
1265 *
1266 * Setup networking device structure and register an interface.
1267 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; no separate allocation. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only; dead code on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Multiqueue transmit via if_transmit unless LEGACY_TX is
	 * enabled; if_start is set unconditionally as the fallback.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* -2: a full descriptor ring must always keep head != tail */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* All ifp fields must be set before if_initialize(). */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (advertised, but not enabled by default) */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6
			     | IFCAP_LRO;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	/* Must be last: makes the interface visible to the system. */
	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1371
1372 /************************************************************************
1373 * ixgbe_add_media_types
1374 ************************************************************************/
1375 static void
1376 ixgbe_add_media_types(struct adapter *adapter)
1377 {
1378 struct ixgbe_hw *hw = &adapter->hw;
1379 device_t dev = adapter->dev;
1380 u64 layer;
1381
1382 layer = adapter->phy_layer;
1383
1384 #define ADD(mm, dd) \
1385 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1386
1387 ADD(IFM_NONE, 0);
1388
1389 /* Media types with matching NetBSD media defines */
1390 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1391 ADD(IFM_10G_T | IFM_FDX, 0);
1392 }
1393 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1394 ADD(IFM_1000_T | IFM_FDX, 0);
1395 }
1396 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1397 ADD(IFM_100_TX | IFM_FDX, 0);
1398 }
1399 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1400 ADD(IFM_10_T | IFM_FDX, 0);
1401 }
1402
1403 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1404 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1405 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1406 }
1407
1408 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1409 ADD(IFM_10G_LR | IFM_FDX, 0);
1410 if (hw->phy.multispeed_fiber) {
1411 ADD(IFM_1000_LX | IFM_FDX, 0);
1412 }
1413 }
1414 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1415 ADD(IFM_10G_SR | IFM_FDX, 0);
1416 if (hw->phy.multispeed_fiber) {
1417 ADD(IFM_1000_SX | IFM_FDX, 0);
1418 }
1419 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1420 ADD(IFM_1000_SX | IFM_FDX, 0);
1421 }
1422 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1423 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1424 }
1425
1426 #ifdef IFM_ETH_XTYPE
1427 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1428 ADD(IFM_10G_KR | IFM_FDX, 0);
1429 }
1430 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1431 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1432 }
1433 #else
1434 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1435 device_printf(dev, "Media supported: 10GbaseKR\n");
1436 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1437 ADD(IFM_10G_SR | IFM_FDX, 0);
1438 }
1439 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1440 device_printf(dev, "Media supported: 10GbaseKX4\n");
1441 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1442 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1443 }
1444 #endif
1445 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1446 ADD(IFM_1000_KX | IFM_FDX, 0);
1447 }
1448 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1449 ADD(IFM_2500_KX | IFM_FDX, 0);
1450 }
1451 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1452 ADD(IFM_2500_T | IFM_FDX, 0);
1453 }
1454 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1455 ADD(IFM_5000_T | IFM_FDX, 0);
1456 }
1457 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1458 device_printf(dev, "Media supported: 1000baseBX\n");
1459 /* XXX no ifmedia_set? */
1460
1461 ADD(IFM_AUTO, 0);
1462
1463 #undef ADD
1464 } /* ixgbe_add_media_types */
1465
1466 /************************************************************************
1467 * ixgbe_is_sfp
1468 ************************************************************************/
1469 static inline bool
1470 ixgbe_is_sfp(struct ixgbe_hw *hw)
1471 {
1472 switch (hw->mac.type) {
1473 case ixgbe_mac_82598EB:
1474 if (hw->phy.type == ixgbe_phy_nl)
1475 return (TRUE);
1476 return (FALSE);
1477 case ixgbe_mac_82599EB:
1478 switch (hw->mac.ops.get_media_type(hw)) {
1479 case ixgbe_media_type_fiber:
1480 case ixgbe_media_type_fiber_qsfp:
1481 return (TRUE);
1482 default:
1483 return (FALSE);
1484 }
1485 case ixgbe_mac_X550EM_x:
1486 case ixgbe_mac_X550EM_a:
1487 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1488 return (TRUE);
1489 return (FALSE);
1490 default:
1491 return (FALSE);
1492 }
1493 } /* ixgbe_is_sfp */
1494
1495 /************************************************************************
1496 * ixgbe_config_link
1497 ************************************************************************/
1498 static void
1499 ixgbe_config_link(struct adapter *adapter)
1500 {
1501 struct ixgbe_hw *hw = &adapter->hw;
1502 u32 autoneg, err = 0;
1503 bool sfp, negotiate = false;
1504
1505 sfp = ixgbe_is_sfp(hw);
1506
1507 if (sfp) {
1508 if (hw->phy.multispeed_fiber) {
1509 ixgbe_enable_tx_laser(hw);
1510 kpreempt_disable();
1511 softint_schedule(adapter->msf_si);
1512 kpreempt_enable();
1513 }
1514 kpreempt_disable();
1515 softint_schedule(adapter->mod_si);
1516 kpreempt_enable();
1517 } else {
1518 struct ifmedia *ifm = &adapter->media;
1519
1520 if (hw->mac.ops.check_link)
1521 err = ixgbe_check_link(hw, &adapter->link_speed,
1522 &adapter->link_up, FALSE);
1523 if (err)
1524 return;
1525
1526 /*
1527 * Check if it's the first call. If it's the first call,
1528 * get value for auto negotiation.
1529 */
1530 autoneg = hw->phy.autoneg_advertised;
1531 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1532 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1533 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1534 &negotiate);
1535 if (err)
1536 return;
1537 if (hw->mac.ops.setup_link)
1538 err = hw->mac.ops.setup_link(hw, autoneg,
1539 adapter->link_up);
1540 }
1541
1542 } /* ixgbe_config_link */
1543
1544 /************************************************************************
1545 * ixgbe_update_stats_counters - Update board statistics counters.
1546 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* CRC and basic error counters (read-to-clear registers) */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		/* Fold the 16 hardware queues onto the active queue count */
		int j = i % adapter->num_queues;

		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers */
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		/* Per-priority XON/XOFF flow-control counters */
		stats->pxontxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	/*
	 * NOTE(review): missed_rx is initialized to 0 and never updated
	 * in this function, so this subtraction is currently a no-op
	 * (the FreeBSD origin accumulated MPC into it) — confirm intended.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	/*
	 * Byte counters: 82599+ have full 36-bit low/high pairs; the
	 * transmitted-byte count is adjusted down by one minimum-size
	 * frame per pause frame sent.
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Received-packet size histogram */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, adjusted by the pause frames counted above */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1708
1709 /************************************************************************
1710 * ixgbe_add_hw_stats
1711 *
1712 * Add sysctl variables, one per statistic, to the system.
1713 ************************************************************************/
1714 static void
1715 ixgbe_add_hw_stats(struct adapter *adapter)
1716 {
1717 device_t dev = adapter->dev;
1718 const struct sysctlnode *rnode, *cnode;
1719 struct sysctllog **log = &adapter->sysctllog;
1720 struct tx_ring *txr = adapter->tx_rings;
1721 struct rx_ring *rxr = adapter->rx_rings;
1722 struct ixgbe_hw *hw = &adapter->hw;
1723 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1724 const char *xname = device_xname(dev);
1725 int i;
1726
1727 /* Driver Statistics */
1728 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1729 NULL, xname, "Driver tx dma soft fail EFBIG");
1730 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1731 NULL, xname, "m_defrag() failed");
1732 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1733 NULL, xname, "Driver tx dma hard fail EFBIG");
1734 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1735 NULL, xname, "Driver tx dma hard fail EINVAL");
1736 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1737 NULL, xname, "Driver tx dma hard fail other");
1738 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1739 NULL, xname, "Driver tx dma soft fail EAGAIN");
1740 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1741 NULL, xname, "Driver tx dma soft fail ENOMEM");
1742 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1743 NULL, xname, "Watchdog timeouts");
1744 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1745 NULL, xname, "TSO errors");
1746 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1747 NULL, xname, "Link MSI-X IRQ Handled");
1748 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1749 NULL, xname, "Link softint");
1750 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1751 NULL, xname, "module softint");
1752 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1753 NULL, xname, "multimode softint");
1754 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1755 NULL, xname, "external PHY softint");
1756
1757 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1758 #ifdef LRO
1759 struct lro_ctrl *lro = &rxr->lro;
1760 #endif /* LRO */
1761
1762 snprintf(adapter->queues[i].evnamebuf,
1763 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1764 xname, i);
1765 snprintf(adapter->queues[i].namebuf,
1766 sizeof(adapter->queues[i].namebuf), "q%d", i);
1767
1768 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1769 aprint_error_dev(dev, "could not create sysctl root\n");
1770 break;
1771 }
1772
1773 if (sysctl_createv(log, 0, &rnode, &rnode,
1774 0, CTLTYPE_NODE,
1775 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1776 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1777 break;
1778
1779 if (sysctl_createv(log, 0, &rnode, &cnode,
1780 CTLFLAG_READWRITE, CTLTYPE_INT,
1781 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1782 ixgbe_sysctl_interrupt_rate_handler, 0,
1783 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1784 break;
1785
1786 if (sysctl_createv(log, 0, &rnode, &cnode,
1787 CTLFLAG_READONLY, CTLTYPE_INT,
1788 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1789 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1790 0, CTL_CREATE, CTL_EOL) != 0)
1791 break;
1792
1793 if (sysctl_createv(log, 0, &rnode, &cnode,
1794 CTLFLAG_READONLY, CTLTYPE_INT,
1795 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1796 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1797 0, CTL_CREATE, CTL_EOL) != 0)
1798 break;
1799
1800 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1801 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1802 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1803 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1804 "Handled queue in softint");
1805 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1806 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1807 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1808 NULL, adapter->queues[i].evnamebuf, "TSO");
1809 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1810 NULL, adapter->queues[i].evnamebuf,
1811 "Queue No Descriptor Available");
1812 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1813 NULL, adapter->queues[i].evnamebuf,
1814 "Queue Packets Transmitted");
1815 #ifndef IXGBE_LEGACY_TX
1816 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1817 NULL, adapter->queues[i].evnamebuf,
1818 "Packets dropped in pcq");
1819 #endif
1820
1821 if (sysctl_createv(log, 0, &rnode, &cnode,
1822 CTLFLAG_READONLY,
1823 CTLTYPE_INT,
1824 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1825 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1826 CTL_CREATE, CTL_EOL) != 0)
1827 break;
1828
1829 if (sysctl_createv(log, 0, &rnode, &cnode,
1830 CTLFLAG_READONLY,
1831 CTLTYPE_INT,
1832 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1833 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1834 CTL_CREATE, CTL_EOL) != 0)
1835 break;
1836
1837 if (sysctl_createv(log, 0, &rnode, &cnode,
1838 CTLFLAG_READONLY,
1839 CTLTYPE_INT,
1840 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1841 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1842 CTL_CREATE, CTL_EOL) != 0)
1843 break;
1844
1845 if (i < __arraycount(stats->mpc)) {
1846 evcnt_attach_dynamic(&stats->mpc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1848 "RX Missed Packet Count");
1849 if (hw->mac.type == ixgbe_mac_82598EB)
1850 evcnt_attach_dynamic(&stats->rnbc[i],
1851 EVCNT_TYPE_MISC, NULL,
1852 adapter->queues[i].evnamebuf,
1853 "Receive No Buffers");
1854 }
1855 if (i < __arraycount(stats->pxontxc)) {
1856 evcnt_attach_dynamic(&stats->pxontxc[i],
1857 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1858 "pxontxc");
1859 evcnt_attach_dynamic(&stats->pxonrxc[i],
1860 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1861 "pxonrxc");
1862 evcnt_attach_dynamic(&stats->pxofftxc[i],
1863 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1864 "pxofftxc");
1865 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1866 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1867 "pxoffrxc");
1868 if (hw->mac.type >= ixgbe_mac_82599EB)
1869 evcnt_attach_dynamic(&stats->pxon2offc[i],
1870 EVCNT_TYPE_MISC, NULL,
1871 adapter->queues[i].evnamebuf,
1872 "pxon2offc");
1873 }
1874 if (i < __arraycount(stats->qprc)) {
1875 evcnt_attach_dynamic(&stats->qprc[i],
1876 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1877 "qprc");
1878 evcnt_attach_dynamic(&stats->qptc[i],
1879 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1880 "qptc");
1881 evcnt_attach_dynamic(&stats->qbrc[i],
1882 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1883 "qbrc");
1884 evcnt_attach_dynamic(&stats->qbtc[i],
1885 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1886 "qbtc");
1887 if (hw->mac.type >= ixgbe_mac_82599EB)
1888 evcnt_attach_dynamic(&stats->qprdc[i],
1889 EVCNT_TYPE_MISC, NULL,
1890 adapter->queues[i].evnamebuf, "qprdc");
1891 }
1892
1893 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1894 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1895 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1896 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1897 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1898 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1899 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1900 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1901 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1902 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1903 #ifdef LRO
1904 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1905 CTLFLAG_RD, &lro->lro_queued, 0,
1906 "LRO Queued");
1907 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1908 CTLFLAG_RD, &lro->lro_flushed, 0,
1909 "LRO Flushed");
1910 #endif /* LRO */
1911 }
1912
1913 /* MAC stats get their own sub node */
1914
1915 snprintf(stats->namebuf,
1916 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1917
1918 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1919 stats->namebuf, "rx csum offload - IP");
1920 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1921 stats->namebuf, "rx csum offload - L4");
1922 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1923 stats->namebuf, "rx csum offload - IP bad");
1924 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1925 stats->namebuf, "rx csum offload - L4 bad");
1926 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1927 stats->namebuf, "Interrupt conditions zero");
1928 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1929 stats->namebuf, "Legacy interrupts");
1930
1931 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1932 stats->namebuf, "CRC Errors");
1933 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1934 stats->namebuf, "Illegal Byte Errors");
1935 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1936 stats->namebuf, "Byte Errors");
1937 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1938 stats->namebuf, "MAC Short Packets Discarded");
1939 if (hw->mac.type >= ixgbe_mac_X550)
1940 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "Bad SFD");
1942 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "Total Packets Missed");
1944 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "MAC Local Faults");
1946 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "MAC Remote Faults");
1948 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "Receive Length Errors");
1950 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Link XON Transmitted");
1952 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Link XON Received");
1954 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Link XOFF Transmitted");
1956 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "Link XOFF Received");
1958
1959 /* Packet Reception Stats */
1960 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Total Octets Received");
1962 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "Good Octets Received");
1964 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "Total Packets Received");
1966 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "Good Packets Received");
1968 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "Multicast Packets Received");
1970 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "Broadcast Packets Received");
1972 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "64 byte frames received ");
1974 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "65-127 byte frames received");
1976 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "128-255 byte frames received");
1978 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "256-511 byte frames received");
1980 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "512-1023 byte frames received");
1982 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "1023-1522 byte frames received");
1984 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Receive Undersized");
1986 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Fragmented Packets Received ");
1988 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Oversized Packets Received");
1990 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Received Jabber");
1992 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Management Packets Received");
1994 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Management Packets Dropped");
1996 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "Checksum Errors");
1998
1999 /* Packet Transmission Stats */
2000 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "Good Octets Transmitted");
2002 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "Total Packets Transmitted");
2004 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "Good Packets Transmitted");
2006 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Broadcast Packets Transmitted");
2008 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Multicast Packets Transmitted");
2010 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Management Packets Transmitted");
2012 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "64 byte frames transmitted ");
2014 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "65-127 byte frames transmitted");
2016 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "128-255 byte frames transmitted");
2018 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "256-511 byte frames transmitted");
2020 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "512-1023 byte frames transmitted");
2022 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "1024-1522 byte frames transmitted");
2024 } /* ixgbe_add_hw_stats */
2025
2026 static void
2027 ixgbe_clear_evcnt(struct adapter *adapter)
2028 {
2029 struct tx_ring *txr = adapter->tx_rings;
2030 struct rx_ring *rxr = adapter->rx_rings;
2031 struct ixgbe_hw *hw = &adapter->hw;
2032 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2033
2034 adapter->efbig_tx_dma_setup.ev_count = 0;
2035 adapter->mbuf_defrag_failed.ev_count = 0;
2036 adapter->efbig2_tx_dma_setup.ev_count = 0;
2037 adapter->einval_tx_dma_setup.ev_count = 0;
2038 adapter->other_tx_dma_setup.ev_count = 0;
2039 adapter->eagain_tx_dma_setup.ev_count = 0;
2040 adapter->enomem_tx_dma_setup.ev_count = 0;
2041 adapter->tso_err.ev_count = 0;
2042 adapter->watchdog_events.ev_count = 0;
2043 adapter->link_irq.ev_count = 0;
2044 adapter->link_sicount.ev_count = 0;
2045 adapter->mod_sicount.ev_count = 0;
2046 adapter->msf_sicount.ev_count = 0;
2047 adapter->phy_sicount.ev_count = 0;
2048
2049 txr = adapter->tx_rings;
2050 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2051 adapter->queues[i].irqs.ev_count = 0;
2052 adapter->queues[i].handleq.ev_count = 0;
2053 adapter->queues[i].req.ev_count = 0;
2054 txr->no_desc_avail.ev_count = 0;
2055 txr->total_packets.ev_count = 0;
2056 txr->tso_tx.ev_count = 0;
2057 #ifndef IXGBE_LEGACY_TX
2058 txr->pcq_drops.ev_count = 0;
2059 #endif
2060 txr->q_efbig_tx_dma_setup = 0;
2061 txr->q_mbuf_defrag_failed = 0;
2062 txr->q_efbig2_tx_dma_setup = 0;
2063 txr->q_einval_tx_dma_setup = 0;
2064 txr->q_other_tx_dma_setup = 0;
2065 txr->q_eagain_tx_dma_setup = 0;
2066 txr->q_enomem_tx_dma_setup = 0;
2067 txr->q_tso_err = 0;
2068
2069 if (i < __arraycount(stats->mpc)) {
2070 stats->mpc[i].ev_count = 0;
2071 if (hw->mac.type == ixgbe_mac_82598EB)
2072 stats->rnbc[i].ev_count = 0;
2073 }
2074 if (i < __arraycount(stats->pxontxc)) {
2075 stats->pxontxc[i].ev_count = 0;
2076 stats->pxonrxc[i].ev_count = 0;
2077 stats->pxofftxc[i].ev_count = 0;
2078 stats->pxoffrxc[i].ev_count = 0;
2079 if (hw->mac.type >= ixgbe_mac_82599EB)
2080 stats->pxon2offc[i].ev_count = 0;
2081 }
2082 if (i < __arraycount(stats->qprc)) {
2083 stats->qprc[i].ev_count = 0;
2084 stats->qptc[i].ev_count = 0;
2085 stats->qbrc[i].ev_count = 0;
2086 stats->qbtc[i].ev_count = 0;
2087 if (hw->mac.type >= ixgbe_mac_82599EB)
2088 stats->qprdc[i].ev_count = 0;
2089 }
2090
2091 rxr->rx_packets.ev_count = 0;
2092 rxr->rx_bytes.ev_count = 0;
2093 rxr->rx_copies.ev_count = 0;
2094 rxr->no_jmbuf.ev_count = 0;
2095 rxr->rx_discarded.ev_count = 0;
2096 }
2097 stats->ipcs.ev_count = 0;
2098 stats->l4cs.ev_count = 0;
2099 stats->ipcs_bad.ev_count = 0;
2100 stats->l4cs_bad.ev_count = 0;
2101 stats->intzero.ev_count = 0;
2102 stats->legint.ev_count = 0;
2103 stats->crcerrs.ev_count = 0;
2104 stats->illerrc.ev_count = 0;
2105 stats->errbc.ev_count = 0;
2106 stats->mspdc.ev_count = 0;
2107 stats->mbsdc.ev_count = 0;
2108 stats->mpctotal.ev_count = 0;
2109 stats->mlfc.ev_count = 0;
2110 stats->mrfc.ev_count = 0;
2111 stats->rlec.ev_count = 0;
2112 stats->lxontxc.ev_count = 0;
2113 stats->lxonrxc.ev_count = 0;
2114 stats->lxofftxc.ev_count = 0;
2115 stats->lxoffrxc.ev_count = 0;
2116
2117 /* Packet Reception Stats */
2118 stats->tor.ev_count = 0;
2119 stats->gorc.ev_count = 0;
2120 stats->tpr.ev_count = 0;
2121 stats->gprc.ev_count = 0;
2122 stats->mprc.ev_count = 0;
2123 stats->bprc.ev_count = 0;
2124 stats->prc64.ev_count = 0;
2125 stats->prc127.ev_count = 0;
2126 stats->prc255.ev_count = 0;
2127 stats->prc511.ev_count = 0;
2128 stats->prc1023.ev_count = 0;
2129 stats->prc1522.ev_count = 0;
2130 stats->ruc.ev_count = 0;
2131 stats->rfc.ev_count = 0;
2132 stats->roc.ev_count = 0;
2133 stats->rjc.ev_count = 0;
2134 stats->mngprc.ev_count = 0;
2135 stats->mngpdc.ev_count = 0;
2136 stats->xec.ev_count = 0;
2137
2138 /* Packet Transmission Stats */
2139 stats->gotc.ev_count = 0;
2140 stats->tpt.ev_count = 0;
2141 stats->gptc.ev_count = 0;
2142 stats->bptc.ev_count = 0;
2143 stats->mptc.ev_count = 0;
2144 stats->mngptc.ev_count = 0;
2145 stats->ptc64.ev_count = 0;
2146 stats->ptc127.ev_count = 0;
2147 stats->ptc255.ev_count = 0;
2148 stats->ptc511.ev_count = 0;
2149 stats->ptc1023.ev_count = 0;
2150 stats->ptc1522.ev_count = 0;
2151 }
2152
2153 /************************************************************************
2154 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2155 *
2156 * Retrieves the TDH value from the hardware
2157 ************************************************************************/
2158 static int
2159 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2160 {
2161 struct sysctlnode node = *rnode;
2162 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2163 uint32_t val;
2164
2165 if (!txr)
2166 return (0);
2167
2168 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2169 node.sysctl_data = &val;
2170 return sysctl_lookup(SYSCTLFN_CALL(&node));
2171 } /* ixgbe_sysctl_tdh_handler */
2172
2173 /************************************************************************
2174 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2175 *
2176 * Retrieves the TDT value from the hardware
2177 ************************************************************************/
2178 static int
2179 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2180 {
2181 struct sysctlnode node = *rnode;
2182 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2183 uint32_t val;
2184
2185 if (!txr)
2186 return (0);
2187
2188 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2189 node.sysctl_data = &val;
2190 return sysctl_lookup(SYSCTLFN_CALL(&node));
2191 } /* ixgbe_sysctl_tdt_handler */
2192
2193 /************************************************************************
2194 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2195 * handler function
2196 *
2197 * Retrieves the next_to_check value
2198 ************************************************************************/
2199 static int
2200 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2201 {
2202 struct sysctlnode node = *rnode;
2203 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2204 uint32_t val;
2205
2206 if (!rxr)
2207 return (0);
2208
2209 val = rxr->next_to_check;
2210 node.sysctl_data = &val;
2211 return sysctl_lookup(SYSCTLFN_CALL(&node));
2212 } /* ixgbe_sysctl_next_to_check_handler */
2213
2214 /************************************************************************
2215 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2216 *
2217 * Retrieves the RDH value from the hardware
2218 ************************************************************************/
2219 static int
2220 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2221 {
2222 struct sysctlnode node = *rnode;
2223 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2224 uint32_t val;
2225
2226 if (!rxr)
2227 return (0);
2228
2229 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2230 node.sysctl_data = &val;
2231 return sysctl_lookup(SYSCTLFN_CALL(&node));
2232 } /* ixgbe_sysctl_rdh_handler */
2233
2234 /************************************************************************
2235 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2236 *
2237 * Retrieves the RDT value from the hardware
2238 ************************************************************************/
2239 static int
2240 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2241 {
2242 struct sysctlnode node = *rnode;
2243 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2244 uint32_t val;
2245
2246 if (!rxr)
2247 return (0);
2248
2249 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2250 node.sysctl_data = &val;
2251 return sysctl_lookup(SYSCTLFN_CALL(&node));
2252 } /* ixgbe_sysctl_rdt_handler */
2253
#if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixgbe_register_vlan
 *
 *   Run via vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id.  This
 *   just creates the entry in the soft version of the
 *   VFTA, init will repopulate the real table.
 *
 *   NOTE: compiled out (see #if 0 above) until NetBSD's vlan(4)
 *   provides the necessary config/unconfig event hooks.
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each 32-bit VFTA word covers 32 VLAN IDs. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to push the shadow VFTA into hardware. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */

/************************************************************************
 * ixgbe_unregister_vlan
 *
 *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the corresponding bit in the shadow VFTA. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
#endif
2309
2310 static void
2311 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2312 {
2313 struct ethercom *ec = &adapter->osdep.ec;
2314 struct ixgbe_hw *hw = &adapter->hw;
2315 struct rx_ring *rxr;
2316 int i;
2317 u32 ctrl;
2318
2319
2320 /*
2321 * We get here thru init_locked, meaning
2322 * a soft reset, this has already cleared
2323 * the VFTA and other state, so if there
2324 * have been no vlan's registered do nothing.
2325 */
2326 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2327 return;
2328
2329 /* Setup the queues for vlans */
2330 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2331 for (i = 0; i < adapter->num_queues; i++) {
2332 rxr = &adapter->rx_rings[i];
2333 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2334 if (hw->mac.type != ixgbe_mac_82598EB) {
2335 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2336 ctrl |= IXGBE_RXDCTL_VME;
2337 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2338 }
2339 rxr->vtag_strip = TRUE;
2340 }
2341 }
2342
2343 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2344 return;
2345 /*
2346 * A soft reset zero's out the VFTA, so
2347 * we need to repopulate it now.
2348 */
2349 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2350 if (adapter->shadow_vfta[i] != 0)
2351 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2352 adapter->shadow_vfta[i]);
2353
2354 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2355 /* Enable the Filter Table if enabled */
2356 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2357 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2358 ctrl |= IXGBE_VLNCTRL_VFE;
2359 }
2360 if (hw->mac.type == ixgbe_mac_82598EB)
2361 ctrl |= IXGBE_VLNCTRL_VME;
2362 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2363 } /* ixgbe_setup_vlan_hw_support */
2364
2365 /************************************************************************
2366 * ixgbe_get_slot_info
2367 *
2368 * Get the width and transaction speed of
2369 * the slot this adapter is plugged into.
2370 ************************************************************************/
2371 static void
2372 ixgbe_get_slot_info(struct adapter *adapter)
2373 {
2374 device_t dev = adapter->dev;
2375 struct ixgbe_hw *hw = &adapter->hw;
2376 u32 offset;
2377 u16 link;
2378 int bus_info_valid = TRUE;
2379
2380 /* Some devices are behind an internal bridge */
2381 switch (hw->device_id) {
2382 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2383 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2384 goto get_parent_info;
2385 default:
2386 break;
2387 }
2388
2389 ixgbe_get_bus_info(hw);
2390
2391 /*
2392 * Some devices don't use PCI-E, but there is no need
2393 * to display "Unknown" for bus speed and width.
2394 */
2395 switch (hw->mac.type) {
2396 case ixgbe_mac_X550EM_x:
2397 case ixgbe_mac_X550EM_a:
2398 return;
2399 default:
2400 goto display;
2401 }
2402
2403 get_parent_info:
2404 /*
2405 * For the Quad port adapter we need to parse back
2406 * up the PCI tree to find the speed of the expansion
2407 * slot into which this adapter is plugged. A bit more work.
2408 */
2409 dev = device_parent(device_parent(dev));
2410 #if 0
2411 #ifdef IXGBE_DEBUG
2412 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2413 pci_get_slot(dev), pci_get_function(dev));
2414 #endif
2415 dev = device_parent(device_parent(dev));
2416 #ifdef IXGBE_DEBUG
2417 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2418 pci_get_slot(dev), pci_get_function(dev));
2419 #endif
2420 #endif
2421 /* Now get the PCI Express Capabilities offset */
2422 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2423 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2424 /*
2425 * Hmm...can't get PCI-Express capabilities.
2426 * Falling back to default method.
2427 */
2428 bus_info_valid = FALSE;
2429 ixgbe_get_bus_info(hw);
2430 goto display;
2431 }
2432 /* ...and read the Link Status Register */
2433 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2434 offset + PCIE_LCSR) >> 16;
2435 ixgbe_set_pci_config_data_generic(hw, link);
2436
2437 display:
2438 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2439 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2440 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2441 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2442 "Unknown"),
2443 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2444 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2445 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2446 "Unknown"));
2447
2448 if (bus_info_valid) {
2449 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2450 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2451 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2452 device_printf(dev, "PCI-Express bandwidth available"
2453 " for this card\n is not sufficient for"
2454 " optimal performance.\n");
2455 device_printf(dev, "For optimal performance a x8 "
2456 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2457 }
2458 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2459 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2460 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2461 device_printf(dev, "PCI-Express bandwidth available"
2462 " for this card\n is not sufficient for"
2463 " optimal performance.\n");
2464 device_printf(dev, "For optimal performance a x8 "
2465 "PCIE Gen3 slot is required.\n");
2466 }
2467 } else
2468 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2469
2470 return;
2471 } /* ixgbe_get_slot_info */
2472
2473 /************************************************************************
2474 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2475 ************************************************************************/
2476 static inline void
2477 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2478 {
2479 struct ixgbe_hw *hw = &adapter->hw;
2480 struct ix_queue *que = &adapter->queues[vector];
2481 u64 queue = (u64)(1ULL << vector);
2482 u32 mask;
2483
2484 mutex_enter(&que->dc_mtx);
2485 if (que->disabled_count > 0 && --que->disabled_count > 0)
2486 goto out;
2487
2488 if (hw->mac.type == ixgbe_mac_82598EB) {
2489 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2490 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2491 } else {
2492 mask = (queue & 0xFFFFFFFF);
2493 if (mask)
2494 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2495 mask = (queue >> 32);
2496 if (mask)
2497 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2498 }
2499 out:
2500 mutex_exit(&que->dc_mtx);
2501 } /* ixgbe_enable_queue */
2502
2503 /************************************************************************
2504 * ixgbe_disable_queue_internal
2505 ************************************************************************/
2506 static inline void
2507 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2508 {
2509 struct ixgbe_hw *hw = &adapter->hw;
2510 struct ix_queue *que = &adapter->queues[vector];
2511 u64 queue = (u64)(1ULL << vector);
2512 u32 mask;
2513
2514 mutex_enter(&que->dc_mtx);
2515
2516 if (que->disabled_count > 0) {
2517 if (nestok)
2518 que->disabled_count++;
2519 goto out;
2520 }
2521 que->disabled_count++;
2522
2523 if (hw->mac.type == ixgbe_mac_82598EB) {
2524 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2525 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2526 } else {
2527 mask = (queue & 0xFFFFFFFF);
2528 if (mask)
2529 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2530 mask = (queue >> 32);
2531 if (mask)
2532 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2533 }
2534 out:
2535 mutex_exit(&que->dc_mtx);
2536 } /* ixgbe_disable_queue_internal */
2537
2538 /************************************************************************
2539 * ixgbe_disable_queue
2540 ************************************************************************/
2541 static inline void
2542 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2543 {
2544
2545 ixgbe_disable_queue_internal(adapter, vector, true);
2546 } /* ixgbe_disable_queue */
2547
2548 /************************************************************************
2549 * ixgbe_sched_handle_que - schedule deferred packet processing
2550 ************************************************************************/
2551 static inline void
2552 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2553 {
2554
2555 if(que->txrx_use_workqueue) {
2556 /*
2557 * adapter->que_wq is bound to each CPU instead of
2558 * each NIC queue to reduce workqueue kthread. As we
2559 * should consider about interrupt affinity in this
2560 * function, the workqueue kthread must be WQ_PERCPU.
2561 * If create WQ_PERCPU workqueue kthread for each NIC
2562 * queue, that number of created workqueue kthread is
2563 * (number of used NIC queue) * (number of CPUs) =
2564 * (number of CPUs) ^ 2 most often.
2565 *
2566 * The same NIC queue's interrupts are avoided by
2567 * masking the queue's interrupt. And different
2568 * NIC queue's interrupts use different struct work
2569 * (que->wq_cookie). So, "enqueued flag" to avoid
2570 * twice workqueue_enqueue() is not required .
2571 */
2572 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2573 } else {
2574 softint_schedule(que->que_si);
2575 }
2576 }
2577
2578 /************************************************************************
2579 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2580 ************************************************************************/
2581 static int
2582 ixgbe_msix_que(void *arg)
2583 {
2584 struct ix_queue *que = arg;
2585 struct adapter *adapter = que->adapter;
2586 struct ifnet *ifp = adapter->ifp;
2587 struct tx_ring *txr = que->txr;
2588 struct rx_ring *rxr = que->rxr;
2589 bool more;
2590 u32 newitr = 0;
2591
2592 /* Protect against spurious interrupts */
2593 if ((ifp->if_flags & IFF_RUNNING) == 0)
2594 return 0;
2595
2596 ixgbe_disable_queue(adapter, que->msix);
2597 ++que->irqs.ev_count;
2598
2599 /*
2600 * Don't change "que->txrx_use_workqueue" from this point to avoid
2601 * flip-flopping softint/workqueue mode in one deferred processing.
2602 */
2603 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2604
2605 #ifdef __NetBSD__
2606 /* Don't run ixgbe_rxeof in interrupt context */
2607 more = true;
2608 #else
2609 more = ixgbe_rxeof(que);
2610 #endif
2611
2612 IXGBE_TX_LOCK(txr);
2613 ixgbe_txeof(txr);
2614 IXGBE_TX_UNLOCK(txr);
2615
2616 /* Do AIM now? */
2617
2618 if (adapter->enable_aim == false)
2619 goto no_calc;
2620 /*
2621 * Do Adaptive Interrupt Moderation:
2622 * - Write out last calculated setting
2623 * - Calculate based on average size over
2624 * the last interval.
2625 */
2626 if (que->eitr_setting)
2627 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2628
2629 que->eitr_setting = 0;
2630
2631 /* Idle, do nothing */
2632 if ((txr->bytes == 0) && (rxr->bytes == 0))
2633 goto no_calc;
2634
2635 if ((txr->bytes) && (txr->packets))
2636 newitr = txr->bytes/txr->packets;
2637 if ((rxr->bytes) && (rxr->packets))
2638 newitr = max(newitr, (rxr->bytes / rxr->packets));
2639 newitr += 24; /* account for hardware frame, crc */
2640
2641 /* set an upper boundary */
2642 newitr = min(newitr, 3000);
2643
2644 /* Be nice to the mid range */
2645 if ((newitr > 300) && (newitr < 1200))
2646 newitr = (newitr / 3);
2647 else
2648 newitr = (newitr / 2);
2649
2650 /*
2651 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2652 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2653 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2654 * on 1G and higher.
2655 */
2656 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2657 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2658 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2659 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2660 }
2661
2662 /* save for next interrupt */
2663 que->eitr_setting = newitr;
2664
2665 /* Reset state */
2666 txr->bytes = 0;
2667 txr->packets = 0;
2668 rxr->bytes = 0;
2669 rxr->packets = 0;
2670
2671 no_calc:
2672 if (more)
2673 ixgbe_sched_handle_que(adapter, que);
2674 else
2675 ixgbe_enable_queue(adapter, que->msix);
2676
2677 return 1;
2678 } /* ixgbe_msix_que */
2679
2680 /************************************************************************
2681 * ixgbe_media_status - Media Ioctl callback
2682 *
2683 * Called whenever the user queries the status of
2684 * the interface using ifconfig.
2685 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and stop here. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map the PHY layer(s) plus the negotiated link speed to an
	 * ifmedia subtype.  All supported speeds are full duplex.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Without IFM_ETH_XTYPE the backplane KR/KX4
	 * media are reported as the closest existing subtype.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2835
2836 /************************************************************************
2837 * ixgbe_media_change - Media Ioctl callback
2838 *
2839 * Called when the user changes speed/duplex using
2840 * media/mediopt option with ifconfig.
2841 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane links are auto-negotiated by hardware; refuse changes. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the PHY says it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		/* speed stays 0: setup_link() with no advertised speed. */
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds in the sysctl encoding
	 * (bit 0 = 100M, 1 = 1G, 2 = 10G, 3 = 10M, 4 = 2.5G, 5 = 5G);
	 * 0 means "autonegotiate everything".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2940
2941 /************************************************************************
2942 * ixgbe_set_promisc
2943 ************************************************************************/
2944 static void
2945 ixgbe_set_promisc(struct adapter *adapter)
2946 {
2947 struct ifnet *ifp = adapter->ifp;
2948 int mcnt = 0;
2949 u32 rctl;
2950 struct ether_multi *enm;
2951 struct ether_multistep step;
2952 struct ethercom *ec = &adapter->osdep.ec;
2953
2954 KASSERT(mutex_owned(&adapter->core_mtx));
2955 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2956 rctl &= (~IXGBE_FCTRL_UPE);
2957 if (ifp->if_flags & IFF_ALLMULTI)
2958 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2959 else {
2960 ETHER_LOCK(ec);
2961 ETHER_FIRST_MULTI(step, ec, enm);
2962 while (enm != NULL) {
2963 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2964 break;
2965 mcnt++;
2966 ETHER_NEXT_MULTI(step, enm);
2967 }
2968 ETHER_UNLOCK(ec);
2969 }
2970 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2971 rctl &= (~IXGBE_FCTRL_MPE);
2972 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2973
2974 if (ifp->if_flags & IFF_PROMISC) {
2975 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2977 } else if (ifp->if_flags & IFF_ALLMULTI) {
2978 rctl |= IXGBE_FCTRL_MPE;
2979 rctl &= ~IXGBE_FCTRL_UPE;
2980 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2981 }
2982 } /* ixgbe_set_promisc */
2983
2984 /************************************************************************
2985 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2986 ************************************************************************/
2987 static int
2988 ixgbe_msix_link(void *arg)
2989 {
2990 struct adapter *adapter = arg;
2991 struct ixgbe_hw *hw = &adapter->hw;
2992 u32 eicr, eicr_mask;
2993 s32 retval;
2994
2995 ++adapter->link_irq.ev_count;
2996
2997 /* Pause other interrupts */
2998 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2999
3000 /* First get the cause */
3001 /*
3002 * The specifications of 82598, 82599, X540 and X550 say EICS register
3003 * is write only. However, Linux says it is a workaround for silicon
3004 * errata to read EICS instead of EICR to get interrupt cause. It seems
3005 * there is a problem about read clear mechanism for EICR register.
3006 */
3007 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3008 /* Be sure the queue bits are not cleared */
3009 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3010 /* Clear interrupt with write */
3011 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3012
3013 /* Link status change */
3014 if (eicr & IXGBE_EICR_LSC) {
3015 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3016 softint_schedule(adapter->link_si);
3017 }
3018
3019 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3020 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3021 (eicr & IXGBE_EICR_FLOW_DIR)) {
3022 /* This is probably overkill :) */
3023 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3024 return 1;
3025 /* Disable the interrupt */
3026 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3027 softint_schedule(adapter->fdir_si);
3028 }
3029
3030 if (eicr & IXGBE_EICR_ECC) {
3031 device_printf(adapter->dev,
3032 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3033 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3034 }
3035
3036 /* Check for over temp condition */
3037 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3038 switch (adapter->hw.mac.type) {
3039 case ixgbe_mac_X550EM_a:
3040 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3041 break;
3042 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3043 IXGBE_EICR_GPI_SDP0_X550EM_a);
3044 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3045 IXGBE_EICR_GPI_SDP0_X550EM_a);
3046 retval = hw->phy.ops.check_overtemp(hw);
3047 if (retval != IXGBE_ERR_OVERTEMP)
3048 break;
3049 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3050 device_printf(adapter->dev, "System shutdown required!\n");
3051 break;
3052 default:
3053 if (!(eicr & IXGBE_EICR_TS))
3054 break;
3055 retval = hw->phy.ops.check_overtemp(hw);
3056 if (retval != IXGBE_ERR_OVERTEMP)
3057 break;
3058 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3059 device_printf(adapter->dev, "System shutdown required!\n");
3060 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3061 break;
3062 }
3063 }
3064
3065 /* Check for VF message */
3066 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3067 (eicr & IXGBE_EICR_MAILBOX))
3068 softint_schedule(adapter->mbx_si);
3069 }
3070
3071 if (ixgbe_is_sfp(hw)) {
3072 /* Pluggable optics-related interrupt */
3073 if (hw->mac.type >= ixgbe_mac_X540)
3074 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3075 else
3076 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3077
3078 if (eicr & eicr_mask) {
3079 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3080 softint_schedule(adapter->mod_si);
3081 }
3082
3083 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3084 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3085 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3086 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3087 softint_schedule(adapter->msf_si);
3088 }
3089 }
3090
3091 /* Check for fan failure */
3092 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3093 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3094 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3095 }
3096
3097 /* External PHY interrupt */
3098 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3099 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3100 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3101 softint_schedule(adapter->phy_si);
3102 }
3103
3104 /* Re-enable other interrupts */
3105 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3106 return 1;
3107 } /* ixgbe_msix_link */
3108
3109 static void
3110 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3111 {
3112
3113 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3114 itr |= itr << 16;
3115 else
3116 itr |= IXGBE_EITR_CNT_WDIS;
3117
3118 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3119 }
3120
3121
3122 /************************************************************************
3123 * ixgbe_sysctl_interrupt_rate_handler
3124 ************************************************************************/
3125 static int
3126 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3127 {
3128 struct sysctlnode node = *rnode;
3129 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3130 struct adapter *adapter = que->adapter;
3131 uint32_t reg, usec, rate;
3132 int error;
3133
3134 if (que == NULL)
3135 return 0;
3136 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3137 usec = ((reg & 0x0FF8) >> 3);
3138 if (usec > 0)
3139 rate = 500000 / usec;
3140 else
3141 rate = 0;
3142 node.sysctl_data = &rate;
3143 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3144 if (error || newp == NULL)
3145 return error;
3146 reg &= ~0xfff; /* default, no limitation */
3147 if (rate > 0 && rate < 500000) {
3148 if (rate < 1000)
3149 rate = 1000;
3150 reg |= ((4000000/rate) & 0xff8);
3151 /*
3152 * When RSC is used, ITR interval must be larger than
3153 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3154 * The minimum value is always greater than 2us on 100M
3155 * (and 10M?(not documented)), but it's not on 1G and higher.
3156 */
3157 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3158 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3159 if ((adapter->num_queues > 1)
3160 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3161 return EINVAL;
3162 }
3163 ixgbe_max_interrupt_rate = rate;
3164 } else
3165 ixgbe_max_interrupt_rate = 0;
3166 ixgbe_eitr_write(adapter, que->msix, reg);
3167
3168 return (0);
3169 } /* ixgbe_sysctl_interrupt_rate_handler */
3170
3171 const struct sysctlnode *
3172 ixgbe_sysctl_instance(struct adapter *adapter)
3173 {
3174 const char *dvname;
3175 struct sysctllog **log;
3176 int rc;
3177 const struct sysctlnode *rnode;
3178
3179 if (adapter->sysctltop != NULL)
3180 return adapter->sysctltop;
3181
3182 log = &adapter->sysctllog;
3183 dvname = device_xname(adapter->dev);
3184
3185 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3186 0, CTLTYPE_NODE, dvname,
3187 SYSCTL_DESCR("ixgbe information and settings"),
3188 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3189 goto err;
3190
3191 return rnode;
3192 err:
3193 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3194 return NULL;
3195 }
3196
3197 /************************************************************************
3198 * ixgbe_add_device_sysctls
3199 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-device root (hw.<devname>). */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Adaptive interrupt moderation, seeded from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3338
3339 /************************************************************************
3340 * ixgbe_allocate_pci_resources
3341 ************************************************************************/
/*
 * Map BAR0 (the device register window).  Returns 0 on success or
 * ENXIO if the BAR has an unexpected type or cannot be mapped.
 */
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Device registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "not mapped" for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3379
3380 static void
3381 ixgbe_free_softint(struct adapter *adapter)
3382 {
3383 struct ix_queue *que = adapter->queues;
3384 struct tx_ring *txr = adapter->tx_rings;
3385 int i;
3386
3387 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3388 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3389 if (txr->txr_si != NULL)
3390 softint_disestablish(txr->txr_si);
3391 }
3392 if (que->que_si != NULL)
3393 softint_disestablish(que->que_si);
3394 }
3395 if (adapter->txr_wq != NULL)
3396 workqueue_destroy(adapter->txr_wq);
3397 if (adapter->txr_wq_enqueued != NULL)
3398 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3399 if (adapter->que_wq != NULL)
3400 workqueue_destroy(adapter->que_wq);
3401
3402 /* Drain the Link queue */
3403 if (adapter->link_si != NULL) {
3404 softint_disestablish(adapter->link_si);
3405 adapter->link_si = NULL;
3406 }
3407 if (adapter->mod_si != NULL) {
3408 softint_disestablish(adapter->mod_si);
3409 adapter->mod_si = NULL;
3410 }
3411 if (adapter->msf_si != NULL) {
3412 softint_disestablish(adapter->msf_si);
3413 adapter->msf_si = NULL;
3414 }
3415 if (adapter->phy_si != NULL) {
3416 softint_disestablish(adapter->phy_si);
3417 adapter->phy_si = NULL;
3418 }
3419 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3420 if (adapter->fdir_si != NULL) {
3421 softint_disestablish(adapter->fdir_si);
3422 adapter->fdir_si = NULL;
3423 }
3424 }
3425 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3426 if (adapter->mbx_si != NULL) {
3427 softint_disestablish(adapter->mbx_si);
3428 adapter->mbx_si = NULL;
3429 }
3430 }
3431 } /* ixgbe_free_softint */
3432
3433 /************************************************************************
3434 * ixgbe_detach - Device removal routine
3435 *
3436 * Called when the driver is being removed.
3437 * Stops the adapter and deallocates all the resources
3438 * that were allocated for driver operation.
3439 *
3440 * return 0 on success, positive on failure
3441 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach all event counters before freeing the structures below. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue counters; some arrays are smaller than num_queues. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release ring memory, per-queue locks and the core lock. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3627
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU (Low Power Link Up) and/or
 * Wake-on-LAN before a suspend/shutdown transition.
 *
 * Caller must hold the core lock.  Returns 0 on success or the
 * error from the PHY's enter_lplu operation.
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/*
		 * Keep the stop path from resetting the PHY, otherwise the
		 * LPLU programming below would be undone.
		 */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wakeup state. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3687
/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 *
 * Transitions the port into low power mode at system shutdown.
 * Compiled out: NetBSD handles suspend/shutdown through pmf(9)
 * (see ixgbe_suspend/ixgbe_resume) rather than a shutdown method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3707
3708 /************************************************************************
3709 * ixgbe_suspend
3710 *
3711 * From D0 to D3
3712 ************************************************************************/
3713 static bool
3714 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3715 {
3716 struct adapter *adapter = device_private(dev);
3717 int error = 0;
3718
3719 INIT_DEBUGOUT("ixgbe_suspend: begin");
3720
3721 IXGBE_CORE_LOCK(adapter);
3722
3723 error = ixgbe_setup_low_power_mode(adapter);
3724
3725 IXGBE_CORE_UNLOCK(adapter);
3726
3727 return (error);
3728 } /* ixgbe_suspend */
3729
3730 /************************************************************************
3731 * ixgbe_resume
3732 *
3733 * From D3 to D0
3734 ************************************************************************/
3735 static bool
3736 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3737 {
3738 struct adapter *adapter = device_private(dev);
3739 struct ifnet *ifp = adapter->ifp;
3740 struct ixgbe_hw *hw = &adapter->hw;
3741 u32 wus;
3742
3743 INIT_DEBUGOUT("ixgbe_resume: begin");
3744
3745 IXGBE_CORE_LOCK(adapter);
3746
3747 /* Read & clear WUS register */
3748 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3749 if (wus)
3750 device_printf(dev, "Woken up by (WUS): %#010x\n",
3751 IXGBE_READ_REG(hw, IXGBE_WUS));
3752 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3753 /* And clear WUFC until next low-power transition */
3754 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3755
3756 /*
3757 * Required after D3->D0 transition;
3758 * will re-advertise all previous advertised speeds
3759 */
3760 if (ifp->if_flags & IFF_UP)
3761 ixgbe_init_locked(adapter);
3762
3763 IXGBE_CORE_UNLOCK(adapter);
3764
3765 return true;
3766 } /* ixgbe_resume */
3767
/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty on NetBSD: if_hwassist is a FreeBSD
	 * concept; NetBSD derives offload capability from if_capenable
	 * directly.  Kept as a placeholder to ease merges from FreeBSD.
	 */
}
3780
/************************************************************************
 * ixgbe_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init
 * entry point in network interface structure. It is also
 * used by the driver as a hw/sw initialization routine to
 * get to a consistent state.
 *
 * Caller must hold the core lock.  The sequence below (stop, MAC/addr
 * setup, TX then RX structures, interrupt routing, link config) is
 * order-sensitive hardware bring-up.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware before reprogramming it. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4029
4030 /************************************************************************
4031 * ixgbe_init
4032 ************************************************************************/
4033 static int
4034 ixgbe_init(struct ifnet *ifp)
4035 {
4036 struct adapter *adapter = ifp->if_softc;
4037
4038 IXGBE_CORE_LOCK(adapter);
4039 ixgbe_init_locked(adapter);
4040 IXGBE_CORE_UNLOCK(adapter);
4041
4042 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4043 } /* ixgbe_init */
4044
4045 /************************************************************************
4046 * ixgbe_set_ivar
4047 *
4048 * Setup the correct IVAR register for a particular MSI-X interrupt
4049 * (yes this is all very magic and confusing :)
4050 * - entry is the register array entry
4051 * - vector is the MSI-X vector for this queue
4052 * - type is RX/TX/MISC
4053 ************************************************************************/
4054 static void
4055 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4056 {
4057 struct ixgbe_hw *hw = &adapter->hw;
4058 u32 ivar, index;
4059
4060 vector |= IXGBE_IVAR_ALLOC_VAL;
4061
4062 switch (hw->mac.type) {
4063 case ixgbe_mac_82598EB:
4064 if (type == -1)
4065 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4066 else
4067 entry += (type * 64);
4068 index = (entry >> 2) & 0x1F;
4069 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4070 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4071 ivar |= (vector << (8 * (entry & 0x3)));
4072 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4073 break;
4074 case ixgbe_mac_82599EB:
4075 case ixgbe_mac_X540:
4076 case ixgbe_mac_X550:
4077 case ixgbe_mac_X550EM_x:
4078 case ixgbe_mac_X550EM_a:
4079 if (type == -1) { /* MISC IVAR */
4080 index = (entry & 1) * 8;
4081 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4082 ivar &= ~(0xFF << index);
4083 ivar |= (vector << index);
4084 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4085 } else { /* RX/TX IVARS */
4086 index = (16 * (entry & 1)) + (8 * type);
4087 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4088 ivar &= ~(0xFF << index);
4089 ivar |= (vector << index);
4090 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4091 }
4092 break;
4093 default:
4094 break;
4095 }
4096 } /* ixgbe_set_ivar */
4097
/************************************************************************
 * ixgbe_configure_ivars
 *
 * Map every RX and TX queue to its MSI-X vector via the IVAR
 * registers and program an initial interrupt moderation (EITR)
 * value per vector.
 ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		/* Convert interrupts/sec to the EITR interval encoding. */
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(adapter, que->msix, newitr);
		/*
		 * To eliminate influence of the previous state.
		 * At this point, Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so both
		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
4139
/************************************************************************
 * ixgbe_config_gpie
 *
 * Program the General Purpose Interrupt Enable register: MSI-X mode
 * bits plus the SDP pins used for fan-failure, thermal-sensor and
 * link/module detection, depending on enabled features and MAC type.
 ************************************************************************/
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	/* Read-modify-write to preserve unrelated GPIE bits. */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     | IXGBE_GPIE_EIAME
		     | IXGBE_GPIE_PBA_SUPPORT
		     | IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

} /* ixgbe_config_gpie */
4183
/************************************************************************
 * ixgbe_config_delay_values
 *
 * Compute flow-control high/low watermarks (in KB) from the maximum
 * frame size and the RX packet-buffer size, using the MAC-specific
 * delay-value formulas.
 *
 * Requires adapter->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	/* RXPBSIZE is in bytes; >> 10 converts to KB. */
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	/*
	 * NOTE(review): assumes the packet buffer is larger than the
	 * computed delay value; no underflow guard here — presumably
	 * guaranteed by the hardware's minimum RXPBSIZE. Verify.
	 */
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
} /* ixgbe_config_delay_values */
4230
/************************************************************************
 * ixgbe_set_multi - Multicast Update
 *
 * Called whenever multicast address list is updated.
 *
 * Rebuilds the driver's MTA shadow from the ethercom multicast list;
 * falls back to ALLMULTI when the list overflows or contains an
 * address range.  Caller must hold the core lock.
 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * addrlo != addrhi means a range was requested; the MTA
		 * can only hold individual addresses, so go ALLMULTI.
		 * Likewise when the table would overflow.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Reflect PROMISC/ALLMULTI in the Filter Control register. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Only program the MTA when it did not overflow above. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4289
4290 /************************************************************************
4291 * ixgbe_mc_array_itr
4292 *
4293 * An iterator function needed by the multicast shared code.
4294 * It feeds the shared code routine the addresses in the
4295 * array of ixgbe_set_multi() one by one.
4296 ************************************************************************/
4297 static u8 *
4298 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4299 {
4300 struct ixgbe_mc_addr *mta;
4301
4302 mta = (struct ixgbe_mc_addr *)*update_ptr;
4303 *vmdq = mta->vmdq;
4304
4305 *update_ptr = (u8*)(mta + 1);
4306
4307 return (mta->addr);
4308 } /* ixgbe_mc_array_itr */
4309
/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 * Callout entry point: takes the core lock and runs the real
 * periodic work in ixgbe_local_timer1() (link status, statistics,
 * watchdog check).
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4325
/*
 * Locked body of the periodic timer: probe SFP optics, refresh link
 * state and statistics, aggregate per-queue TX error counters into
 * the adapter-wide event counters, and run the TX hang watchdog.
 * Reschedules itself every hz ticks.  Caller holds the core lock.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			/* Don't rearm a queue that was deliberately disabled. */
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}

out:
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4428
/************************************************************************
 * ixgbe_sfp_probe
 *
 * Determine if a port had optics inserted.
 *
 * Only applies to NL-PHY ports whose SFP was absent at attach time.
 * Returns TRUE once a supported module has been identified and the
 * PHY reset, FALSE otherwise.
 ************************************************************************/
static bool
ixgbe_sfp_probe(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	bool result = FALSE;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		/* Probe resolved one way or the other; stop polling. */
		adapter->sfp_probe = FALSE;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,"Unsupported SFP+ module detected!");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = TRUE;
	}
out:

	return (result);
} /* ixgbe_sfp_probe */
4462
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 * Identifies a newly inserted SFP+ module, performs the MAC-specific
 * setup, and then schedules the multispeed-fiber softint to redo
 * link negotiation.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk workaround: the module interrupt can fire
		 * spuriously; confirm a module is actually present by
		 * sampling the cage-presence SDP pin for this MAC.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 has no setup_sfp hook; a PHY reset suffices there. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Hand off to the MSF tasklet to renegotiate the link. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4513
4514
/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 * Re-resolves the supported physical layer after a module change,
 * re-runs link setup with the advertised (or capable) speeds, and
 * rebuilds the ifmedia list to match the new module.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	/* Nothing advertised: fall back to whatever the link supports. */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4543
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 *
 * Services a LASI (Link Alarm Status Interrupt) from the external
 * PHY and reports over-temperature or other LASI errors.
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	++adapter->phy_sicount.ev_count;
	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */
4564
4565 static void
4566 ixgbe_ifstop(struct ifnet *ifp, int disable)
4567 {
4568 struct adapter *adapter = ifp->if_softc;
4569
4570 IXGBE_CORE_LOCK(adapter);
4571 ixgbe_stop(adapter);
4572 IXGBE_CORE_UNLOCK(adapter);
4573 }
4574
/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 *
 * Caller must hold the core lock.
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so the shared code actually runs its
	 * stop sequence even if it believes the MAC is already stopped.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4616
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 *
 * Called with the core lock held (asserted below).  Transitions the
 * interface between LINK_STATE_UP and LINK_STATE_DOWN exactly once per
 * change, tracked via adapter->link_active.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				/* Translate the negotiated speed for logging */
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush queued work so nothing runs on a dead link */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4705
4706 /************************************************************************
4707 * ixgbe_config_dmac - Configure DMA Coalescing
4708 ************************************************************************/
4709 static void
4710 ixgbe_config_dmac(struct adapter *adapter)
4711 {
4712 struct ixgbe_hw *hw = &adapter->hw;
4713 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4714
4715 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4716 return;
4717
4718 if (dcfg->watchdog_timer ^ adapter->dmac ||
4719 dcfg->link_speed ^ adapter->link_speed) {
4720 dcfg->watchdog_timer = adapter->dmac;
4721 dcfg->fcoe_en = false;
4722 dcfg->link_speed = adapter->link_speed;
4723 dcfg->num_tcs = 1;
4724
4725 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4726 dcfg->watchdog_timer, dcfg->link_speed);
4727
4728 hw->mac.ops.dmac_config(hw);
4729 }
4730 } /* ixgbe_config_dmac */
4731
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Build the "other causes" interrupt-enable mask appropriate for the
 *   MAC type and enabled features, program EIMS (and EIAC auto-clear
 *   for MSI-X), then unmask every queue vector individually.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32             mask, fwsm;

	/* Start with all causes enabled except the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4815
/************************************************************************
 * ixgbe_disable_intr_internal
 *
 *   Common body for ixgbe_disable_intr() / ixgbe_ensure_disabled_intr():
 *   masks all non-queue causes, turns off MSI-X auto-clear, then
 *   disables each queue vector.  "nestok" is passed through to the
 *   per-queue disable and controls whether nested disables are allowed.
 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4836
/************************************************************************
 * ixgbe_disable_intr
 *
 *   Disable all interrupts, allowing nested per-queue disables.
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4846
/************************************************************************
 * ixgbe_ensure_disabled_intr
 *
 *   Like ixgbe_disable_intr(), but with nesting disallowed — used
 *   where the caller requires interrupts to end up fully disabled.
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4856
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single shared handler for INTx/MSI operation: reads and decodes
 *   EICR, services TX/RX, and defers link / SFP module / external PHY
 *   events to their softints.  Returns 0 for a spurious interrupt,
 *   1 when handled.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = adapter->tx_rings;
	bool            more = false;
	u32             eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Spurious: no cause bits set; re-enable and bail */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack the cause and defer */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* More RX work pending: reschedule the queue instead of unmasking */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4950
/************************************************************************
 * ixgbe_free_pciintr_resources
 *
 *   Disestablish every established interrupt handler (queue vectors
 *   first, then the link/legacy vector) and release the allocated
 *   PCI interrupt resources.  Safe to call on partially set up state:
 *   every teardown is guarded by a NULL check.
 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int             rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
4989
/************************************************************************
 * ixgbe_free_pci_resources
 *
 *   Release interrupt resources, then unmap the device register BAR
 *   if it was mapped (mem_size != 0 implies a live mapping).
 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	ixgbe_free_pciintr_resources(adapter);

	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
5006
/************************************************************************
 * ixgbe_set_sysctl_value
 *
 *   Create a read/write integer sysctl node under this device's
 *   sysctl subtree, bound directly to *limit, and initialize *limit
 *   to the given default value.  Creation failures are logged but
 *   not fatal; *limit is set regardless.
 ************************************************************************/
static void
ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	device_t dev = adapter->dev;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    name, SYSCTL_DESCR(description),
	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
	*limit = value;
} /* ixgbe_set_sysctl_value */
5030
/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 *
 *   Reads the current mode into a local so sysctl_lookup() can both
 *   report it and accept a new value; only on an actual change does
 *   it call ixgbe_set_flowcntl().
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error, fc;

	fc = adapter->hw.fc.current_mode;
	node.sysctl_data = &fc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read-only query */
	if (error != 0 || newp == NULL)
		return error;

	/* Don't bother if it's not changed */
	if (fc == adapter->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(adapter, fc);
} /* ixgbe_sysctl_flowcntl */
5055
/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 *
 *   RX packet-drop is the complement of flow control in multiqueue
 *   setups: it is disabled when any pause mode is on, and enabled
 *   when flow control is off (see ixgbe_enable_rx_drop()).
 *
 *   Returns 0 on success, EINVAL for an unknown mode.
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct adapter *adapter, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = fc;
		if (adapter->num_queues > 1)
			ixgbe_disable_rx_drop(adapter);
		break;
	case ixgbe_fc_none:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
		if (adapter->num_queues > 1)
			ixgbe_enable_rx_drop(adapter);
		break;
	default:
		return (EINVAL);
	}

#if 0 /* XXX NetBSD */
	/* Don't autoneg if forcing a value */
	adapter->hw.fc.disable_fc_autoneg = TRUE;
#endif
	/* Push the requested mode to the hardware */
	ixgbe_fc_enable(&adapter->hw);

	return (0);
} /* ixgbe_set_flowcntl */
5093
5094 /************************************************************************
5095 * ixgbe_enable_rx_drop
5096 *
5097 * Enable the hardware to drop packets when the buffer is
5098 * full. This is useful with multiqueue, so that no single
5099 * queue being full stalls the entire RX engine. We only
5100 * enable this when Multiqueue is enabled AND Flow Control
5101 * is disabled.
5102 ************************************************************************/
5103 static void
5104 ixgbe_enable_rx_drop(struct adapter *adapter)
5105 {
5106 struct ixgbe_hw *hw = &adapter->hw;
5107 struct rx_ring *rxr;
5108 u32 srrctl;
5109
5110 for (int i = 0; i < adapter->num_queues; i++) {
5111 rxr = &adapter->rx_rings[i];
5112 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5113 srrctl |= IXGBE_SRRCTL_DROP_EN;
5114 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5115 }
5116
5117 /* enable drop for each vf */
5118 for (int i = 0; i < adapter->num_vfs; i++) {
5119 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5120 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5121 IXGBE_QDE_ENABLE));
5122 }
5123 } /* ixgbe_enable_rx_drop */
5124
5125 /************************************************************************
5126 * ixgbe_disable_rx_drop
5127 ************************************************************************/
5128 static void
5129 ixgbe_disable_rx_drop(struct adapter *adapter)
5130 {
5131 struct ixgbe_hw *hw = &adapter->hw;
5132 struct rx_ring *rxr;
5133 u32 srrctl;
5134
5135 for (int i = 0; i < adapter->num_queues; i++) {
5136 rxr = &adapter->rx_rings[i];
5137 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5138 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5139 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5140 }
5141
5142 /* disable drop for each vf */
5143 for (int i = 0; i < adapter->num_vfs; i++) {
5144 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5145 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5146 }
5147 } /* ixgbe_disable_rx_drop */
5148
/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 *
 *   Reports the cached adapter->advertise flags and, on write,
 *   delegates validation and hardware programming to
 *   ixgbe_set_advertise().
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int            error = 0, advertise;

	advertise = adapter->advertise;
	node.sysctl_data = &advertise;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read-only query */
	if (error != 0 || newp == NULL)
		return error;

	return ixgbe_set_advertise(adapter, advertise);
} /* ixgbe_sysctl_advertise */
5169
5170 /************************************************************************
5171 * ixgbe_set_advertise - Control advertised link speed
5172 *
5173 * Flags:
5174 * 0x00 - Default (all capable link speed)
5175 * 0x01 - advertise 100 Mb
5176 * 0x02 - advertise 1G
5177 * 0x04 - advertise 10G
5178 * 0x08 - advertise 10 Mb
5179 * 0x10 - advertise 2.5G
5180 * 0x20 - advertise 5G
5181 ************************************************************************/
5182 static int
5183 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5184 {
5185 device_t dev;
5186 struct ixgbe_hw *hw;
5187 ixgbe_link_speed speed = 0;
5188 ixgbe_link_speed link_caps = 0;
5189 s32 err = IXGBE_NOT_IMPLEMENTED;
5190 bool negotiate = FALSE;
5191
5192 /* Checks to validate new value */
5193 if (adapter->advertise == advertise) /* no change */
5194 return (0);
5195
5196 dev = adapter->dev;
5197 hw = &adapter->hw;
5198
5199 /* No speed changes for backplane media */
5200 if (hw->phy.media_type == ixgbe_media_type_backplane)
5201 return (ENODEV);
5202
5203 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5204 (hw->phy.multispeed_fiber))) {
5205 device_printf(dev,
5206 "Advertised speed can only be set on copper or "
5207 "multispeed fiber media types.\n");
5208 return (EINVAL);
5209 }
5210
5211 if (advertise < 0x0 || advertise > 0x2f) {
5212 device_printf(dev,
5213 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5214 return (EINVAL);
5215 }
5216
5217 if (hw->mac.ops.get_link_capabilities) {
5218 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5219 &negotiate);
5220 if (err != IXGBE_SUCCESS) {
5221 device_printf(dev, "Unable to determine supported advertise speeds\n");
5222 return (ENODEV);
5223 }
5224 }
5225
5226 /* Set new value and report new advertised mode */
5227 if (advertise & 0x1) {
5228 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5229 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5230 return (EINVAL);
5231 }
5232 speed |= IXGBE_LINK_SPEED_100_FULL;
5233 }
5234 if (advertise & 0x2) {
5235 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5236 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5237 return (EINVAL);
5238 }
5239 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5240 }
5241 if (advertise & 0x4) {
5242 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5243 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5244 return (EINVAL);
5245 }
5246 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5247 }
5248 if (advertise & 0x8) {
5249 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5250 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5251 return (EINVAL);
5252 }
5253 speed |= IXGBE_LINK_SPEED_10_FULL;
5254 }
5255 if (advertise & 0x10) {
5256 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5257 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5258 return (EINVAL);
5259 }
5260 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5261 }
5262 if (advertise & 0x20) {
5263 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5264 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5265 return (EINVAL);
5266 }
5267 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5268 }
5269 if (advertise == 0)
5270 speed = link_caps; /* All capable link speed */
5271
5272 hw->mac.autotry_restart = TRUE;
5273 hw->mac.ops.setup_link(hw, speed, TRUE);
5274 adapter->advertise = advertise;
5275
5276 return (0);
5277 } /* ixgbe_set_advertise */
5278
5279 /************************************************************************
5280 * ixgbe_get_advertise - Get current advertised speed settings
5281 *
5282 * Formatted for sysctl usage.
5283 * Flags:
5284 * 0x01 - advertise 100 Mb
5285 * 0x02 - advertise 1G
5286 * 0x04 - advertise 10G
5287 * 0x08 - advertise 10 Mb (yes, Mb)
5288 * 0x10 - advertise 2.5G
5289 * 0x20 - advertise 5G
5290 ************************************************************************/
5291 static int
5292 ixgbe_get_advertise(struct adapter *adapter)
5293 {
5294 struct ixgbe_hw *hw = &adapter->hw;
5295 int speed;
5296 ixgbe_link_speed link_caps = 0;
5297 s32 err;
5298 bool negotiate = FALSE;
5299
5300 /*
5301 * Advertised speed means nothing unless it's copper or
5302 * multi-speed fiber
5303 */
5304 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5305 !(hw->phy.multispeed_fiber))
5306 return (0);
5307
5308 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5309 if (err != IXGBE_SUCCESS)
5310 return (0);
5311
5312 speed =
5313 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5314 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5315 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5316 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5317 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5318 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5319
5320 return speed;
5321 } /* ixgbe_get_advertise */
5322
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 * Control values:
 *   0/1 - off / on (use default value of 1000)
 *
 * Legal timer values are:
 *   50,100,250,500,1000,2000,5000,10000
 *
 * Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	int            error;
	int            newval;

	newval = adapter->dmac;
	node.sysctl_data = &newval;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read-only query */
	if ((error) || (newp == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
5380
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0 - set device to D0
 *     3 - set device to D3
 *     (none) - get current device power state
 *
 *   NOTE: the body is FreeBSD-derived and compiled out ("notyet") on
 *   NetBSD; until ported, this sysctl is a no-op returning 0.
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev = adapter->dev;
	int            curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
#endif
5424
/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool           new_wol_enabled;
	int            error = 0;

	/*
	 * NOTE(review): sysctl_data points at a bool here — assumes the
	 * node was registered with a matching (bool-sized) type; verify
	 * against the sysctl creation site.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read-only query */
	if ((error) || (newp == NULL))
		return (error);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Reject enabling WoL on hardware that does not support it */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
5459
/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up on upon receipt.
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int            error = 0;
	u32            new_wufc;

	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read-only query */
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the low byte carries user-settable flags */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * Replace the low byte, keep bits 8-23 of the previous value.
	 * NOTE(review): the 0xffffff mask also preserves the old low
	 * byte via OR, which can only set (never clear) conflicting
	 * bits — presumably benign given the EINVAL check above, but
	 * worth confirming against upstream.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5502
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 *   Dump the RSS redirection table (RETA/ERETA registers) for
 *   debugging.  The body is FreeBSD sbuf-based and compiled out
 *   ("notyet") on NetBSD; until ported, this sysctl returns 0 and
 *   prints nothing.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t       dev = adapter->dev;
	struct sbuf    *buf;
	int            error = 0, reta_size;
	u32            reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5561
5562 /************************************************************************
5563 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5564 *
5565 * For X552/X557-AT devices using an external PHY
5566 ************************************************************************/
5567 static int
5568 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5569 {
5570 struct sysctlnode node = *rnode;
5571 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5572 struct ixgbe_hw *hw = &adapter->hw;
5573 int val;
5574 u16 reg;
5575 int error;
5576
5577 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5578 device_printf(adapter->dev,
5579 "Device has no supported external thermal sensor.\n");
5580 return (ENODEV);
5581 }
5582
5583 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5584 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5585 device_printf(adapter->dev,
5586 "Error reading from PHY's current temperature register\n");
5587 return (EAGAIN);
5588 }
5589
5590 node.sysctl_data = &val;
5591
5592 /* Shift temp for output */
5593 val = reg >> 8;
5594
5595 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5596 if ((error) || (newp == NULL))
5597 return (error);
5598
5599 return (0);
5600 } /* ixgbe_sysctl_phy_temp */
5601
5602 /************************************************************************
5603 * ixgbe_sysctl_phy_overtemp_occurred
5604 *
5605 * Reports (directly from the PHY) whether the current PHY
5606 * temperature is over the overtemp threshold.
5607 ************************************************************************/
5608 static int
5609 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5610 {
5611 struct sysctlnode node = *rnode;
5612 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5613 struct ixgbe_hw *hw = &adapter->hw;
5614 int val, error;
5615 u16 reg;
5616
5617 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5618 device_printf(adapter->dev,
5619 "Device has no supported external thermal sensor.\n");
5620 return (ENODEV);
5621 }
5622
5623 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5624 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5625 device_printf(adapter->dev,
5626 "Error reading from PHY's temperature status register\n");
5627 return (EAGAIN);
5628 }
5629
5630 node.sysctl_data = &val;
5631
5632 /* Get occurrence bit */
5633 val = !!(reg & 0x4000);
5634
5635 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5636 if ((error) || (newp == NULL))
5637 return (error);
5638
5639 return (0);
5640 } /* ixgbe_sysctl_phy_overtemp_occurred */
5641
/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 *   Sysctl to set EEE power saving feature
 *   Values:
 *     0 - disable EEE
 *     1 - enable EEE
 *     (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	device_t       dev = adapter->dev;
	int            curr_eee, new_eee, error = 0;
	s32            retval;

	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read-only query */
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/*
	 * Bounds checking.  (Note: the equality test above already
	 * filters 0/1 toggles to the same state; this rejects any
	 * other written value.)
	 */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
5698
5699 /************************************************************************
5700 * ixgbe_init_device_features
5701 ************************************************************************/
5702 static void
5703 ixgbe_init_device_features(struct adapter *adapter)
5704 {
5705 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5706 | IXGBE_FEATURE_RSS
5707 | IXGBE_FEATURE_MSI
5708 | IXGBE_FEATURE_MSIX
5709 | IXGBE_FEATURE_LEGACY_IRQ
5710 | IXGBE_FEATURE_LEGACY_TX;
5711
5712 /* Set capabilities first... */
5713 switch (adapter->hw.mac.type) {
5714 case ixgbe_mac_82598EB:
5715 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5716 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5717 break;
5718 case ixgbe_mac_X540:
5719 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5720 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5721 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5722 (adapter->hw.bus.func == 0))
5723 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5724 break;
5725 case ixgbe_mac_X550:
5726 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5727 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5728 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5729 break;
5730 case ixgbe_mac_X550EM_x:
5731 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5732 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5733 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5734 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5735 break;
5736 case ixgbe_mac_X550EM_a:
5737 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5738 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5739 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5740 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5741 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5742 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5743 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5744 }
5745 break;
5746 case ixgbe_mac_82599EB:
5747 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5748 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5749 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5750 (adapter->hw.bus.func == 0))
5751 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5752 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5753 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5754 break;
5755 default:
5756 break;
5757 }
5758
5759 /* Enabled by default... */
5760 /* Fan failure detection */
5761 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5762 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5763 /* Netmap */
5764 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
5765 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
5766 /* EEE */
5767 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
5768 adapter->feat_en |= IXGBE_FEATURE_EEE;
5769 /* Thermal Sensor */
5770 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5771 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5772
5773 /* Enabled via global sysctl... */
5774 /* Flow Director */
5775 if (ixgbe_enable_fdir) {
5776 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
5777 adapter->feat_en |= IXGBE_FEATURE_FDIR;
5778 else
5779 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5780 }
5781 /* Legacy (single queue) transmit */
5782 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
5783 ixgbe_enable_legacy_tx)
5784 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
5785 /*
5786 * Message Signal Interrupts - Extended (MSI-X)
5787 * Normal MSI is only enabled if MSI-X calls fail.
5788 */
5789 if (!ixgbe_enable_msix)
5790 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
5791 /* Receive-Side Scaling (RSS) */
5792 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5793 adapter->feat_en |= IXGBE_FEATURE_RSS;
5794
5795 /* Disable features with unmet dependencies... */
5796 /* No MSI-X */
5797 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
5798 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5799 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5800 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5801 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5802 }
5803 } /* ixgbe_init_device_features */
5804
5805 /************************************************************************
5806 * ixgbe_probe - Device identification routine
5807 *
5808 * Determines if the driver should be loaded on
5809 * adapter based on its PCI vendor/device ID.
5810 *
 * return 1 if a supported device is found, 0 otherwise
5812 ************************************************************************/
5813 static int
5814 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5815 {
5816 const struct pci_attach_args *pa = aux;
5817
5818 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5819 }
5820
5821 static ixgbe_vendor_info_t *
5822 ixgbe_lookup(const struct pci_attach_args *pa)
5823 {
5824 ixgbe_vendor_info_t *ent;
5825 pcireg_t subid;
5826
5827 INIT_DEBUGOUT("ixgbe_lookup: begin");
5828
5829 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5830 return NULL;
5831
5832 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5833
5834 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5835 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5836 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5837 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5838 (ent->subvendor_id == 0)) &&
5839 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5840 (ent->subdevice_id == 0))) {
5841 ++ixgbe_total_ports;
5842 return ent;
5843 }
5844 }
5845 return NULL;
5846 }
5847
5848 static int
5849 ixgbe_ifflags_cb(struct ethercom *ec)
5850 {
5851 struct ifnet *ifp = &ec->ec_if;
5852 struct adapter *adapter = ifp->if_softc;
5853 int change, rc = 0;
5854
5855 IXGBE_CORE_LOCK(adapter);
5856
5857 change = ifp->if_flags ^ adapter->if_flags;
5858 if (change != 0)
5859 adapter->if_flags = ifp->if_flags;
5860
5861 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5862 rc = ENETRESET;
5863 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5864 ixgbe_set_promisc(adapter);
5865
5866 /* Set up VLAN support and filter */
5867 ixgbe_setup_vlan_hw_support(adapter);
5868
5869 IXGBE_CORE_UNLOCK(adapter);
5870
5871 return rc;
5872 }
5873
5874 /************************************************************************
5875 * ixgbe_ioctl - Ioctl entry point
5876 *
5877 * Called when the user wants to configure the interface.
5878 *
5879 * return 0 on success, positive on failure
5880 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int             error = 0;
	int l4csum_en;
	/* All layer-4 Rx checksum offload bits, toggled as one unit below */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/*
	 * First switch: trace the request.  It performs no dispatch,
	 * with one exception: SIOCZIFDATA also clears the hardware and
	 * event counters here.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Zero both the MAC statistics and our event counters */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actually handle the request */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the two SFP module I2C addresses are accepted */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Never read past the caller-visible data buffer */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/*
		 * Let the common Ethernet code do the work; ENETRESET
		 * means the interface needs reprogramming below.
		 */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* Not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6015
6016 /************************************************************************
6017 * ixgbe_check_fan_failure
6018 ************************************************************************/
6019 static void
6020 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6021 {
6022 u32 mask;
6023
6024 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6025 IXGBE_ESDP_SDP1;
6026
6027 if (reg & mask)
6028 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6029 } /* ixgbe_check_fan_failure */
6030
6031 /************************************************************************
6032 * ixgbe_handle_que
6033 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Drain RX first, then TX completions under the TX lock */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue TX path: push out anything queued on this ring */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule ourselves instead of re-enabling
		 * the interrupt, so we don't livelock in interrupt context */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* No per-queue vector (legacy/MSI): unmask everything */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6071
6072 /************************************************************************
6073 * ixgbe_handle_que_work
6074 ************************************************************************/
6075 static void
6076 ixgbe_handle_que_work(struct work *wk, void *context)
6077 {
6078 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6079
6080 /*
6081 * "enqueued flag" is not required here.
6082 * See ixgbe_msix_que().
6083 */
6084 ixgbe_handle_que(que);
6085 }
6086
6087 /************************************************************************
6088 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6089 ************************************************************************/
6090 static int
6091 ixgbe_allocate_legacy(struct adapter *adapter,
6092 const struct pci_attach_args *pa)
6093 {
6094 device_t dev = adapter->dev;
6095 struct ix_queue *que = adapter->queues;
6096 struct tx_ring *txr = adapter->tx_rings;
6097 int counts[PCI_INTR_TYPE_SIZE];
6098 pci_intr_type_t intr_type, max_type;
6099 char intrbuf[PCI_INTRSTR_LEN];
6100 const char *intrstr = NULL;
6101
6102 /* We allocate a single interrupt resource */
6103 max_type = PCI_INTR_TYPE_MSI;
6104 counts[PCI_INTR_TYPE_MSIX] = 0;
6105 counts[PCI_INTR_TYPE_MSI] =
6106 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6107 /* Check not feat_en but feat_cap to fallback to INTx */
6108 counts[PCI_INTR_TYPE_INTX] =
6109 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6110
6111 alloc_retry:
6112 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6113 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6114 return ENXIO;
6115 }
6116 adapter->osdep.nintrs = 1;
6117 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6118 intrbuf, sizeof(intrbuf));
6119 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6120 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6121 device_xname(dev));
6122 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6123 if (adapter->osdep.ihs[0] == NULL) {
6124 aprint_error_dev(dev,"unable to establish %s\n",
6125 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6126 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6127 adapter->osdep.intrs = NULL;
6128 switch (intr_type) {
6129 case PCI_INTR_TYPE_MSI:
6130 /* The next try is for INTx: Disable MSI */
6131 max_type = PCI_INTR_TYPE_INTX;
6132 counts[PCI_INTR_TYPE_INTX] = 1;
6133 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6134 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6135 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6136 goto alloc_retry;
6137 } else
6138 break;
6139 case PCI_INTR_TYPE_INTX:
6140 default:
6141 /* See below */
6142 break;
6143 }
6144 }
6145 if (intr_type == PCI_INTR_TYPE_INTX) {
6146 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6147 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6148 }
6149 if (adapter->osdep.ihs[0] == NULL) {
6150 aprint_error_dev(dev,
6151 "couldn't establish interrupt%s%s\n",
6152 intrstr ? " at " : "", intrstr ? intrstr : "");
6153 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6154 adapter->osdep.intrs = NULL;
6155 return ENXIO;
6156 }
6157 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6158 /*
6159 * Try allocating a fast interrupt and the associated deferred
6160 * processing contexts.
6161 */
6162 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6163 txr->txr_si =
6164 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6165 ixgbe_deferred_mq_start, txr);
6166 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6167 ixgbe_handle_que, que);
6168
6169 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6170 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6171 aprint_error_dev(dev,
6172 "could not establish software interrupts\n");
6173
6174 return ENXIO;
6175 }
6176 /* For simplicity in the handlers */
6177 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6178
6179 return (0);
6180 } /* ixgbe_allocate_legacy */
6181
6182 /************************************************************************
6183 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6184 ************************************************************************/
6185 static int
6186 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6187 {
6188 device_t dev = adapter->dev;
6189 struct ix_queue *que = adapter->queues;
6190 struct tx_ring *txr = adapter->tx_rings;
6191 pci_chipset_tag_t pc;
6192 char intrbuf[PCI_INTRSTR_LEN];
6193 char intr_xname[32];
6194 char wqname[MAXCOMLEN];
6195 const char *intrstr = NULL;
6196 int error, vector = 0;
6197 int cpu_id = 0;
6198 kcpuset_t *affinity;
6199 #ifdef RSS
6200 unsigned int rss_buckets = 0;
6201 kcpuset_t cpu_mask;
6202 #endif
6203
6204 pc = adapter->osdep.pc;
6205 #ifdef RSS
6206 /*
6207 * If we're doing RSS, the number of queues needs to
6208 * match the number of RSS buckets that are configured.
6209 *
6210 * + If there's more queues than RSS buckets, we'll end
6211 * up with queues that get no traffic.
6212 *
6213 * + If there's more RSS buckets than queues, we'll end
6214 * up having multiple RSS buckets map to the same queue,
6215 * so there'll be some contention.
6216 */
6217 rss_buckets = rss_getnumbuckets();
6218 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6219 (adapter->num_queues != rss_buckets)) {
6220 device_printf(dev,
6221 "%s: number of queues (%d) != number of RSS buckets (%d)"
6222 "; performance will be impacted.\n",
6223 __func__, adapter->num_queues, rss_buckets);
6224 }
6225 #endif
6226
6227 adapter->osdep.nintrs = adapter->num_queues + 1;
6228 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6229 adapter->osdep.nintrs) != 0) {
6230 aprint_error_dev(dev,
6231 "failed to allocate MSI-X interrupt\n");
6232 return (ENXIO);
6233 }
6234
6235 kcpuset_create(&affinity, false);
6236 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6237 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6238 device_xname(dev), i);
6239 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6240 sizeof(intrbuf));
6241 #ifdef IXGBE_MPSAFE
6242 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6243 true);
6244 #endif
6245 /* Set the handler function */
6246 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6247 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6248 intr_xname);
6249 if (que->res == NULL) {
6250 aprint_error_dev(dev,
6251 "Failed to register QUE handler\n");
6252 error = ENXIO;
6253 goto err_out;
6254 }
6255 que->msix = vector;
6256 adapter->active_queues |= (u64)(1 << que->msix);
6257
6258 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6259 #ifdef RSS
6260 /*
6261 * The queue ID is used as the RSS layer bucket ID.
6262 * We look up the queue ID -> RSS CPU ID and select
6263 * that.
6264 */
6265 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6266 CPU_SETOF(cpu_id, &cpu_mask);
6267 #endif
6268 } else {
6269 /*
6270 * Bind the MSI-X vector, and thus the
6271 * rings to the corresponding CPU.
6272 *
6273 * This just happens to match the default RSS
6274 * round-robin bucket -> queue -> CPU allocation.
6275 */
6276 if (adapter->num_queues > 1)
6277 cpu_id = i;
6278 }
6279 /* Round-robin affinity */
6280 kcpuset_zero(affinity);
6281 kcpuset_set(affinity, cpu_id % ncpu);
6282 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6283 NULL);
6284 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6285 intrstr);
6286 if (error == 0) {
6287 #if 1 /* def IXGBE_DEBUG */
6288 #ifdef RSS
6289 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6290 cpu_id % ncpu);
6291 #else
6292 aprint_normal(", bound queue %d to cpu %d", i,
6293 cpu_id % ncpu);
6294 #endif
6295 #endif /* IXGBE_DEBUG */
6296 }
6297 aprint_normal("\n");
6298
6299 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6300 txr->txr_si = softint_establish(
6301 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6302 ixgbe_deferred_mq_start, txr);
6303 if (txr->txr_si == NULL) {
6304 aprint_error_dev(dev,
6305 "couldn't establish software interrupt\n");
6306 error = ENXIO;
6307 goto err_out;
6308 }
6309 }
6310 que->que_si
6311 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6312 ixgbe_handle_que, que);
6313 if (que->que_si == NULL) {
6314 aprint_error_dev(dev,
6315 "couldn't establish software interrupt\n");
6316 error = ENXIO;
6317 goto err_out;
6318 }
6319 }
6320 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6321 error = workqueue_create(&adapter->txr_wq, wqname,
6322 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6323 IXGBE_WORKQUEUE_FLAGS);
6324 if (error) {
6325 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6326 goto err_out;
6327 }
6328 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6329
6330 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6331 error = workqueue_create(&adapter->que_wq, wqname,
6332 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6333 IXGBE_WORKQUEUE_FLAGS);
6334 if (error) {
6335 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6336 goto err_out;
6337 }
6338
6339 /* and Link */
6340 cpu_id++;
6341 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6342 adapter->vector = vector;
6343 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6344 sizeof(intrbuf));
6345 #ifdef IXGBE_MPSAFE
6346 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6347 true);
6348 #endif
6349 /* Set the link handler function */
6350 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6351 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6352 intr_xname);
6353 if (adapter->osdep.ihs[vector] == NULL) {
6354 aprint_error_dev(dev, "Failed to register LINK handler\n");
6355 error = ENXIO;
6356 goto err_out;
6357 }
6358 /* Round-robin affinity */
6359 kcpuset_zero(affinity);
6360 kcpuset_set(affinity, cpu_id % ncpu);
6361 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6362 NULL);
6363
6364 aprint_normal_dev(dev,
6365 "for link, interrupting at %s", intrstr);
6366 if (error == 0)
6367 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6368 else
6369 aprint_normal("\n");
6370
6371 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6372 adapter->mbx_si =
6373 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6374 ixgbe_handle_mbx, adapter);
6375 if (adapter->mbx_si == NULL) {
6376 aprint_error_dev(dev,
6377 "could not establish software interrupts\n");
6378
6379 error = ENXIO;
6380 goto err_out;
6381 }
6382 }
6383
6384 kcpuset_destroy(affinity);
6385 aprint_normal_dev(dev,
6386 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6387
6388 return (0);
6389
6390 err_out:
6391 kcpuset_destroy(affinity);
6392 ixgbe_free_softint(adapter);
6393 ixgbe_free_pciintr_resources(adapter);
6394 return (error);
6395 } /* ixgbe_allocate_msix */
6396
6397 /************************************************************************
6398 * ixgbe_configure_interrupts
6399 *
6400 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6401 * This will also depend on user settings.
6402 ************************************************************************/
6403 static int
6404 ixgbe_configure_interrupts(struct adapter *adapter)
6405 {
6406 device_t dev = adapter->dev;
6407 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6408 int want, queues, msgs;
6409
6410 /* Default to 1 queue if MSI-X setup fails */
6411 adapter->num_queues = 1;
6412
6413 /* Override by tuneable */
6414 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6415 goto msi;
6416
6417 /*
6418 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6419 * interrupt slot.
6420 */
6421 if (ncpu == 1)
6422 goto msi;
6423
6424 /* First try MSI-X */
6425 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6426 msgs = MIN(msgs, IXG_MAX_NINTR);
6427 if (msgs < 2)
6428 goto msi;
6429
6430 adapter->msix_mem = (void *)1; /* XXX */
6431
6432 /* Figure out a reasonable auto config value */
6433 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6434
6435 #ifdef RSS
6436 /* If we're doing RSS, clamp at the number of RSS buckets */
6437 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6438 queues = min(queues, rss_getnumbuckets());
6439 #endif
6440 if (ixgbe_num_queues > queues) {
6441 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6442 ixgbe_num_queues = queues;
6443 }
6444
6445 if (ixgbe_num_queues != 0)
6446 queues = ixgbe_num_queues;
6447 else
6448 queues = min(queues,
6449 min(mac->max_tx_queues, mac->max_rx_queues));
6450
6451 /* reflect correct sysctl value */
6452 ixgbe_num_queues = queues;
6453
6454 /*
6455 * Want one vector (RX/TX pair) per queue
6456 * plus an additional for Link.
6457 */
6458 want = queues + 1;
6459 if (msgs >= want)
6460 msgs = want;
6461 else {
6462 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6463 "%d vectors but %d queues wanted!\n",
6464 msgs, want);
6465 goto msi;
6466 }
6467 adapter->num_queues = queues;
6468 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6469 return (0);
6470
6471 /*
6472 * MSI-X allocation failed or provided us with
6473 * less vectors than needed. Free MSI-X resources
6474 * and we'll try enabling MSI.
6475 */
6476 msi:
6477 /* Without MSI-X, some features are no longer supported */
6478 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6479 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6480 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6481 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6482
6483 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6484 adapter->msix_mem = NULL; /* XXX */
6485 if (msgs > 1)
6486 msgs = 1;
6487 if (msgs != 0) {
6488 msgs = 1;
6489 adapter->feat_en |= IXGBE_FEATURE_MSI;
6490 return (0);
6491 }
6492
6493 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6494 aprint_error_dev(dev,
6495 "Device does not support legacy interrupts.\n");
6496 return 1;
6497 }
6498
6499 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6500
6501 return (0);
6502 } /* ixgbe_configure_interrupts */
6503
6504
6505 /************************************************************************
6506 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6507 *
6508 * Done outside of interrupt context since the driver might sleep
6509 ************************************************************************/
6510 static void
6511 ixgbe_handle_link(void *context)
6512 {
6513 struct adapter *adapter = context;
6514 struct ixgbe_hw *hw = &adapter->hw;
6515
6516 IXGBE_CORE_LOCK(adapter);
6517 ++adapter->link_sicount.ev_count;
6518 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6519 ixgbe_update_link_status(adapter);
6520
6521 /* Re-enable link interrupts */
6522 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6523
6524 IXGBE_CORE_UNLOCK(adapter);
6525 } /* ixgbe_handle_link */
6526
6527 /************************************************************************
6528 * ixgbe_rearm_queues
6529 ************************************************************************/
6530 static void
6531 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6532 {
6533 u32 mask;
6534
6535 switch (adapter->hw.mac.type) {
6536 case ixgbe_mac_82598EB:
6537 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6538 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6539 break;
6540 case ixgbe_mac_82599EB:
6541 case ixgbe_mac_X540:
6542 case ixgbe_mac_X550:
6543 case ixgbe_mac_X550EM_x:
6544 case ixgbe_mac_X550EM_a:
6545 mask = (queues & 0xFFFFFFFF);
6546 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6547 mask = (queues >> 32);
6548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6549 break;
6550 default:
6551 break;
6552 }
6553 } /* ixgbe_rearm_queues */
6554