ixgbe.c revision 1.178 1 /* $NetBSD: ixgbe.c,v 1.178 2019/03/15 02:38:20 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84 /* XXX NetBSD: + 3.3.6 */
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family (first-generation, no MSI-X link vector quirks) */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 / X550EM-x / X550EM-a families */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry: all-zero sentinel terminates the probe scan */
	{0, 0, 0, 0, 0}
};
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
static const char *ixgbe_strings[] = {
	/* Index 0 — the only string index used by ixgbe_vendor_info_array */
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static void ixgbe_recovery_mode_timer(void *);
188 static int ixgbe_setup_interface(device_t, struct adapter *);
189 static void ixgbe_config_gpie(struct adapter *);
190 static void ixgbe_config_dmac(struct adapter *);
191 static void ixgbe_config_delay_values(struct adapter *);
192 static void ixgbe_config_link(struct adapter *);
193 static void ixgbe_check_wol_support(struct adapter *);
194 static int ixgbe_setup_low_power_mode(struct adapter *);
195 #if 0
196 static void ixgbe_rearm_queues(struct adapter *, u64);
197 #endif
198
199 static void ixgbe_initialize_transmit_units(struct adapter *);
200 static void ixgbe_initialize_receive_units(struct adapter *);
201 static void ixgbe_enable_rx_drop(struct adapter *);
202 static void ixgbe_disable_rx_drop(struct adapter *);
203 static void ixgbe_initialize_rss_mapping(struct adapter *);
204
205 static void ixgbe_enable_intr(struct adapter *);
206 static void ixgbe_disable_intr(struct adapter *);
207 static void ixgbe_update_stats_counters(struct adapter *);
208 static void ixgbe_set_promisc(struct adapter *);
209 static void ixgbe_set_multi(struct adapter *);
210 static void ixgbe_update_link_status(struct adapter *);
211 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
212 static void ixgbe_configure_ivars(struct adapter *);
213 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
214 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
215
216 static void ixgbe_setup_vlan_hw_support(struct adapter *);
217 #if 0
218 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
219 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
220 #endif
221
222 static void ixgbe_add_device_sysctls(struct adapter *);
223 static void ixgbe_add_hw_stats(struct adapter *);
224 static void ixgbe_clear_evcnt(struct adapter *);
225 static int ixgbe_set_flowcntl(struct adapter *, int);
226 static int ixgbe_set_advertise(struct adapter *, int);
227 static int ixgbe_get_advertise(struct adapter *);
228
229 /* Sysctl handlers */
230 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
231 const char *, int *, int);
232 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
235 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
236 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
237 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
238 #ifdef IXGBE_DEBUG
239 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
241 #endif
242 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
251
252 /* Support for pluggable optic modules */
253 static bool ixgbe_sfp_probe(struct adapter *);
254
255 /* Legacy (single vector) interrupt handler */
256 static int ixgbe_legacy_irq(void *);
257
258 /* The MSI/MSI-X Interrupt handlers */
259 static int ixgbe_msix_que(void *);
260 static int ixgbe_msix_link(void *);
261
262 /* Software interrupts for deferred work */
263 static void ixgbe_handle_que(void *);
264 static void ixgbe_handle_link(void *);
265 static void ixgbe_handle_msf(void *);
266 static void ixgbe_handle_mod(void *);
267 static void ixgbe_handle_phy(void *);
268
269 /* Workqueue handler for deferred work */
270 static void ixgbe_handle_que_work(struct work *, void *);
271
272 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
273
274 /************************************************************************
275 * NetBSD Device Interface Entry Points
276 ************************************************************************/
277 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
278 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
279 DVF_DETACH_SHUTDOWN);
280
281 #if 0
282 devclass_t ix_devclass;
283 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
284
285 MODULE_DEPEND(ix, pci, 1, 1, 1);
286 MODULE_DEPEND(ix, ether, 1, 1, 1);
287 #ifdef DEV_NETMAP
288 MODULE_DEPEND(ix, netmap, 1, 1, 1);
289 #endif
290 #endif
291
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * NetBSD has no FreeBSD-style SYSCTL_INT(); stub it out so the FreeBSD
 * tunable declarations below compile to nothing.  The same tunables are
 * exported per-device via ixgbe_add_device_sysctls() instead.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether deferred packet processing uses a workqueue (true) or softint (false) */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8.  This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* NetBSD: TUNABLE_INT() is a FreeBSD loader-tunable hook; stubbed out here */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * When the kernel is built with NET_MPSAFE, run callouts, softints and
 * workqueues without the kernel lock; otherwise fall back to the default
 * (KERNEL_LOCK-protected) flavors.
 * NOTE(review): "SOFTINFT" looks like a historical typo for "SOFTINT",
 * but the identifier is used elsewhere in this file and must stay as-is.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
414
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the 40-byte hash
 *   key (RSSRK) and the hash-type selection (MRQC) so received packets
 *   are spread across the adapter's RX queues.
 *
 *   Register write order matters here: RETA entries are accumulated
 *   four at a time before each 32-bit register write, and MRQC is
 *   written last to enable RSS with the chosen hash fields.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always uses the stack-wide RSS key from rss_getkey(). */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/*
	 * Set multiplier for RETA setup and table size based on MAC.
	 * 82598 spaces queue indices by 0x11; X550 variants have a
	 * 512-entry table (the upper 384 entries live in ERETA).
	 */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles 0 .. num_queues-1 as the round-robin fallback */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then write one register.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32 bits = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the stack's hash-type flags into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
531
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Per-queue: descriptor ring base/length (RDBAL/RDBAH/RDLEN), buffer
 *   sizing (SRRCTL), statistics mapping (RQSMR) and head/tail pointers.
 *   Globally: broadcast acceptance (FCTRL), jumbo frames and CRC strip
 *   (HLREG0), RSS setup, and checksum offload (RXCSUM).
 *   RX is kept disabled for the duration of the programming.
 ************************************************************************/
/* Round-up amount so rx_mbuf_sz converts to whole SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Convert mbuf buffer size to SRRCTL BSIZEPKT units (rounded up) */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware queue index; i is the software index */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split replacement types (not on 82598) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
661
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Per-queue: descriptor ring base/length (TDBAL/TDBAH/TDLEN),
 *   statistics mapping (TQSMR/TQSM), head/tail pointers, and head
 *   write-back disable (DCA_TXCTRL).  On non-82598 MACs, TX DMA is
 *   then enabled and MTQC programmed with the TX arbiter temporarily
 *   disabled, as the arbiter must be off while MTQC changes.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware queue index; i is the software index */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable TX DMA */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
753
754 /************************************************************************
755 * ixgbe_attach - Device initialization routine
756 *
757 * Called when the driver is being loaded.
758 * Identifies the type of hardware, allocates all resources
759 * and initializes the hardware.
760 *
761 * return 0 on success, positive on failure
762 ************************************************************************/
763 static void
764 ixgbe_attach(device_t parent, device_t dev, void *aux)
765 {
766 struct adapter *adapter;
767 struct ixgbe_hw *hw;
768 int error = -1;
769 u32 ctrl_ext;
770 u16 high, low, nvmreg;
771 pcireg_t id, subid;
772 const ixgbe_vendor_info_t *ent;
773 struct pci_attach_args *pa = aux;
774 const char *str;
775 char buf[256];
776
777 INIT_DEBUGOUT("ixgbe_attach: begin");
778
779 /* Allocate, clear, and link in our adapter structure */
780 adapter = device_private(dev);
781 adapter->hw.back = adapter;
782 adapter->dev = dev;
783 hw = &adapter->hw;
784 adapter->osdep.pc = pa->pa_pc;
785 adapter->osdep.tag = pa->pa_tag;
786 if (pci_dma64_available(pa))
787 adapter->osdep.dmat = pa->pa_dmat64;
788 else
789 adapter->osdep.dmat = pa->pa_dmat;
790 adapter->osdep.attached = false;
791
792 ent = ixgbe_lookup(pa);
793
794 KASSERT(ent != NULL);
795
796 aprint_normal(": %s, Version - %s\n",
797 ixgbe_strings[ent->index], ixgbe_driver_version);
798
799 /* Core Lock Init*/
800 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
801
802 /* Set up the timer callout */
803 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
804
805 /* Determine hardware revision */
806 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
807 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
808
809 hw->vendor_id = PCI_VENDOR(id);
810 hw->device_id = PCI_PRODUCT(id);
811 hw->revision_id =
812 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
813 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
814 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
815
816 /*
817 * Make sure BUSMASTER is set
818 */
819 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
820
821 /* Do base PCI setup - map BAR0 */
822 if (ixgbe_allocate_pci_resources(adapter, pa)) {
823 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
824 error = ENXIO;
825 goto err_out;
826 }
827
828 /* let hardware know driver is loaded */
829 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
830 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
831 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
832
833 /*
834 * Initialize the shared code
835 */
836 if (ixgbe_init_shared_code(hw) != 0) {
837 aprint_error_dev(dev, "Unable to initialize the shared code\n");
838 error = ENXIO;
839 goto err_out;
840 }
841
842 switch (hw->mac.type) {
843 case ixgbe_mac_82598EB:
844 str = "82598EB";
845 break;
846 case ixgbe_mac_82599EB:
847 str = "82599EB";
848 break;
849 case ixgbe_mac_X540:
850 str = "X540";
851 break;
852 case ixgbe_mac_X550:
853 str = "X550";
854 break;
855 case ixgbe_mac_X550EM_x:
856 str = "X550EM";
857 break;
858 case ixgbe_mac_X550EM_a:
859 str = "X550EM A";
860 break;
861 default:
862 str = "Unknown";
863 break;
864 }
865 aprint_normal_dev(dev, "device %s\n", str);
866
867 if (hw->mbx.ops.init_params)
868 hw->mbx.ops.init_params(hw);
869
870 hw->allow_unsupported_sfp = allow_unsupported_sfp;
871
872 /* Pick up the 82599 settings */
873 if (hw->mac.type != ixgbe_mac_82598EB) {
874 hw->phy.smart_speed = ixgbe_smart_speed;
875 adapter->num_segs = IXGBE_82599_SCATTER;
876 } else
877 adapter->num_segs = IXGBE_82598_SCATTER;
878
879 /* Ensure SW/FW semaphore is free */
880 ixgbe_init_swfw_semaphore(hw);
881
882 hw->mac.ops.set_lan_id(hw);
883 ixgbe_init_device_features(adapter);
884
885 if (ixgbe_configure_interrupts(adapter)) {
886 error = ENXIO;
887 goto err_out;
888 }
889
890 /* Allocate multicast array memory. */
891 adapter->mta = malloc(sizeof(*adapter->mta) *
892 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
893 if (adapter->mta == NULL) {
894 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
895 error = ENOMEM;
896 goto err_out;
897 }
898
899 /* Enable WoL (if supported) */
900 ixgbe_check_wol_support(adapter);
901
902 /* Verify adapter fan is still functional (if applicable) */
903 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
904 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
905 ixgbe_check_fan_failure(adapter, esdp, FALSE);
906 }
907
908 /* Set an initial default flow control value */
909 hw->fc.requested_mode = ixgbe_flow_control;
910
911 /* Sysctls for limiting the amount of work done in the taskqueues */
912 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
913 "max number of rx packets to process",
914 &adapter->rx_process_limit, ixgbe_rx_process_limit);
915
916 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
917 "max number of tx packets to process",
918 &adapter->tx_process_limit, ixgbe_tx_process_limit);
919
920 /* Do descriptor calc and sanity checks */
921 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
922 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
923 aprint_error_dev(dev, "TXD config issue, using default!\n");
924 adapter->num_tx_desc = DEFAULT_TXD;
925 } else
926 adapter->num_tx_desc = ixgbe_txd;
927
928 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
929 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
930 aprint_error_dev(dev, "RXD config issue, using default!\n");
931 adapter->num_rx_desc = DEFAULT_RXD;
932 } else
933 adapter->num_rx_desc = ixgbe_rxd;
934
935 /* Allocate our TX/RX Queues */
936 if (ixgbe_allocate_queues(adapter)) {
937 error = ENOMEM;
938 goto err_out;
939 }
940
941 hw->phy.reset_if_overtemp = TRUE;
942 error = ixgbe_reset_hw(hw);
943 hw->phy.reset_if_overtemp = FALSE;
944 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
945 /*
946 * No optics in this port, set up
947 * so the timer routine will probe
948 * for later insertion.
949 */
950 adapter->sfp_probe = TRUE;
951 error = IXGBE_SUCCESS;
952 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
953 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
954 error = EIO;
955 goto err_late;
956 } else if (error) {
957 aprint_error_dev(dev, "Hardware initialization failed\n");
958 error = EIO;
959 goto err_late;
960 }
961
962 /* Make sure we have a good EEPROM before we read from it */
963 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
964 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
965 error = EIO;
966 goto err_late;
967 }
968
969 aprint_normal("%s:", device_xname(dev));
970 /* NVM Image Version */
971 high = low = 0;
972 switch (hw->mac.type) {
973 case ixgbe_mac_X540:
974 case ixgbe_mac_X550EM_a:
975 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
976 if (nvmreg == 0xffff)
977 break;
978 high = (nvmreg >> 12) & 0x0f;
979 low = (nvmreg >> 4) & 0xff;
980 id = nvmreg & 0x0f;
981 aprint_normal(" NVM Image Version %u.", high);
982 if (hw->mac.type == ixgbe_mac_X540)
983 str = "%x";
984 else
985 str = "%02x";
986 aprint_normal(str, low);
987 aprint_normal(" ID 0x%x,", id);
988 break;
989 case ixgbe_mac_X550EM_x:
990 case ixgbe_mac_X550:
991 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
992 if (nvmreg == 0xffff)
993 break;
994 high = (nvmreg >> 12) & 0x0f;
995 low = nvmreg & 0xff;
996 aprint_normal(" NVM Image Version %u.%02x,", high, low);
997 break;
998 default:
999 break;
1000 }
1001 hw->eeprom.nvm_image_ver_high = high;
1002 hw->eeprom.nvm_image_ver_low = low;
1003
1004 /* PHY firmware revision */
1005 switch (hw->mac.type) {
1006 case ixgbe_mac_X540:
1007 case ixgbe_mac_X550:
1008 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1009 if (nvmreg == 0xffff)
1010 break;
1011 high = (nvmreg >> 12) & 0x0f;
1012 low = (nvmreg >> 4) & 0xff;
1013 id = nvmreg & 0x000f;
1014 aprint_normal(" PHY FW Revision %u.", high);
1015 if (hw->mac.type == ixgbe_mac_X540)
1016 str = "%x";
1017 else
1018 str = "%02x";
1019 aprint_normal(str, low);
1020 aprint_normal(" ID 0x%x,", id);
1021 break;
1022 default:
1023 break;
1024 }
1025
1026 /* NVM Map version & OEM NVM Image version */
1027 switch (hw->mac.type) {
1028 case ixgbe_mac_X550:
1029 case ixgbe_mac_X550EM_x:
1030 case ixgbe_mac_X550EM_a:
1031 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1032 if (nvmreg != 0xffff) {
1033 high = (nvmreg >> 12) & 0x0f;
1034 low = nvmreg & 0x00ff;
1035 aprint_normal(" NVM Map version %u.%02x,", high, low);
1036 }
1037 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1038 if (nvmreg != 0xffff) {
1039 high = (nvmreg >> 12) & 0x0f;
1040 low = nvmreg & 0x00ff;
1041 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1042 low);
1043 }
1044 break;
1045 default:
1046 break;
1047 }
1048
1049 /* Print the ETrackID */
1050 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1051 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1052 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1053
1054 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1055 error = ixgbe_allocate_msix(adapter, pa);
1056 if (error) {
1057 /* Free allocated queue structures first */
1058 ixgbe_free_transmit_structures(adapter);
1059 ixgbe_free_receive_structures(adapter);
1060 free(adapter->queues, M_DEVBUF);
1061
1062 /* Fallback to legacy interrupt */
1063 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1064 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1065 adapter->feat_en |= IXGBE_FEATURE_MSI;
1066 adapter->num_queues = 1;
1067
1068 /* Allocate our TX/RX Queues again */
1069 if (ixgbe_allocate_queues(adapter)) {
1070 error = ENOMEM;
1071 goto err_out;
1072 }
1073 }
1074 }
1075 /* Recovery mode */
1076 switch (adapter->hw.mac.type) {
1077 case ixgbe_mac_X550:
1078 case ixgbe_mac_X550EM_x:
1079 case ixgbe_mac_X550EM_a:
1080 /* >= 2.00 */
1081 if (hw->eeprom.nvm_image_ver_high >= 2) {
1082 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1083 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1084 }
1085 break;
1086 default:
1087 break;
1088 }
1089
1090 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1091 error = ixgbe_allocate_legacy(adapter, pa);
1092 if (error)
1093 goto err_late;
1094
1095 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1096 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1097 ixgbe_handle_link, adapter);
1098 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1099 ixgbe_handle_mod, adapter);
1100 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_handle_msf, adapter);
1102 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1103 ixgbe_handle_phy, adapter);
1104 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1105 adapter->fdir_si =
1106 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_reinit_fdir, adapter);
1108 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1109 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1110 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1111 && (adapter->fdir_si == NULL))) {
1112 aprint_error_dev(dev,
1113 "could not establish software interrupts ()\n");
1114 goto err_out;
1115 }
1116
1117 error = ixgbe_start_hw(hw);
1118 switch (error) {
1119 case IXGBE_ERR_EEPROM_VERSION:
1120 aprint_error_dev(dev, "This device is a pre-production adapter/"
1121 "LOM. Please be aware there may be issues associated "
1122 "with your hardware.\nIf you are experiencing problems "
1123 "please contact your Intel or hardware representative "
1124 "who provided you with this hardware.\n");
1125 break;
1126 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1127 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1128 error = EIO;
1129 goto err_late;
1130 case IXGBE_ERR_SFP_NOT_PRESENT:
1131 aprint_error_dev(dev, "No SFP+ Module found\n");
1132 /* falls thru */
1133 default:
1134 break;
1135 }
1136
1137 /* Setup OS specific network interface */
1138 if (ixgbe_setup_interface(dev, adapter) != 0)
1139 goto err_late;
1140
1141 /*
1142 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1143 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1144 */
1145 if (hw->phy.media_type == ixgbe_media_type_copper) {
1146 uint16_t id1, id2;
1147 int oui, model, rev;
1148 const char *descr;
1149
1150 id1 = hw->phy.id >> 16;
1151 id2 = hw->phy.id & 0xffff;
1152 oui = MII_OUI(id1, id2);
1153 model = MII_MODEL(id2);
1154 rev = MII_REV(id2);
1155 if ((descr = mii_get_descr(oui, model)) != NULL)
1156 aprint_normal_dev(dev,
1157 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1158 descr, oui, model, rev);
1159 else
1160 aprint_normal_dev(dev,
1161 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1162 oui, model, rev);
1163 }
1164
1165 /* Enable the optics for 82599 SFP+ fiber */
1166 ixgbe_enable_tx_laser(hw);
1167
1168 /* Enable EEE power saving */
1169 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1170 hw->mac.ops.setup_eee(hw,
1171 adapter->feat_en & IXGBE_FEATURE_EEE);
1172
1173 /* Enable power to the phy. */
1174 ixgbe_set_phy_power(hw, TRUE);
1175
1176 /* Initialize statistics */
1177 ixgbe_update_stats_counters(adapter);
1178
1179 /* Check PCIE slot type/speed/width */
1180 ixgbe_get_slot_info(adapter);
1181
1182 /*
1183 * Do time init and sysctl init here, but
1184 * only on the first port of a bypass adapter.
1185 */
1186 ixgbe_bypass_init(adapter);
1187
1188 /* Set an initial dmac value */
1189 adapter->dmac = 0;
1190 /* Set initial advertised speeds (if applicable) */
1191 adapter->advertise = ixgbe_get_advertise(adapter);
1192
1193 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1194 ixgbe_define_iov_schemas(dev, &error);
1195
1196 /* Add sysctls */
1197 ixgbe_add_device_sysctls(adapter);
1198 ixgbe_add_hw_stats(adapter);
1199
1200 /* For Netmap */
1201 adapter->init_locked = ixgbe_init_locked;
1202 adapter->stop_locked = ixgbe_stop;
1203
1204 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1205 ixgbe_netmap_attach(adapter);
1206
1207 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1208 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1209 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1210 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1211
1212 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1213 pmf_class_network_register(dev, adapter->ifp);
1214 else
1215 aprint_error_dev(dev, "couldn't establish power handler\n");
1216
1217 /* Init recovery mode timer and state variable */
1218 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1219 adapter->recovery_mode = 0;
1220
1221 /* Set up the timer callout */
1222 callout_init(&adapter->recovery_mode_timer,
1223 IXGBE_CALLOUT_FLAGS);
1224
1225 /* Start the task */
1226 callout_reset(&adapter->recovery_mode_timer, hz,
1227 ixgbe_recovery_mode_timer, adapter);
1228 }
1229
1230 INIT_DEBUGOUT("ixgbe_attach: end");
1231 adapter->osdep.attached = true;
1232
1233 return;
1234
1235 err_late:
1236 ixgbe_free_transmit_structures(adapter);
1237 ixgbe_free_receive_structures(adapter);
1238 free(adapter->queues, M_DEVBUF);
1239 err_out:
1240 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1241 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1242 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1243 ixgbe_free_softint(adapter);
1244 ixgbe_free_pci_resources(adapter);
1245 if (adapter->mta != NULL)
1246 free(adapter->mta, M_DEVBUF);
1247 IXGBE_CORE_LOCK_DESTROY(adapter);
1248
1249 return;
1250 } /* ixgbe_attach */
1251
1252 /************************************************************************
1253 * ixgbe_check_wol_support
1254 *
1255 * Checks whether the adapter's ports are capable of
1256 * Wake On LAN by reading the adapter's NVM.
1257 *
1258 * Sets each port's hw->wol_enabled value depending
1259 * on the value read here.
1260 ************************************************************************/
1261 static void
1262 ixgbe_check_wol_support(struct adapter *adapter)
1263 {
1264 struct ixgbe_hw *hw = &adapter->hw;
1265 u16 dev_caps = 0;
1266
1267 /* Find out WoL support for port */
1268 adapter->wol_support = hw->wol_enabled = 0;
1269 ixgbe_get_device_caps(hw, &dev_caps);
1270 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1271 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1272 hw->bus.func == 0))
1273 adapter->wol_support = hw->wol_enabled = 1;
1274
1275 /* Save initial wake up filter configuration */
1276 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1277
1278 return;
1279 } /* ixgbe_check_wol_support */
1280
1281 /************************************************************************
1282 * ixgbe_setup_interface
1283 *
1284 * Setup networking device structure and register an interface.
1285 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire it to this softc. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Multiqueue transmit via if_transmit unless the legacy-TX
	 * feature is enabled; if_start below serves the legacy path
	 * in either case.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach order matters: if_initialize, ether_ifattach, if_register. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (checksum offload and TSO, off by default) */
	ifp->if_capabilities |= IFCAP_RXCSUM
	    | IFCAP_TXCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1388
1389 /************************************************************************
1390 * ixgbe_add_media_types
1391 ************************************************************************/
1392 static void
1393 ixgbe_add_media_types(struct adapter *adapter)
1394 {
1395 struct ixgbe_hw *hw = &adapter->hw;
1396 device_t dev = adapter->dev;
1397 u64 layer;
1398
1399 layer = adapter->phy_layer;
1400
1401 #define ADD(mm, dd) \
1402 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1403
1404 ADD(IFM_NONE, 0);
1405
1406 /* Media types with matching NetBSD media defines */
1407 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1408 ADD(IFM_10G_T | IFM_FDX, 0);
1409 }
1410 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1411 ADD(IFM_1000_T | IFM_FDX, 0);
1412 }
1413 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1414 ADD(IFM_100_TX | IFM_FDX, 0);
1415 }
1416 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1417 ADD(IFM_10_T | IFM_FDX, 0);
1418 }
1419
1420 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1421 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1422 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1423 }
1424
1425 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1426 ADD(IFM_10G_LR | IFM_FDX, 0);
1427 if (hw->phy.multispeed_fiber) {
1428 ADD(IFM_1000_LX | IFM_FDX, 0);
1429 }
1430 }
1431 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1432 ADD(IFM_10G_SR | IFM_FDX, 0);
1433 if (hw->phy.multispeed_fiber) {
1434 ADD(IFM_1000_SX | IFM_FDX, 0);
1435 }
1436 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1437 ADD(IFM_1000_SX | IFM_FDX, 0);
1438 }
1439 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1440 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1441 }
1442
1443 #ifdef IFM_ETH_XTYPE
1444 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1445 ADD(IFM_10G_KR | IFM_FDX, 0);
1446 }
1447 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1448 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1449 }
1450 #else
1451 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1452 device_printf(dev, "Media supported: 10GbaseKR\n");
1453 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1454 ADD(IFM_10G_SR | IFM_FDX, 0);
1455 }
1456 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1457 device_printf(dev, "Media supported: 10GbaseKX4\n");
1458 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1459 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1460 }
1461 #endif
1462 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1463 ADD(IFM_1000_KX | IFM_FDX, 0);
1464 }
1465 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1466 ADD(IFM_2500_KX | IFM_FDX, 0);
1467 }
1468 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1469 ADD(IFM_2500_T | IFM_FDX, 0);
1470 }
1471 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1472 ADD(IFM_5000_T | IFM_FDX, 0);
1473 }
1474 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1475 device_printf(dev, "Media supported: 1000baseBX\n");
1476 /* XXX no ifmedia_set? */
1477
1478 ADD(IFM_AUTO, 0);
1479
1480 #undef ADD
1481 } /* ixgbe_add_media_types */
1482
1483 /************************************************************************
1484 * ixgbe_is_sfp
1485 ************************************************************************/
1486 static inline bool
1487 ixgbe_is_sfp(struct ixgbe_hw *hw)
1488 {
1489 switch (hw->mac.type) {
1490 case ixgbe_mac_82598EB:
1491 if (hw->phy.type == ixgbe_phy_nl)
1492 return (TRUE);
1493 return (FALSE);
1494 case ixgbe_mac_82599EB:
1495 switch (hw->mac.ops.get_media_type(hw)) {
1496 case ixgbe_media_type_fiber:
1497 case ixgbe_media_type_fiber_qsfp:
1498 return (TRUE);
1499 default:
1500 return (FALSE);
1501 }
1502 case ixgbe_mac_X550EM_x:
1503 case ixgbe_mac_X550EM_a:
1504 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1505 return (TRUE);
1506 return (FALSE);
1507 default:
1508 return (FALSE);
1509 }
1510 } /* ixgbe_is_sfp */
1511
1512 /************************************************************************
1513 * ixgbe_config_link
1514 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP path: defer module identification (mod_si) and,
		 * for multispeed fiber, speed selection (msf_si) to
		 * softints.  kpreempt is disabled around
		 * softint_schedule() to pin us to the current CPU.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		/* Fixed PHY path: set up the link synchronously. */
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1560
1561 /************************************************************************
1562 * ixgbe_update_stats_counters - Update board statistics counters.
1563 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	unsigned int queue_counters;
	int i;

	/*
	 * Read the hardware statistics registers and accumulate them
	 * into the evcnt counters.  Many of these registers clear on
	 * read, so each register is read exactly once per call.
	 */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			/* Per-queue RX drop count (82599 and newer only) */
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		/* Per-priority flow control XON/XOFF counters */
		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx is never updated above, so this
	 * subtraction is always zero here — confirm against the
	 * upstream FreeBSD version, which accumulates missed_rx.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	/*
	 * 64-bit byte counters are split across L/H register pairs on
	 * 82599 and newer; subtract flow-control frame bytes from the
	 * good-octets-transmitted count.
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet-size histogram counters */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters exclude the flow-control frames counted in total */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1725
1726 /************************************************************************
1727 * ixgbe_add_hw_stats
1728 *
1729 * Add sysctl variables, one per statistic, to the system.
1730 ************************************************************************/
1731 static void
1732 ixgbe_add_hw_stats(struct adapter *adapter)
1733 {
1734 device_t dev = adapter->dev;
1735 const struct sysctlnode *rnode, *cnode;
1736 struct sysctllog **log = &adapter->sysctllog;
1737 struct tx_ring *txr = adapter->tx_rings;
1738 struct rx_ring *rxr = adapter->rx_rings;
1739 struct ixgbe_hw *hw = &adapter->hw;
1740 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1741 const char *xname = device_xname(dev);
1742 int i;
1743
1744 /* Driver Statistics */
1745 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1746 NULL, xname, "Driver tx dma soft fail EFBIG");
1747 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1748 NULL, xname, "m_defrag() failed");
1749 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1750 NULL, xname, "Driver tx dma hard fail EFBIG");
1751 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1752 NULL, xname, "Driver tx dma hard fail EINVAL");
1753 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1754 NULL, xname, "Driver tx dma hard fail other");
1755 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1756 NULL, xname, "Driver tx dma soft fail EAGAIN");
1757 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1758 NULL, xname, "Driver tx dma soft fail ENOMEM");
1759 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1760 NULL, xname, "Watchdog timeouts");
1761 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1762 NULL, xname, "TSO errors");
1763 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1764 NULL, xname, "Link MSI-X IRQ Handled");
1765 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1766 NULL, xname, "Link softint");
1767 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1768 NULL, xname, "module softint");
1769 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1770 NULL, xname, "multimode softint");
1771 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1772 NULL, xname, "external PHY softint");
1773
1774 /* Max number of traffic class is 8 */
1775 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1776 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1777 snprintf(adapter->tcs[i].evnamebuf,
1778 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1779 xname, i);
1780 if (i < __arraycount(stats->mpc)) {
1781 evcnt_attach_dynamic(&stats->mpc[i],
1782 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1783 "RX Missed Packet Count");
1784 if (hw->mac.type == ixgbe_mac_82598EB)
1785 evcnt_attach_dynamic(&stats->rnbc[i],
1786 EVCNT_TYPE_MISC, NULL,
1787 adapter->tcs[i].evnamebuf,
1788 "Receive No Buffers");
1789 }
1790 if (i < __arraycount(stats->pxontxc)) {
1791 evcnt_attach_dynamic(&stats->pxontxc[i],
1792 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1793 "pxontxc");
1794 evcnt_attach_dynamic(&stats->pxonrxc[i],
1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1796 "pxonrxc");
1797 evcnt_attach_dynamic(&stats->pxofftxc[i],
1798 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1799 "pxofftxc");
1800 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1801 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1802 "pxoffrxc");
1803 if (hw->mac.type >= ixgbe_mac_82599EB)
1804 evcnt_attach_dynamic(&stats->pxon2offc[i],
1805 EVCNT_TYPE_MISC, NULL,
1806 adapter->tcs[i].evnamebuf,
1807 "pxon2offc");
1808 }
1809 }
1810
1811 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1812 #ifdef LRO
1813 struct lro_ctrl *lro = &rxr->lro;
1814 #endif /* LRO */
1815
1816 snprintf(adapter->queues[i].evnamebuf,
1817 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1818 xname, i);
1819 snprintf(adapter->queues[i].namebuf,
1820 sizeof(adapter->queues[i].namebuf), "q%d", i);
1821
1822 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1823 aprint_error_dev(dev, "could not create sysctl root\n");
1824 break;
1825 }
1826
1827 if (sysctl_createv(log, 0, &rnode, &rnode,
1828 0, CTLTYPE_NODE,
1829 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1830 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1831 break;
1832
1833 if (sysctl_createv(log, 0, &rnode, &cnode,
1834 CTLFLAG_READWRITE, CTLTYPE_INT,
1835 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1836 ixgbe_sysctl_interrupt_rate_handler, 0,
1837 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1838 break;
1839
1840 if (sysctl_createv(log, 0, &rnode, &cnode,
1841 CTLFLAG_READONLY, CTLTYPE_INT,
1842 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1843 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1844 0, CTL_CREATE, CTL_EOL) != 0)
1845 break;
1846
1847 if (sysctl_createv(log, 0, &rnode, &cnode,
1848 CTLFLAG_READONLY, CTLTYPE_INT,
1849 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1850 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1851 0, CTL_CREATE, CTL_EOL) != 0)
1852 break;
1853
1854 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1855 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1856 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1857 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1858 "Handled queue in softint");
1859 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1860 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1861 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1862 NULL, adapter->queues[i].evnamebuf, "TSO");
1863 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1864 NULL, adapter->queues[i].evnamebuf,
1865 "Queue No Descriptor Available");
1866 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1867 NULL, adapter->queues[i].evnamebuf,
1868 "Queue Packets Transmitted");
1869 #ifndef IXGBE_LEGACY_TX
1870 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1871 NULL, adapter->queues[i].evnamebuf,
1872 "Packets dropped in pcq");
1873 #endif
1874
1875 if (sysctl_createv(log, 0, &rnode, &cnode,
1876 CTLFLAG_READONLY,
1877 CTLTYPE_INT,
1878 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1879 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1880 CTL_CREATE, CTL_EOL) != 0)
1881 break;
1882
1883 if (sysctl_createv(log, 0, &rnode, &cnode,
1884 CTLFLAG_READONLY,
1885 CTLTYPE_INT,
1886 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1887 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1888 CTL_CREATE, CTL_EOL) != 0)
1889 break;
1890
1891 if (sysctl_createv(log, 0, &rnode, &cnode,
1892 CTLFLAG_READONLY,
1893 CTLTYPE_INT,
1894 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1895 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1896 CTL_CREATE, CTL_EOL) != 0)
1897 break;
1898
1899 if (i < __arraycount(stats->qprc)) {
1900 evcnt_attach_dynamic(&stats->qprc[i],
1901 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1902 "qprc");
1903 evcnt_attach_dynamic(&stats->qptc[i],
1904 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1905 "qptc");
1906 evcnt_attach_dynamic(&stats->qbrc[i],
1907 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1908 "qbrc");
1909 evcnt_attach_dynamic(&stats->qbtc[i],
1910 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1911 "qbtc");
1912 if (hw->mac.type >= ixgbe_mac_82599EB)
1913 evcnt_attach_dynamic(&stats->qprdc[i],
1914 EVCNT_TYPE_MISC, NULL,
1915 adapter->queues[i].evnamebuf, "qprdc");
1916 }
1917
1918 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1919 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1920 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1921 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1922 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1923 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1924 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1925 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1926 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1927 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1928 #ifdef LRO
1929 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1930 CTLFLAG_RD, &lro->lro_queued, 0,
1931 "LRO Queued");
1932 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1933 CTLFLAG_RD, &lro->lro_flushed, 0,
1934 "LRO Flushed");
1935 #endif /* LRO */
1936 }
1937
1938 /* MAC stats get their own sub node */
1939
1940 snprintf(stats->namebuf,
1941 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1942
1943 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "rx csum offload - IP");
1945 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "rx csum offload - L4");
1947 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "rx csum offload - IP bad");
1949 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "rx csum offload - L4 bad");
1951 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "Interrupt conditions zero");
1953 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Legacy interrupts");
1955
1956 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "CRC Errors");
1958 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "Illegal Byte Errors");
1960 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Byte Errors");
1962 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "MAC Short Packets Discarded");
1964 if (hw->mac.type >= ixgbe_mac_X550)
1965 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "Bad SFD");
1967 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Total Packets Missed");
1969 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "MAC Local Faults");
1971 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "MAC Remote Faults");
1973 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Receive Length Errors");
1975 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Link XON Transmitted");
1977 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "Link XON Received");
1979 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "Link XOFF Transmitted");
1981 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Link XOFF Received");
1983
1984 /* Packet Reception Stats */
1985 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Total Octets Received");
1987 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Good Octets Received");
1989 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Total Packets Received");
1991 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Good Packets Received");
1993 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Multicast Packets Received");
1995 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Broadcast Packets Received");
1997 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "64 byte frames received ");
1999 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "65-127 byte frames received");
2001 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "128-255 byte frames received");
2003 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "256-511 byte frames received");
2005 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "512-1023 byte frames received");
2007 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "1023-1522 byte frames received");
2009 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Receive Undersized");
2011 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Fragmented Packets Received ");
2013 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Oversized Packets Received");
2015 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Received Jabber");
2017 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Management Packets Received");
2019 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Management Packets Dropped");
2021 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Checksum Errors");
2023
2024 /* Packet Transmission Stats */
2025 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Good Octets Transmitted");
2027 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Total Packets Transmitted");
2029 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Good Packets Transmitted");
2031 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Broadcast Packets Transmitted");
2033 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "Multicast Packets Transmitted");
2035 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "Management Packets Transmitted");
2037 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "64 byte frames transmitted ");
2039 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "65-127 byte frames transmitted");
2041 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "128-255 byte frames transmitted");
2043 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "256-511 byte frames transmitted");
2045 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "512-1023 byte frames transmitted");
2047 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2048 stats->namebuf, "1024-1522 byte frames transmitted");
2049 } /* ixgbe_add_hw_stats */
2050
2051 static void
2052 ixgbe_clear_evcnt(struct adapter *adapter)
2053 {
2054 struct tx_ring *txr = adapter->tx_rings;
2055 struct rx_ring *rxr = adapter->rx_rings;
2056 struct ixgbe_hw *hw = &adapter->hw;
2057 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2058 int i;
2059
2060 adapter->efbig_tx_dma_setup.ev_count = 0;
2061 adapter->mbuf_defrag_failed.ev_count = 0;
2062 adapter->efbig2_tx_dma_setup.ev_count = 0;
2063 adapter->einval_tx_dma_setup.ev_count = 0;
2064 adapter->other_tx_dma_setup.ev_count = 0;
2065 adapter->eagain_tx_dma_setup.ev_count = 0;
2066 adapter->enomem_tx_dma_setup.ev_count = 0;
2067 adapter->tso_err.ev_count = 0;
2068 adapter->watchdog_events.ev_count = 0;
2069 adapter->link_irq.ev_count = 0;
2070 adapter->link_sicount.ev_count = 0;
2071 adapter->mod_sicount.ev_count = 0;
2072 adapter->msf_sicount.ev_count = 0;
2073 adapter->phy_sicount.ev_count = 0;
2074
2075 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2076 if (i < __arraycount(stats->mpc)) {
2077 stats->mpc[i].ev_count = 0;
2078 if (hw->mac.type == ixgbe_mac_82598EB)
2079 stats->rnbc[i].ev_count = 0;
2080 }
2081 if (i < __arraycount(stats->pxontxc)) {
2082 stats->pxontxc[i].ev_count = 0;
2083 stats->pxonrxc[i].ev_count = 0;
2084 stats->pxofftxc[i].ev_count = 0;
2085 stats->pxoffrxc[i].ev_count = 0;
2086 if (hw->mac.type >= ixgbe_mac_82599EB)
2087 stats->pxon2offc[i].ev_count = 0;
2088 }
2089 }
2090
2091 txr = adapter->tx_rings;
2092 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2093 adapter->queues[i].irqs.ev_count = 0;
2094 adapter->queues[i].handleq.ev_count = 0;
2095 adapter->queues[i].req.ev_count = 0;
2096 txr->no_desc_avail.ev_count = 0;
2097 txr->total_packets.ev_count = 0;
2098 txr->tso_tx.ev_count = 0;
2099 #ifndef IXGBE_LEGACY_TX
2100 txr->pcq_drops.ev_count = 0;
2101 #endif
2102 txr->q_efbig_tx_dma_setup = 0;
2103 txr->q_mbuf_defrag_failed = 0;
2104 txr->q_efbig2_tx_dma_setup = 0;
2105 txr->q_einval_tx_dma_setup = 0;
2106 txr->q_other_tx_dma_setup = 0;
2107 txr->q_eagain_tx_dma_setup = 0;
2108 txr->q_enomem_tx_dma_setup = 0;
2109 txr->q_tso_err = 0;
2110
2111 if (i < __arraycount(stats->qprc)) {
2112 stats->qprc[i].ev_count = 0;
2113 stats->qptc[i].ev_count = 0;
2114 stats->qbrc[i].ev_count = 0;
2115 stats->qbtc[i].ev_count = 0;
2116 if (hw->mac.type >= ixgbe_mac_82599EB)
2117 stats->qprdc[i].ev_count = 0;
2118 }
2119
2120 rxr->rx_packets.ev_count = 0;
2121 rxr->rx_bytes.ev_count = 0;
2122 rxr->rx_copies.ev_count = 0;
2123 rxr->no_jmbuf.ev_count = 0;
2124 rxr->rx_discarded.ev_count = 0;
2125 }
2126 stats->ipcs.ev_count = 0;
2127 stats->l4cs.ev_count = 0;
2128 stats->ipcs_bad.ev_count = 0;
2129 stats->l4cs_bad.ev_count = 0;
2130 stats->intzero.ev_count = 0;
2131 stats->legint.ev_count = 0;
2132 stats->crcerrs.ev_count = 0;
2133 stats->illerrc.ev_count = 0;
2134 stats->errbc.ev_count = 0;
2135 stats->mspdc.ev_count = 0;
2136 stats->mbsdc.ev_count = 0;
2137 stats->mpctotal.ev_count = 0;
2138 stats->mlfc.ev_count = 0;
2139 stats->mrfc.ev_count = 0;
2140 stats->rlec.ev_count = 0;
2141 stats->lxontxc.ev_count = 0;
2142 stats->lxonrxc.ev_count = 0;
2143 stats->lxofftxc.ev_count = 0;
2144 stats->lxoffrxc.ev_count = 0;
2145
2146 /* Packet Reception Stats */
2147 stats->tor.ev_count = 0;
2148 stats->gorc.ev_count = 0;
2149 stats->tpr.ev_count = 0;
2150 stats->gprc.ev_count = 0;
2151 stats->mprc.ev_count = 0;
2152 stats->bprc.ev_count = 0;
2153 stats->prc64.ev_count = 0;
2154 stats->prc127.ev_count = 0;
2155 stats->prc255.ev_count = 0;
2156 stats->prc511.ev_count = 0;
2157 stats->prc1023.ev_count = 0;
2158 stats->prc1522.ev_count = 0;
2159 stats->ruc.ev_count = 0;
2160 stats->rfc.ev_count = 0;
2161 stats->roc.ev_count = 0;
2162 stats->rjc.ev_count = 0;
2163 stats->mngprc.ev_count = 0;
2164 stats->mngpdc.ev_count = 0;
2165 stats->xec.ev_count = 0;
2166
2167 /* Packet Transmission Stats */
2168 stats->gotc.ev_count = 0;
2169 stats->tpt.ev_count = 0;
2170 stats->gptc.ev_count = 0;
2171 stats->bptc.ev_count = 0;
2172 stats->mptc.ev_count = 0;
2173 stats->mngptc.ev_count = 0;
2174 stats->ptc64.ev_count = 0;
2175 stats->ptc127.ev_count = 0;
2176 stats->ptc255.ev_count = 0;
2177 stats->ptc511.ev_count = 0;
2178 stats->ptc1023.ev_count = 0;
2179 stats->ptc1522.ev_count = 0;
2180 }
2181
2182 /************************************************************************
2183 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2184 *
2185 * Retrieves the TDH value from the hardware
2186 ************************************************************************/
2187 static int
2188 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2189 {
2190 struct sysctlnode node = *rnode;
2191 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2192 struct adapter *adapter;
2193 uint32_t val;
2194
2195 if (!txr)
2196 return (0);
2197
2198 adapter = txr->adapter;
2199 if (ixgbe_fw_recovery_mode_swflag(adapter))
2200 return (EPERM);
2201
2202 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2203 node.sysctl_data = &val;
2204 return sysctl_lookup(SYSCTLFN_CALL(&node));
2205 } /* ixgbe_sysctl_tdh_handler */
2206
2207 /************************************************************************
2208 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2209 *
2210 * Retrieves the TDT value from the hardware
2211 ************************************************************************/
2212 static int
2213 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2214 {
2215 struct sysctlnode node = *rnode;
2216 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2217 struct adapter *adapter;
2218 uint32_t val;
2219
2220 if (!txr)
2221 return (0);
2222
2223 adapter = txr->adapter;
2224 if (ixgbe_fw_recovery_mode_swflag(adapter))
2225 return (EPERM);
2226
2227 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2228 node.sysctl_data = &val;
2229 return sysctl_lookup(SYSCTLFN_CALL(&node));
2230 } /* ixgbe_sysctl_tdt_handler */
2231
2232 /************************************************************************
2233 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2234 * handler function
2235 *
2236 * Retrieves the next_to_check value
2237 ************************************************************************/
2238 static int
2239 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2240 {
2241 struct sysctlnode node = *rnode;
2242 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2243 struct adapter *adapter;
2244 uint32_t val;
2245
2246 if (!rxr)
2247 return (0);
2248
2249 adapter = rxr->adapter;
2250 if (ixgbe_fw_recovery_mode_swflag(adapter))
2251 return (EPERM);
2252
2253 val = rxr->next_to_check;
2254 node.sysctl_data = &val;
2255 return sysctl_lookup(SYSCTLFN_CALL(&node));
2256 } /* ixgbe_sysctl_next_to_check_handler */
2257
2258 /************************************************************************
2259 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2260 *
2261 * Retrieves the RDH value from the hardware
2262 ************************************************************************/
2263 static int
2264 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2265 {
2266 struct sysctlnode node = *rnode;
2267 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2268 struct adapter *adapter;
2269 uint32_t val;
2270
2271 if (!rxr)
2272 return (0);
2273
2274 adapter = rxr->adapter;
2275 if (ixgbe_fw_recovery_mode_swflag(adapter))
2276 return (EPERM);
2277
2278 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2279 node.sysctl_data = &val;
2280 return sysctl_lookup(SYSCTLFN_CALL(&node));
2281 } /* ixgbe_sysctl_rdh_handler */
2282
2283 /************************************************************************
2284 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2285 *
2286 * Retrieves the RDT value from the hardware
2287 ************************************************************************/
2288 static int
2289 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2290 {
2291 struct sysctlnode node = *rnode;
2292 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2293 struct adapter *adapter;
2294 uint32_t val;
2295
2296 if (!rxr)
2297 return (0);
2298
2299 adapter = rxr->adapter;
2300 if (ixgbe_fw_recovery_mode_swflag(adapter))
2301 return (EPERM);
2302
2303 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2304 node.sysctl_data = &val;
2305 return sysctl_lookup(SYSCTLFN_CALL(&node));
2306 } /* ixgbe_sysctl_rdt_handler */
2307
2308 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2309 /************************************************************************
2310 * ixgbe_register_vlan
2311 *
2312 * Run via vlan config EVENT, it enables us to use the
2313 * HW Filter table since we can get the vlan id. This
2314 * just creates the entry in the soft version of the
2315 * VFTA, init will repopulate the real table.
2316 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * Each shadow_vfta word covers 32 VLAN IDs: the upper bits of
	 * vtag select the word, the low 5 bits select the bit in it.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated soft VFTA into the hardware filter table */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2336
2337 /************************************************************************
2338 * ixgbe_unregister_vlan
2339 *
2340 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2341 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit decomposition as in ixgbe_register_vlan() */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2362 #endif
2363
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program the hardware's VLAN tag stripping and VLAN filter table
 *   to match the current ethercom capabilities and attached vlans.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;
	bool hwtagging;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(ec);

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/*
		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
		 */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			if (hwtagging)
				ctrl |= IXGBE_RXDCTL_VME;
			else
				ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		/* Record per-ring whether HW strips the VLAN tag */
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	/* VLAN hw tagging for 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hwtagging)
			ctrl |= IXGBE_VLNCTRL_VME;
		else
			ctrl &= ~IXGBE_VLNCTRL_VME;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
2424
2425 /************************************************************************
2426 * ixgbe_get_slot_info
2427 *
2428 * Get the width and transaction speed of
2429 * the slot this adapter is plugged into.
2430 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Must walk up the PCI tree past the internal bridge */
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn when the slot clearly can't feed the adapter at full rate */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		     (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		     (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2532
2533 /************************************************************************
2534 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2535 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	mutex_enter(&que->dc_mtx);
	/*
	 * disabled_count tracks nested disables (see
	 * ixgbe_disable_queue_internal).  Decrement one level; if other
	 * disables are still outstanding, don't touch the hardware yet.
	 * When the count is already 0 we fall through and (re)write the
	 * enable mask.
	 */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 has a single 32-bit EIMS register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs split the queue bits over two EIMS_EX regs */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2562
2563 /************************************************************************
2564 * ixgbe_disable_queue_internal
2565 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	mutex_enter(&que->dc_mtx);

	/*
	 * If the queue interrupt is already masked, only bump the nest
	 * count (when nesting is allowed) -- the hardware is untouched.
	 * Each disable level is undone by one ixgbe_enable_queue() call.
	 */
	if (que->disabled_count > 0) {
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 has a single 32-bit EIMC register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs split the queue bits over two EIMC_EX regs */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2597
2598 /************************************************************************
2599 * ixgbe_disable_queue
2600 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/* Nesting allowed: each call must be paired with an enable */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2607
2608 /************************************************************************
2609 * ixgbe_sched_handle_que - schedule deferred packet processing
2610 ************************************************************************/
2611 static inline void
2612 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2613 {
2614
2615 if(que->txrx_use_workqueue) {
2616 /*
2617 * adapter->que_wq is bound to each CPU instead of
2618 * each NIC queue to reduce workqueue kthread. As we
2619 * should consider about interrupt affinity in this
2620 * function, the workqueue kthread must be WQ_PERCPU.
2621 * If create WQ_PERCPU workqueue kthread for each NIC
2622 * queue, that number of created workqueue kthread is
2623 * (number of used NIC queue) * (number of CPUs) =
2624 * (number of CPUs) ^ 2 most often.
2625 *
2626 * The same NIC queue's interrupts are avoided by
2627 * masking the queue's interrupt. And different
2628 * NIC queue's interrupts use different struct work
2629 * (que->wq_cookie). So, "enqueued flag" to avoid
2630 * twice workqueue_enqueue() is not required .
2631 */
2632 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2633 } else {
2634 softint_schedule(que->que_si);
2635 }
2636 }
2637
2638 /************************************************************************
2639 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2640 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/*
	 * Mask this queue's interrupt; it is re-enabled either below or
	 * by the deferred handler scheduled via ixgbe_sched_handle_que().
	 */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	/* TX completions are cheap enough to reap here under the TX lock. */
	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet this interval; take the larger of TX/RX. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt; written to hardware at the next entry */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Either defer packet processing or re-enable the queue interrupt. */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2739
2740 /************************************************************************
2741 * ixgbe_media_status - Media Ioctl callback
2742 *
2743 * Called whenever the user queries the status of
2744 * the interface using ifconfig.
2745 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map (physical layer, negotiated speed) to an ifmedia subtype.
	 * The layer bitmask can have several bits set; each group below
	 * checks the layers it can express.
	 */

	/* Copper (BASE-T/TX) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct attach copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Optical layers (LR/LRM/SR) */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	/* Backplane (KR/KX4/KX) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2895
2896 /************************************************************************
2897 * ixgbe_media_change - Media Ioctl callback
2898 *
2899 * Called when the user changes speed/duplex using
2900 * media/mediopt option with ifconfig.
2901 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from software. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested ifmedia subtype into link-speed bits. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware says it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the selection in adapter->advertise using the same bit
	 * encoding as the "advertise_speed" sysctl:
	 *   bit 0 = 100M, bit 1 = 1G, bit 2 = 10G,
	 *   bit 3 = 10M, bit 4 = 2.5G, bit 5 = 5G.
	 * IFM_AUTO leaves it as 0.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
3004
3005 /************************************************************************
3006 * ixgbe_set_promisc
3007 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from unicast promiscuous off; re-set below if requested. */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast memberships, capped at the filter limit. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* Multicast promiscuous only needed when the filter would overflow. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
3047
3048 /************************************************************************
3049 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3050 ************************************************************************/
3051 static int
3052 ixgbe_msix_link(void *arg)
3053 {
3054 struct adapter *adapter = arg;
3055 struct ixgbe_hw *hw = &adapter->hw;
3056 u32 eicr, eicr_mask;
3057 s32 retval;
3058
3059 ++adapter->link_irq.ev_count;
3060
3061 /* Pause other interrupts */
3062 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3063
3064 /* First get the cause */
3065 /*
3066 * The specifications of 82598, 82599, X540 and X550 say EICS register
3067 * is write only. However, Linux says it is a workaround for silicon
3068 * errata to read EICS instead of EICR to get interrupt cause. It seems
3069 * there is a problem about read clear mechanism for EICR register.
3070 */
3071 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3072 /* Be sure the queue bits are not cleared */
3073 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3074 /* Clear interrupt with write */
3075 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3076
3077 /* Link status change */
3078 if (eicr & IXGBE_EICR_LSC) {
3079 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3080 softint_schedule(adapter->link_si);
3081 }
3082
3083 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3084 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3085 (eicr & IXGBE_EICR_FLOW_DIR)) {
3086 /* This is probably overkill :) */
3087 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3088 return 1;
3089 /* Disable the interrupt */
3090 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3091 softint_schedule(adapter->fdir_si);
3092 }
3093
3094 if (eicr & IXGBE_EICR_ECC) {
3095 device_printf(adapter->dev,
3096 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3097 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3098 }
3099
3100 /* Check for over temp condition */
3101 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3102 switch (adapter->hw.mac.type) {
3103 case ixgbe_mac_X550EM_a:
3104 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3105 break;
3106 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3107 IXGBE_EICR_GPI_SDP0_X550EM_a);
3108 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3109 IXGBE_EICR_GPI_SDP0_X550EM_a);
3110 retval = hw->phy.ops.check_overtemp(hw);
3111 if (retval != IXGBE_ERR_OVERTEMP)
3112 break;
3113 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3114 device_printf(adapter->dev, "System shutdown required!\n");
3115 break;
3116 default:
3117 if (!(eicr & IXGBE_EICR_TS))
3118 break;
3119 retval = hw->phy.ops.check_overtemp(hw);
3120 if (retval != IXGBE_ERR_OVERTEMP)
3121 break;
3122 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3123 device_printf(adapter->dev, "System shutdown required!\n");
3124 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3125 break;
3126 }
3127 }
3128
3129 /* Check for VF message */
3130 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3131 (eicr & IXGBE_EICR_MAILBOX))
3132 softint_schedule(adapter->mbx_si);
3133 }
3134
3135 if (ixgbe_is_sfp(hw)) {
3136 /* Pluggable optics-related interrupt */
3137 if (hw->mac.type >= ixgbe_mac_X540)
3138 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3139 else
3140 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3141
3142 if (eicr & eicr_mask) {
3143 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3144 softint_schedule(adapter->mod_si);
3145 }
3146
3147 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3148 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3149 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3150 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3151 softint_schedule(adapter->msf_si);
3152 }
3153 }
3154
3155 /* Check for fan failure */
3156 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3157 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3158 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3159 }
3160
3161 /* External PHY interrupt */
3162 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3163 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3164 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3165 softint_schedule(adapter->phy_si);
3166 }
3167
3168 /* Re-enable other interrupts */
3169 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3170 return 1;
3171 } /* ixgbe_msix_link */
3172
3173 static void
3174 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3175 {
3176
3177 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3178 itr |= itr << 16;
3179 else
3180 itr |= IXGBE_EITR_CNT_WDIS;
3181
3182 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3183 }
3184
3185
3186 /************************************************************************
3187 * ixgbe_sysctl_interrupt_rate_handler
3188 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No tuning while the firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * Read the current EITR interval.  Bits [11:3] hold the interval
	 * in units of 2us (hence the 0x0FF8 mask and >> 3); convert it
	 * to interrupts/second for presentation: rate = 10^6 / (2 * usec).
	 */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Read-only access (or error): nothing more to do. */
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		/* Clamp to the slowest representable rate. */
		if (rate < 1000)
			rate = 1000;
		/* Convert back to the 2us-unit field, pre-shifted. */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3239
3240 const struct sysctlnode *
3241 ixgbe_sysctl_instance(struct adapter *adapter)
3242 {
3243 const char *dvname;
3244 struct sysctllog **log;
3245 int rc;
3246 const struct sysctlnode *rnode;
3247
3248 if (adapter->sysctltop != NULL)
3249 return adapter->sysctltop;
3250
3251 log = &adapter->sysctllog;
3252 dvname = device_xname(adapter->dev);
3253
3254 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3255 0, CTLTYPE_NODE, dvname,
3256 SYSCTL_DESCR("ixgbe information and settings"),
3257 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3258 goto err;
3259
3260 return rnode;
3261 err:
3262 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3263 return NULL;
3264 }
3265
3266 /************************************************************************
3267 * ixgbe_add_device_sysctls
3268 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-instance hw.<devname> root. */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the AIM toggle from the driver-wide default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* Sub-node grouping the external-PHY controls. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3422
3423 /************************************************************************
3424 * ixgbe_allocate_pci_resources
3425 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* BAR0 holds the device registers; only memory-space BARs are valid. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Register space must not be mapped prefetchable. */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3472
3473 static void
3474 ixgbe_free_softint(struct adapter *adapter)
3475 {
3476 struct ix_queue *que = adapter->queues;
3477 struct tx_ring *txr = adapter->tx_rings;
3478 int i;
3479
3480 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3481 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3482 if (txr->txr_si != NULL)
3483 softint_disestablish(txr->txr_si);
3484 }
3485 if (que->que_si != NULL)
3486 softint_disestablish(que->que_si);
3487 }
3488 if (adapter->txr_wq != NULL)
3489 workqueue_destroy(adapter->txr_wq);
3490 if (adapter->txr_wq_enqueued != NULL)
3491 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3492 if (adapter->que_wq != NULL)
3493 workqueue_destroy(adapter->que_wq);
3494
3495 /* Drain the Link queue */
3496 if (adapter->link_si != NULL) {
3497 softint_disestablish(adapter->link_si);
3498 adapter->link_si = NULL;
3499 }
3500 if (adapter->mod_si != NULL) {
3501 softint_disestablish(adapter->mod_si);
3502 adapter->mod_si = NULL;
3503 }
3504 if (adapter->msf_si != NULL) {
3505 softint_disestablish(adapter->msf_si);
3506 adapter->msf_si = NULL;
3507 }
3508 if (adapter->phy_si != NULL) {
3509 softint_disestablish(adapter->phy_si);
3510 adapter->phy_si = NULL;
3511 }
3512 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3513 if (adapter->fdir_si != NULL) {
3514 softint_disestablish(adapter->fdir_si);
3515 adapter->fdir_si = NULL;
3516 }
3517 }
3518 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3519 if (adapter->mbx_si != NULL) {
3520 softint_disestablish(adapter->mbx_si);
3521 adapter->mbx_si = NULL;
3522 }
3523 }
3524 } /* ixgbe_free_softint */
3525
3526 /************************************************************************
3527 * ixgbe_detach - Device removal routine
3528 *
3529 * Called when the driver is being removed.
3530 * Stops the adapter and deallocates all the resources
3531 * that were allocated for driver operation.
3532 *
3533 * return 0 on success, positive on failure
3534 ************************************************************************/
3535 static int
3536 ixgbe_detach(device_t dev, int flags)
3537 {
3538 struct adapter *adapter = device_private(dev);
3539 struct rx_ring *rxr = adapter->rx_rings;
3540 struct tx_ring *txr = adapter->tx_rings;
3541 struct ixgbe_hw *hw = &adapter->hw;
3542 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3543 u32 ctrl_ext;
3544 int i;
3545
3546 INIT_DEBUGOUT("ixgbe_detach: begin");
3547 if (adapter->osdep.attached == false)
3548 return 0;
3549
3550 if (ixgbe_pci_iov_detach(dev) != 0) {
3551 device_printf(dev, "SR-IOV in use; detach first.\n");
3552 return (EBUSY);
3553 }
3554
3555 /* Stop the interface. Callouts are stopped in it. */
3556 ixgbe_ifstop(adapter->ifp, 1);
3557 #if NVLAN > 0
3558 /* Make sure VLANs are not using driver */
3559 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3560 ; /* nothing to do: no VLANs */
3561 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
3562 vlan_ifdetach(adapter->ifp);
3563 else {
3564 aprint_error_dev(dev, "VLANs in use, detach first\n");
3565 return (EBUSY);
3566 }
3567 #endif
3568
3569 pmf_device_deregister(dev);
3570
3571 ether_ifdetach(adapter->ifp);
3572 /* Stop the adapter */
3573 IXGBE_CORE_LOCK(adapter);
3574 ixgbe_setup_low_power_mode(adapter);
3575 IXGBE_CORE_UNLOCK(adapter);
3576
3577 ixgbe_free_softint(adapter);
3578
3579 /* let hardware know driver is unloading */
3580 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3581 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3582 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3583
3584 callout_halt(&adapter->timer, NULL);
3585 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3586 callout_halt(&adapter->recovery_mode_timer, NULL);
3587
3588 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3589 netmap_detach(adapter->ifp);
3590
3591 ixgbe_free_pci_resources(adapter);
3592 #if 0 /* XXX the NetBSD port is probably missing something here */
3593 bus_generic_detach(dev);
3594 #endif
3595 if_detach(adapter->ifp);
3596 if_percpuq_destroy(adapter->ipq);
3597
3598 sysctl_teardown(&adapter->sysctllog);
3599 evcnt_detach(&adapter->efbig_tx_dma_setup);
3600 evcnt_detach(&adapter->mbuf_defrag_failed);
3601 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3602 evcnt_detach(&adapter->einval_tx_dma_setup);
3603 evcnt_detach(&adapter->other_tx_dma_setup);
3604 evcnt_detach(&adapter->eagain_tx_dma_setup);
3605 evcnt_detach(&adapter->enomem_tx_dma_setup);
3606 evcnt_detach(&adapter->watchdog_events);
3607 evcnt_detach(&adapter->tso_err);
3608 evcnt_detach(&adapter->link_irq);
3609 evcnt_detach(&adapter->link_sicount);
3610 evcnt_detach(&adapter->mod_sicount);
3611 evcnt_detach(&adapter->msf_sicount);
3612 evcnt_detach(&adapter->phy_sicount);
3613
3614 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3615 if (i < __arraycount(stats->mpc)) {
3616 evcnt_detach(&stats->mpc[i]);
3617 if (hw->mac.type == ixgbe_mac_82598EB)
3618 evcnt_detach(&stats->rnbc[i]);
3619 }
3620 if (i < __arraycount(stats->pxontxc)) {
3621 evcnt_detach(&stats->pxontxc[i]);
3622 evcnt_detach(&stats->pxonrxc[i]);
3623 evcnt_detach(&stats->pxofftxc[i]);
3624 evcnt_detach(&stats->pxoffrxc[i]);
3625 if (hw->mac.type >= ixgbe_mac_82599EB)
3626 evcnt_detach(&stats->pxon2offc[i]);
3627 }
3628 }
3629
3630 txr = adapter->tx_rings;
3631 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3632 evcnt_detach(&adapter->queues[i].irqs);
3633 evcnt_detach(&adapter->queues[i].handleq);
3634 evcnt_detach(&adapter->queues[i].req);
3635 evcnt_detach(&txr->no_desc_avail);
3636 evcnt_detach(&txr->total_packets);
3637 evcnt_detach(&txr->tso_tx);
3638 #ifndef IXGBE_LEGACY_TX
3639 evcnt_detach(&txr->pcq_drops);
3640 #endif
3641
3642 if (i < __arraycount(stats->qprc)) {
3643 evcnt_detach(&stats->qprc[i]);
3644 evcnt_detach(&stats->qptc[i]);
3645 evcnt_detach(&stats->qbrc[i]);
3646 evcnt_detach(&stats->qbtc[i]);
3647 if (hw->mac.type >= ixgbe_mac_82599EB)
3648 evcnt_detach(&stats->qprdc[i]);
3649 }
3650
3651 evcnt_detach(&rxr->rx_packets);
3652 evcnt_detach(&rxr->rx_bytes);
3653 evcnt_detach(&rxr->rx_copies);
3654 evcnt_detach(&rxr->no_jmbuf);
3655 evcnt_detach(&rxr->rx_discarded);
3656 }
3657 evcnt_detach(&stats->ipcs);
3658 evcnt_detach(&stats->l4cs);
3659 evcnt_detach(&stats->ipcs_bad);
3660 evcnt_detach(&stats->l4cs_bad);
3661 evcnt_detach(&stats->intzero);
3662 evcnt_detach(&stats->legint);
3663 evcnt_detach(&stats->crcerrs);
3664 evcnt_detach(&stats->illerrc);
3665 evcnt_detach(&stats->errbc);
3666 evcnt_detach(&stats->mspdc);
3667 if (hw->mac.type >= ixgbe_mac_X550)
3668 evcnt_detach(&stats->mbsdc);
3669 evcnt_detach(&stats->mpctotal);
3670 evcnt_detach(&stats->mlfc);
3671 evcnt_detach(&stats->mrfc);
3672 evcnt_detach(&stats->rlec);
3673 evcnt_detach(&stats->lxontxc);
3674 evcnt_detach(&stats->lxonrxc);
3675 evcnt_detach(&stats->lxofftxc);
3676 evcnt_detach(&stats->lxoffrxc);
3677
3678 /* Packet Reception Stats */
3679 evcnt_detach(&stats->tor);
3680 evcnt_detach(&stats->gorc);
3681 evcnt_detach(&stats->tpr);
3682 evcnt_detach(&stats->gprc);
3683 evcnt_detach(&stats->mprc);
3684 evcnt_detach(&stats->bprc);
3685 evcnt_detach(&stats->prc64);
3686 evcnt_detach(&stats->prc127);
3687 evcnt_detach(&stats->prc255);
3688 evcnt_detach(&stats->prc511);
3689 evcnt_detach(&stats->prc1023);
3690 evcnt_detach(&stats->prc1522);
3691 evcnt_detach(&stats->ruc);
3692 evcnt_detach(&stats->rfc);
3693 evcnt_detach(&stats->roc);
3694 evcnt_detach(&stats->rjc);
3695 evcnt_detach(&stats->mngprc);
3696 evcnt_detach(&stats->mngpdc);
3697 evcnt_detach(&stats->xec);
3698
3699 /* Packet Transmission Stats */
3700 evcnt_detach(&stats->gotc);
3701 evcnt_detach(&stats->tpt);
3702 evcnt_detach(&stats->gptc);
3703 evcnt_detach(&stats->bptc);
3704 evcnt_detach(&stats->mptc);
3705 evcnt_detach(&stats->mngptc);
3706 evcnt_detach(&stats->ptc64);
3707 evcnt_detach(&stats->ptc127);
3708 evcnt_detach(&stats->ptc255);
3709 evcnt_detach(&stats->ptc511);
3710 evcnt_detach(&stats->ptc1023);
3711 evcnt_detach(&stats->ptc1522);
3712
3713 ixgbe_free_transmit_structures(adapter);
3714 ixgbe_free_receive_structures(adapter);
3715 for (i = 0; i < adapter->num_queues; i++) {
3716 struct ix_queue * que = &adapter->queues[i];
3717 mutex_destroy(&que->dc_mtx);
3718 }
3719 free(adapter->queues, M_DEVBUF);
3720 free(adapter->mta, M_DEVBUF);
3721
3722 IXGBE_CORE_LOCK_DESTROY(adapter);
3723
3724 return (0);
3725 } /* ixgbe_detach */
3726
3727 /************************************************************************
3728 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3729 *
3730 * Prepare the adapter/port for LPLU and/or WoL
3731 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	/* Caller must hold the core lock; we stop the adapter below. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/*
		 * X550EM baseT adapters need a special LPLU flow: the PHY
		 * must not be reset while we hand it to the LPLU routine,
		 * so temporarily disable PHY resets around the call.
		 */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No wake-on-LAN: power the PHY down and clear all wake
		 * filters/controls so the device stays quiet in D3. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success; a hardware error code from enter_lplu otherwise. */
	return error;
} /* ixgbe_setup_low_power_mode */
3786
3787 /************************************************************************
3788 * ixgbe_shutdown - Shutdown entry point
3789 ************************************************************************/
3790 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3791 static int
3792 ixgbe_shutdown(device_t dev)
3793 {
3794 struct adapter *adapter = device_private(dev);
3795 int error = 0;
3796
3797 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3798
3799 IXGBE_CORE_LOCK(adapter);
3800 error = ixgbe_setup_low_power_mode(adapter);
3801 IXGBE_CORE_UNLOCK(adapter);
3802
3803 return (error);
3804 } /* ixgbe_shutdown */
3805 #endif
3806
3807 /************************************************************************
3808 * ixgbe_suspend
3809 *
3810 * From D0 to D3
3811 ************************************************************************/
3812 static bool
3813 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3814 {
3815 struct adapter *adapter = device_private(dev);
3816 int error = 0;
3817
3818 INIT_DEBUGOUT("ixgbe_suspend: begin");
3819
3820 IXGBE_CORE_LOCK(adapter);
3821
3822 error = ixgbe_setup_low_power_mode(adapter);
3823
3824 IXGBE_CORE_UNLOCK(adapter);
3825
3826 return (error);
3827 } /* ixgbe_suspend */
3828
3829 /************************************************************************
3830 * ixgbe_resume
3831 *
3832 * From D3 to D0
3833 ************************************************************************/
3834 static bool
3835 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3836 {
3837 struct adapter *adapter = device_private(dev);
3838 struct ifnet *ifp = adapter->ifp;
3839 struct ixgbe_hw *hw = &adapter->hw;
3840 u32 wus;
3841
3842 INIT_DEBUGOUT("ixgbe_resume: begin");
3843
3844 IXGBE_CORE_LOCK(adapter);
3845
3846 /* Read & clear WUS register */
3847 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3848 if (wus)
3849 device_printf(dev, "Woken up by (WUS): %#010x\n",
3850 IXGBE_READ_REG(hw, IXGBE_WUS));
3851 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3852 /* And clear WUFC until next low-power transition */
3853 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3854
3855 /*
3856 * Required after D3->D0 transition;
3857 * will re-advertise all previous advertised speeds
3858 */
3859 if (ifp->if_flags & IFF_UP)
3860 ixgbe_init_locked(adapter);
3861
3862 IXGBE_CORE_UNLOCK(adapter);
3863
3864 return true;
3865 } /* ixgbe_resume */
3866
3867 /*
3868 * Set the various hardware offload abilities.
3869 *
3870 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3871 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3872 * mbuf offload flags the driver will understand.
3873 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty: NetBSD has no if_hwassist field;
	 * offload capabilities are negotiated through if_capenable /
	 * ethercom instead.  Kept as a stub to mirror the FreeBSD
	 * driver structure.
	 */
}
3879
3880 /************************************************************************
3881 * ixgbe_init_locked - Init entry point
3882 *
3883 * Used in two ways: It is used by the stack as an init
3884 * entry point in network interface structure. It is also
3885 * used by the driver as a hw/sw initialization routine to
3886 * get to a consistent state.
3887 *
 *   Note: this NetBSD version returns void; setup failures are
 *   reported with device_printf() and leave the interface stopped.
3889 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	int		i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the MAC and the watchdog timer before reprogramming. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/*
	 * Get the latest mac address, User can use a LAA.
	 * (RAR[0] is written again below with the possibly-updated
	 * address taken from the interface's link-level address.)
	 */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) until the enable bit latches. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic link/watchdog timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4133
4134 /************************************************************************
4135 * ixgbe_init
4136 ************************************************************************/
4137 static int
4138 ixgbe_init(struct ifnet *ifp)
4139 {
4140 struct adapter *adapter = ifp->if_softc;
4141
4142 IXGBE_CORE_LOCK(adapter);
4143 ixgbe_init_locked(adapter);
4144 IXGBE_CORE_UNLOCK(adapter);
4145
4146 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4147 } /* ixgbe_init */
4148
4149 /************************************************************************
4150 * ixgbe_set_ivar
4151 *
4152 * Setup the correct IVAR register for a particular MSI-X interrupt
4153 * (yes this is all very magic and confusing :)
4154 * - entry is the register array entry
4155 * - vector is the MSI-X vector for this queue
4156 * - type is RX/TX/MISC
4157 ************************************************************************/
4158 static void
4159 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4160 {
4161 struct ixgbe_hw *hw = &adapter->hw;
4162 u32 ivar, index;
4163
4164 vector |= IXGBE_IVAR_ALLOC_VAL;
4165
4166 switch (hw->mac.type) {
4167 case ixgbe_mac_82598EB:
4168 if (type == -1)
4169 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4170 else
4171 entry += (type * 64);
4172 index = (entry >> 2) & 0x1F;
4173 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4174 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4175 ivar |= (vector << (8 * (entry & 0x3)));
4176 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4177 break;
4178 case ixgbe_mac_82599EB:
4179 case ixgbe_mac_X540:
4180 case ixgbe_mac_X550:
4181 case ixgbe_mac_X550EM_x:
4182 case ixgbe_mac_X550EM_a:
4183 if (type == -1) { /* MISC IVAR */
4184 index = (entry & 1) * 8;
4185 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4186 ivar &= ~(0xFF << index);
4187 ivar |= (vector << index);
4188 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4189 } else { /* RX/TX IVARS */
4190 index = (16 * (entry & 1)) + (8 * type);
4191 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4192 ivar &= ~(0xFF << index);
4193 ivar |= (vector << index);
4194 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4195 }
4196 break;
4197 default:
4198 break;
4199 }
4200 } /* ixgbe_set_ivar */
4201
4202 /************************************************************************
4203 * ixgbe_configure_ivars
4204 ************************************************************************/
4205 static void
4206 ixgbe_configure_ivars(struct adapter *adapter)
4207 {
4208 struct ix_queue *que = adapter->queues;
4209 u32 newitr;
4210
4211 if (ixgbe_max_interrupt_rate > 0)
4212 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4213 else {
4214 /*
4215 * Disable DMA coalescing if interrupt moderation is
4216 * disabled.
4217 */
4218 adapter->dmac = 0;
4219 newitr = 0;
4220 }
4221
4222 for (int i = 0; i < adapter->num_queues; i++, que++) {
4223 struct rx_ring *rxr = &adapter->rx_rings[i];
4224 struct tx_ring *txr = &adapter->tx_rings[i];
4225 /* First the RX queue entry */
4226 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4227 /* ... and the TX */
4228 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4229 /* Set an Initial EITR value */
4230 ixgbe_eitr_write(adapter, que->msix, newitr);
4231 /*
4232 * To eliminate influence of the previous state.
4233 * At this point, Tx/Rx interrupt handler
4234 * (ixgbe_msix_que()) cannot be called, so both
4235 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4236 */
4237 que->eitr_setting = 0;
4238 }
4239
4240 /* For the Link interrupt */
4241 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4242 } /* ixgbe_configure_ivars */
4243
4244 /************************************************************************
4245 * ixgbe_config_gpie
4246 ************************************************************************/
4247 static void
4248 ixgbe_config_gpie(struct adapter *adapter)
4249 {
4250 struct ixgbe_hw *hw = &adapter->hw;
4251 u32 gpie;
4252
4253 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4254
4255 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4256 /* Enable Enhanced MSI-X mode */
4257 gpie |= IXGBE_GPIE_MSIX_MODE
4258 | IXGBE_GPIE_EIAME
4259 | IXGBE_GPIE_PBA_SUPPORT
4260 | IXGBE_GPIE_OCD;
4261 }
4262
4263 /* Fan Failure Interrupt */
4264 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4265 gpie |= IXGBE_SDP1_GPIEN;
4266
4267 /* Thermal Sensor Interrupt */
4268 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4269 gpie |= IXGBE_SDP0_GPIEN_X540;
4270
4271 /* Link detection */
4272 switch (hw->mac.type) {
4273 case ixgbe_mac_82599EB:
4274 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4275 break;
4276 case ixgbe_mac_X550EM_x:
4277 case ixgbe_mac_X550EM_a:
4278 gpie |= IXGBE_SDP0_GPIEN_X540;
4279 break;
4280 default:
4281 break;
4282 }
4283
4284 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4285
4286 } /* ixgbe_config_gpie */
4287
4288 /************************************************************************
4289 * ixgbe_config_delay_values
4290 *
4291 * Requires adapter->max_frame_size to be set.
4292 ************************************************************************/
4293 static void
4294 ixgbe_config_delay_values(struct adapter *adapter)
4295 {
4296 struct ixgbe_hw *hw = &adapter->hw;
4297 u32 rxpb, frame, size, tmp;
4298
4299 frame = adapter->max_frame_size;
4300
4301 /* Calculate High Water */
4302 switch (hw->mac.type) {
4303 case ixgbe_mac_X540:
4304 case ixgbe_mac_X550:
4305 case ixgbe_mac_X550EM_x:
4306 case ixgbe_mac_X550EM_a:
4307 tmp = IXGBE_DV_X540(frame, frame);
4308 break;
4309 default:
4310 tmp = IXGBE_DV(frame, frame);
4311 break;
4312 }
4313 size = IXGBE_BT2KB(tmp);
4314 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4315 hw->fc.high_water[0] = rxpb - size;
4316
4317 /* Now calculate Low Water */
4318 switch (hw->mac.type) {
4319 case ixgbe_mac_X540:
4320 case ixgbe_mac_X550:
4321 case ixgbe_mac_X550EM_x:
4322 case ixgbe_mac_X550EM_a:
4323 tmp = IXGBE_LOW_DV_X540(frame);
4324 break;
4325 default:
4326 tmp = IXGBE_LOW_DV(frame);
4327 break;
4328 }
4329 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4330
4331 hw->fc.pause_time = IXGBE_FC_PAUSE;
4332 hw->fc.send_xon = TRUE;
4333 } /* ixgbe_config_delay_values */
4334
4335 /************************************************************************
4336 * ixgbe_set_multi - Multicast Update
4337 *
4338 * Called whenever multicast address list is updated.
4339 ************************************************************************/
4340 static void
4341 ixgbe_set_multi(struct adapter *adapter)
4342 {
4343 struct ixgbe_mc_addr *mta;
4344 struct ifnet *ifp = adapter->ifp;
4345 u8 *update_ptr;
4346 int mcnt = 0;
4347 u32 fctrl;
4348 struct ethercom *ec = &adapter->osdep.ec;
4349 struct ether_multi *enm;
4350 struct ether_multistep step;
4351
4352 KASSERT(mutex_owned(&adapter->core_mtx));
4353 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
4354
4355 mta = adapter->mta;
4356 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4357
4358 ifp->if_flags &= ~IFF_ALLMULTI;
4359 ETHER_LOCK(ec);
4360 ETHER_FIRST_MULTI(step, ec, enm);
4361 while (enm != NULL) {
4362 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4363 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4364 ETHER_ADDR_LEN) != 0)) {
4365 ifp->if_flags |= IFF_ALLMULTI;
4366 break;
4367 }
4368 bcopy(enm->enm_addrlo,
4369 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4370 mta[mcnt].vmdq = adapter->pool;
4371 mcnt++;
4372 ETHER_NEXT_MULTI(step, enm);
4373 }
4374 ETHER_UNLOCK(ec);
4375
4376 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4377 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4378 if (ifp->if_flags & IFF_PROMISC)
4379 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4380 else if (ifp->if_flags & IFF_ALLMULTI) {
4381 fctrl |= IXGBE_FCTRL_MPE;
4382 }
4383
4384 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4385
4386 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4387 update_ptr = (u8 *)mta;
4388 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4389 ixgbe_mc_array_itr, TRUE);
4390 }
4391
4392 } /* ixgbe_set_multi */
4393
4394 /************************************************************************
4395 * ixgbe_mc_array_itr
4396 *
4397 * An iterator function needed by the multicast shared code.
4398 * It feeds the shared code routine the addresses in the
4399 * array of ixgbe_set_multi() one by one.
4400 ************************************************************************/
4401 static u8 *
4402 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4403 {
4404 struct ixgbe_mc_addr *mta;
4405
4406 mta = (struct ixgbe_mc_addr *)*update_ptr;
4407 *vmdq = mta->vmdq;
4408
4409 *update_ptr = (u8*)(mta + 1);
4410
4411 return (mta->addr);
4412 } /* ixgbe_mc_array_itr */
4413
4414 /************************************************************************
4415 * ixgbe_local_timer - Timer routine
4416 *
4417 * Checks for link status, updates statistics,
4418 * and runs the watchdog check.
4419 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Take the core lock and run the real timer body. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4429
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmap of queues with pending work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of TX queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: fold the per-queue TX DMA setup
	 * error counters into the adapter-wide evcnt totals.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, count the event, and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4534
4535 /************************************************************************
4536 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4537 ************************************************************************/
4538 static void
4539 ixgbe_recovery_mode_timer(void *arg)
4540 {
4541 struct adapter *adapter = arg;
4542 struct ixgbe_hw *hw = &adapter->hw;
4543
4544 IXGBE_CORE_LOCK(adapter);
4545 if (ixgbe_fw_recovery_mode(hw)) {
4546 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4547 /* Firmware error detected, entering recovery mode */
4548 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4549
4550 if (hw->adapter_stopped == FALSE)
4551 ixgbe_stop(adapter);
4552 }
4553 } else
4554 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4555
4556 callout_reset(&adapter->recovery_mode_timer, hz,
4557 ixgbe_recovery_mode_timer, adapter);
4558 IXGBE_CORE_UNLOCK(adapter);
4559 } /* ixgbe_recovery_mode_timer */
4560
4561 /************************************************************************
4562 * ixgbe_sfp_probe
4563 *
4564 * Determine if a port had optics inserted.
4565 ************************************************************************/
4566 static bool
4567 ixgbe_sfp_probe(struct adapter *adapter)
4568 {
4569 struct ixgbe_hw *hw = &adapter->hw;
4570 device_t dev = adapter->dev;
4571 bool result = FALSE;
4572
4573 if ((hw->phy.type == ixgbe_phy_nl) &&
4574 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4575 s32 ret = hw->phy.ops.identify_sfp(hw);
4576 if (ret)
4577 goto out;
4578 ret = hw->phy.ops.reset(hw);
4579 adapter->sfp_probe = FALSE;
4580 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4581 device_printf(dev,"Unsupported SFP+ module detected!");
4582 device_printf(dev,
4583 "Reload driver with supported module.\n");
4584 goto out;
4585 } else
4586 device_printf(dev, "SFP+ module detected!\n");
4587 /* We now have supported optics */
4588 result = TRUE;
4589 }
4590 out:
4591
4592 return (result);
4593 } /* ixgbe_sfp_probe */
4594
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 *   Softint handler run when an SFP+ module insertion/removal
 *   interrupt is raised.  Re-identifies the module, reconfigures the
 *   MAC for it, then schedules the MSF tasklet so the link is
 *   renegotiated for the new optics.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk errata workaround: proceed only if the cage
		 * presence pin (SDP2 on 82599, SDP0 on X550EM) confirms
		 * a module is physically seated; otherwise the interrupt
		 * was spurious.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 resets the PHY; newer MACs reconfigure via setup_sfp */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Hand off to the MSF tasklet to re-set up the link */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4645
4646
/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 *   Re-reads the supported PHY layer, restarts link setup with the
 *   currently advertised speeds, and rebuilds the ifmedia list so it
 *   matches the capabilities of the (possibly new) module.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	/* Nothing advertised: fall back to the hardware's capabilities */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */
4677
4678 /************************************************************************
4679 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4680 ************************************************************************/
4681 static void
4682 ixgbe_handle_phy(void *context)
4683 {
4684 struct adapter *adapter = context;
4685 struct ixgbe_hw *hw = &adapter->hw;
4686 int error;
4687
4688 ++adapter->phy_sicount.ev_count;
4689 error = hw->phy.ops.handle_lasi(hw);
4690 if (error == IXGBE_ERR_OVERTEMP)
4691 device_printf(adapter->dev,
4692 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4693 " PHY will downshift to lower power state!\n");
4694 else if (error)
4695 device_printf(adapter->dev,
4696 "Error handling LASI interrupt: %d\n", error);
4697 } /* ixgbe_handle_phy */
4698
/*
 * if_stop entry point: bring the interface down with the core lock
 * held.  The 'disable' argument is part of the ifnet API but is not
 * used by this driver.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
4708
/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 *
 *   Must be called with the core lock held (asserted below).
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so that the ixgbe_stop_adapter() call
	 * below actually performs its stop sequence rather than
	 * short-circuiting on an already-set flag.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4750
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 *
 * Acts only on transitions of adapter->link_active (to UP or to DOWN);
 * repeated calls in the same state are no-ops.  Core lock must be held.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any deferred TX/softint work while down */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4844
4845 /************************************************************************
4846 * ixgbe_config_dmac - Configure DMA Coalescing
4847 ************************************************************************/
4848 static void
4849 ixgbe_config_dmac(struct adapter *adapter)
4850 {
4851 struct ixgbe_hw *hw = &adapter->hw;
4852 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4853
4854 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4855 return;
4856
4857 if (dcfg->watchdog_timer ^ adapter->dmac ||
4858 dcfg->link_speed ^ adapter->link_speed) {
4859 dcfg->watchdog_timer = adapter->dmac;
4860 dcfg->fcoe_en = false;
4861 dcfg->link_speed = adapter->link_speed;
4862 dcfg->num_tcs = 1;
4863
4864 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4865 dcfg->watchdog_timer, dcfg->link_speed);
4866
4867 hw->mac.ops.dmac_config(hw);
4868 }
4869 } /* ixgbe_config_dmac */
4870
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Build and program the "other cause" interrupt mask appropriate for
 *   this MAC generation, set up MSI-X auto-clear, then enable every
 *   queue vector.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all causes except the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Per-MAC additions: ECC, thermal sensor and SDP (GPIO) causes */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4954
4955 /************************************************************************
4956 * ixgbe_disable_intr_internal
4957 ************************************************************************/
4958 static void
4959 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
4960 {
4961 struct ix_queue *que = adapter->queues;
4962
4963 /* disable interrupts other than queues */
4964 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4965
4966 if (adapter->msix_mem)
4967 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4968
4969 for (int i = 0; i < adapter->num_queues; i++, que++)
4970 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
4971
4972 IXGBE_WRITE_FLUSH(&adapter->hw);
4973
4974 } /* ixgbe_do_disable_intr_internal */
4975
/************************************************************************
 * ixgbe_disable_intr
 *
 *   Disable all interrupts; nested disable is permitted (nestok=true).
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4985
/************************************************************************
 * ixgbe_ensure_disabled_intr
 *
 *   Like ixgbe_disable_intr(), but with nestok=false so the per-queue
 *   disable does not tolerate an already-disabled (nested) state.
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4995
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single-vector (INTx/MSI) interrupt handler: services the queue,
 *   then demultiplexes the "other" causes (fan failure, link, SFP
 *   module, external PHY) to their softint handlers.
 *   Returns 1 when the interrupt was ours, 0 otherwise.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598: mask first, then read EICR */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Not our interrupt; re-enable and bail */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack and defer to mod tasklet */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber: ack and defer to msf tasklet */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/*
	 * More work pending (deferred RX): reschedule the queue handler;
	 * otherwise re-enable interrupts now.
	 */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
5089
5090 /************************************************************************
5091 * ixgbe_free_pciintr_resources
5092 ************************************************************************/
5093 static void
5094 ixgbe_free_pciintr_resources(struct adapter *adapter)
5095 {
5096 struct ix_queue *que = adapter->queues;
5097 int rid;
5098
5099 /*
5100 * Release all msix queue resources:
5101 */
5102 for (int i = 0; i < adapter->num_queues; i++, que++) {
5103 if (que->res != NULL) {
5104 pci_intr_disestablish(adapter->osdep.pc,
5105 adapter->osdep.ihs[i]);
5106 adapter->osdep.ihs[i] = NULL;
5107 }
5108 }
5109
5110 /* Clean the Legacy or Link interrupt last */
5111 if (adapter->vector) /* we are doing MSIX */
5112 rid = adapter->vector;
5113 else
5114 rid = 0;
5115
5116 if (adapter->osdep.ihs[rid] != NULL) {
5117 pci_intr_disestablish(adapter->osdep.pc,
5118 adapter->osdep.ihs[rid]);
5119 adapter->osdep.ihs[rid] = NULL;
5120 }
5121
5122 if (adapter->osdep.intrs != NULL) {
5123 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5124 adapter->osdep.nintrs);
5125 adapter->osdep.intrs = NULL;
5126 }
5127 } /* ixgbe_free_pciintr_resources */
5128
5129 /************************************************************************
5130 * ixgbe_free_pci_resources
5131 ************************************************************************/
5132 static void
5133 ixgbe_free_pci_resources(struct adapter *adapter)
5134 {
5135
5136 ixgbe_free_pciintr_resources(adapter);
5137
5138 if (adapter->osdep.mem_size != 0) {
5139 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5140 adapter->osdep.mem_bus_space_handle,
5141 adapter->osdep.mem_size);
5142 }
5143
5144 } /* ixgbe_free_pci_resources */
5145
5146 /************************************************************************
5147 * ixgbe_set_sysctl_value
5148 ************************************************************************/
5149 static void
5150 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5151 const char *description, int *limit, int value)
5152 {
5153 device_t dev = adapter->dev;
5154 struct sysctllog **log;
5155 const struct sysctlnode *rnode, *cnode;
5156
5157 /*
5158 * It's not required to check recovery mode because this function never
5159 * touches hardware.
5160 */
5161
5162 log = &adapter->sysctllog;
5163 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5164 aprint_error_dev(dev, "could not create sysctl root\n");
5165 return;
5166 }
5167 if (sysctl_createv(log, 0, &rnode, &cnode,
5168 CTLFLAG_READWRITE, CTLTYPE_INT,
5169 name, SYSCTL_DESCR(description),
5170 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5171 aprint_error_dev(dev, "could not create sysctl\n");
5172 *limit = value;
5173 } /* ixgbe_set_sysctl_value */
5174
5175 /************************************************************************
5176 * ixgbe_sysctl_flowcntl
5177 *
5178 * SYSCTL wrapper around setting Flow Control
5179 ************************************************************************/
5180 static int
5181 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5182 {
5183 struct sysctlnode node = *rnode;
5184 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5185 int error, fc;
5186
5187 if (ixgbe_fw_recovery_mode_swflag(adapter))
5188 return (EPERM);
5189
5190 fc = adapter->hw.fc.current_mode;
5191 node.sysctl_data = &fc;
5192 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5193 if (error != 0 || newp == NULL)
5194 return error;
5195
5196 /* Don't bother if it's not changed */
5197 if (fc == adapter->hw.fc.current_mode)
5198 return (0);
5199
5200 return ixgbe_set_flowcntl(adapter, fc);
5201 } /* ixgbe_sysctl_flowcntl */
5202
5203 /************************************************************************
5204 * ixgbe_set_flowcntl - Set flow control
5205 *
5206 * Flow control values:
5207 * 0 - off
5208 * 1 - rx pause
5209 * 2 - tx pause
5210 * 3 - full
5211 ************************************************************************/
5212 static int
5213 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5214 {
5215 switch (fc) {
5216 case ixgbe_fc_rx_pause:
5217 case ixgbe_fc_tx_pause:
5218 case ixgbe_fc_full:
5219 adapter->hw.fc.requested_mode = fc;
5220 if (adapter->num_queues > 1)
5221 ixgbe_disable_rx_drop(adapter);
5222 break;
5223 case ixgbe_fc_none:
5224 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5225 if (adapter->num_queues > 1)
5226 ixgbe_enable_rx_drop(adapter);
5227 break;
5228 default:
5229 return (EINVAL);
5230 }
5231
5232 #if 0 /* XXX NetBSD */
5233 /* Don't autoneg if forcing a value */
5234 adapter->hw.fc.disable_fc_autoneg = TRUE;
5235 #endif
5236 ixgbe_fc_enable(&adapter->hw);
5237
5238 return (0);
5239 } /* ixgbe_set_flowcntl */
5240
5241 /************************************************************************
5242 * ixgbe_enable_rx_drop
5243 *
5244 * Enable the hardware to drop packets when the buffer is
5245 * full. This is useful with multiqueue, so that no single
5246 * queue being full stalls the entire RX engine. We only
5247 * enable this when Multiqueue is enabled AND Flow Control
5248 * is disabled.
5249 ************************************************************************/
5250 static void
5251 ixgbe_enable_rx_drop(struct adapter *adapter)
5252 {
5253 struct ixgbe_hw *hw = &adapter->hw;
5254 struct rx_ring *rxr;
5255 u32 srrctl;
5256
5257 for (int i = 0; i < adapter->num_queues; i++) {
5258 rxr = &adapter->rx_rings[i];
5259 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5260 srrctl |= IXGBE_SRRCTL_DROP_EN;
5261 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5262 }
5263
5264 /* enable drop for each vf */
5265 for (int i = 0; i < adapter->num_vfs; i++) {
5266 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5267 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5268 IXGBE_QDE_ENABLE));
5269 }
5270 } /* ixgbe_enable_rx_drop */
5271
5272 /************************************************************************
5273 * ixgbe_disable_rx_drop
5274 ************************************************************************/
5275 static void
5276 ixgbe_disable_rx_drop(struct adapter *adapter)
5277 {
5278 struct ixgbe_hw *hw = &adapter->hw;
5279 struct rx_ring *rxr;
5280 u32 srrctl;
5281
5282 for (int i = 0; i < adapter->num_queues; i++) {
5283 rxr = &adapter->rx_rings[i];
5284 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5285 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5286 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5287 }
5288
5289 /* disable drop for each vf */
5290 for (int i = 0; i < adapter->num_vfs; i++) {
5291 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5292 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5293 }
5294 } /* ixgbe_disable_rx_drop */
5295
5296 /************************************************************************
5297 * ixgbe_sysctl_advertise
5298 *
5299 * SYSCTL wrapper around setting advertised speed
5300 ************************************************************************/
5301 static int
5302 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5303 {
5304 struct sysctlnode node = *rnode;
5305 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5306 int error = 0, advertise;
5307
5308 if (ixgbe_fw_recovery_mode_swflag(adapter))
5309 return (EPERM);
5310
5311 advertise = adapter->advertise;
5312 node.sysctl_data = &advertise;
5313 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5314 if (error != 0 || newp == NULL)
5315 return error;
5316
5317 return ixgbe_set_advertise(adapter, advertise);
5318 } /* ixgbe_sysctl_advertise */
5319
5320 /************************************************************************
5321 * ixgbe_set_advertise - Control advertised link speed
5322 *
5323 * Flags:
5324 * 0x00 - Default (all capable link speed)
5325 * 0x01 - advertise 100 Mb
5326 * 0x02 - advertise 1G
5327 * 0x04 - advertise 10G
5328 * 0x08 - advertise 10 Mb
5329 * 0x10 - advertise 2.5G
5330 * 0x20 - advertise 5G
5331 ************************************************************************/
5332 static int
5333 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5334 {
5335 device_t dev;
5336 struct ixgbe_hw *hw;
5337 ixgbe_link_speed speed = 0;
5338 ixgbe_link_speed link_caps = 0;
5339 s32 err = IXGBE_NOT_IMPLEMENTED;
5340 bool negotiate = FALSE;
5341
5342 /* Checks to validate new value */
5343 if (adapter->advertise == advertise) /* no change */
5344 return (0);
5345
5346 dev = adapter->dev;
5347 hw = &adapter->hw;
5348
5349 /* No speed changes for backplane media */
5350 if (hw->phy.media_type == ixgbe_media_type_backplane)
5351 return (ENODEV);
5352
5353 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5354 (hw->phy.multispeed_fiber))) {
5355 device_printf(dev,
5356 "Advertised speed can only be set on copper or "
5357 "multispeed fiber media types.\n");
5358 return (EINVAL);
5359 }
5360
5361 if (advertise < 0x0 || advertise > 0x2f) {
5362 device_printf(dev,
5363 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5364 return (EINVAL);
5365 }
5366
5367 if (hw->mac.ops.get_link_capabilities) {
5368 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5369 &negotiate);
5370 if (err != IXGBE_SUCCESS) {
5371 device_printf(dev, "Unable to determine supported advertise speeds\n");
5372 return (ENODEV);
5373 }
5374 }
5375
5376 /* Set new value and report new advertised mode */
5377 if (advertise & 0x1) {
5378 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5379 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5380 return (EINVAL);
5381 }
5382 speed |= IXGBE_LINK_SPEED_100_FULL;
5383 }
5384 if (advertise & 0x2) {
5385 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5386 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5387 return (EINVAL);
5388 }
5389 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5390 }
5391 if (advertise & 0x4) {
5392 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5393 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5394 return (EINVAL);
5395 }
5396 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5397 }
5398 if (advertise & 0x8) {
5399 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5400 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5401 return (EINVAL);
5402 }
5403 speed |= IXGBE_LINK_SPEED_10_FULL;
5404 }
5405 if (advertise & 0x10) {
5406 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5407 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5408 return (EINVAL);
5409 }
5410 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5411 }
5412 if (advertise & 0x20) {
5413 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5414 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5415 return (EINVAL);
5416 }
5417 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5418 }
5419 if (advertise == 0)
5420 speed = link_caps; /* All capable link speed */
5421
5422 hw->mac.autotry_restart = TRUE;
5423 hw->mac.ops.setup_link(hw, speed, TRUE);
5424 adapter->advertise = advertise;
5425
5426 return (0);
5427 } /* ixgbe_set_advertise */
5428
5429 /************************************************************************
5430 * ixgbe_get_advertise - Get current advertised speed settings
5431 *
5432 * Formatted for sysctl usage.
5433 * Flags:
5434 * 0x01 - advertise 100 Mb
5435 * 0x02 - advertise 1G
5436 * 0x04 - advertise 10G
5437 * 0x08 - advertise 10 Mb (yes, Mb)
5438 * 0x10 - advertise 2.5G
5439 * 0x20 - advertise 5G
5440 ************************************************************************/
5441 static int
5442 ixgbe_get_advertise(struct adapter *adapter)
5443 {
5444 struct ixgbe_hw *hw = &adapter->hw;
5445 int speed;
5446 ixgbe_link_speed link_caps = 0;
5447 s32 err;
5448 bool negotiate = FALSE;
5449
5450 /*
5451 * Advertised speed means nothing unless it's copper or
5452 * multi-speed fiber
5453 */
5454 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5455 !(hw->phy.multispeed_fiber))
5456 return (0);
5457
5458 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5459 if (err != IXGBE_SUCCESS)
5460 return (0);
5461
5462 speed =
5463 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5464 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5465 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5466 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5467 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5468 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5469
5470 return speed;
5471 } /* ixgbe_get_advertise */
5472
5473 /************************************************************************
5474 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5475 *
5476 * Control values:
5477 * 0/1 - off / on (use default value of 1000)
5478 *
5479 * Legal timer values are:
5480 * 50,100,250,500,1000,2000,5000,10000
5481 *
5482 * Turning off interrupt moderation will also turn this off.
5483 ************************************************************************/
5484 static int
5485 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5486 {
5487 struct sysctlnode node = *rnode;
5488 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5489 struct ifnet *ifp = adapter->ifp;
5490 int error;
5491 int newval;
5492
5493 if (ixgbe_fw_recovery_mode_swflag(adapter))
5494 return (EPERM);
5495
5496 newval = adapter->dmac;
5497 node.sysctl_data = &newval;
5498 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5499 if ((error) || (newp == NULL))
5500 return (error);
5501
5502 switch (newval) {
5503 case 0:
5504 /* Disabled */
5505 adapter->dmac = 0;
5506 break;
5507 case 1:
5508 /* Enable and use default */
5509 adapter->dmac = 1000;
5510 break;
5511 case 50:
5512 case 100:
5513 case 250:
5514 case 500:
5515 case 1000:
5516 case 2000:
5517 case 5000:
5518 case 10000:
5519 /* Legal values - allow */
5520 adapter->dmac = newval;
5521 break;
5522 default:
5523 /* Do nothing, illegal value */
5524 return (EINVAL);
5525 }
5526
5527 /* Re-initialize hardware if it's already running */
5528 if (ifp->if_flags & IFF_RUNNING)
5529 ifp->if_init(ifp);
5530
5531 return (0);
5532 }
5533
5534 #ifdef IXGBE_DEBUG
5535 /************************************************************************
5536 * ixgbe_sysctl_power_state
5537 *
5538 * Sysctl to test power states
5539 * Values:
5540 * 0 - set device to D0
5541 * 3 - set device to D3
5542 * (none) - get current device power state
5543 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this disabled body is FreeBSD-flavored code
	 * ("req->newp", DEVICE_SUSPEND/DEVICE_RESUME) and will not
	 * build as-is under NetBSD's SYSCTLFN interface -- confirm
	 * and port before removing the #ifdef notyet.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev =	adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state control is not implemented on NetBSD yet */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5579 #endif
5580
5581 /************************************************************************
5582 * ixgbe_sysctl_wol_enable
5583 *
5584 * Sysctl to enable/disable the WoL capability,
5585 * if supported by the adapter.
5586 *
5587 * Values:
5588 * 0 - disabled
5589 * 1 - enabled
5590 ************************************************************************/
5591 static int
5592 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5593 {
5594 struct sysctlnode node = *rnode;
5595 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5596 struct ixgbe_hw *hw = &adapter->hw;
5597 bool new_wol_enabled;
5598 int error = 0;
5599
5600 /*
5601 * It's not required to check recovery mode because this function never
5602 * touches hardware.
5603 */
5604 new_wol_enabled = hw->wol_enabled;
5605 node.sysctl_data = &new_wol_enabled;
5606 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5607 if ((error) || (newp == NULL))
5608 return (error);
5609 if (new_wol_enabled == hw->wol_enabled)
5610 return (0);
5611
5612 if (new_wol_enabled && !adapter->wol_support)
5613 return (ENODEV);
5614 else
5615 hw->wol_enabled = new_wol_enabled;
5616
5617 return (0);
5618 } /* ixgbe_sysctl_wol_enable */
5619
/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up on upon receipt.
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the low 8 bits are user-settable flags */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	new_wufc &= 0xff;
	/*
	 * NOTE(review): OR-ing in the old *low 24* bits means flag bits
	 * can be set but never cleared via this sysctl.  The mask looks
	 * like it was meant to be 0xffffff00 (preserve only the high
	 * bits) -- confirm against upstream if_ix.c before changing.
	 */
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5666
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 * Debug sysctl that would dump the RSS redirection table (RETA and,
 * on X550-class MACs, ERETA).  The entire body is currently compiled
 * out under "notyet": it still uses the FreeBSD sbuf API, which is
 * not available here, so the handler always returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Reject access while the firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family: 128 redirection entries (RETA + ERETA). */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			/* First 32 entries live in the RETA registers. */
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the extended ERETA set. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5728
5729 /************************************************************************
5730 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5731 *
5732 * For X552/X557-AT devices using an external PHY
5733 ************************************************************************/
5734 static int
5735 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5736 {
5737 struct sysctlnode node = *rnode;
5738 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5739 struct ixgbe_hw *hw = &adapter->hw;
5740 int val;
5741 u16 reg;
5742 int error;
5743
5744 if (ixgbe_fw_recovery_mode_swflag(adapter))
5745 return (EPERM);
5746
5747 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5748 device_printf(adapter->dev,
5749 "Device has no supported external thermal sensor.\n");
5750 return (ENODEV);
5751 }
5752
5753 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5754 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5755 device_printf(adapter->dev,
5756 "Error reading from PHY's current temperature register\n");
5757 return (EAGAIN);
5758 }
5759
5760 node.sysctl_data = &val;
5761
5762 /* Shift temp for output */
5763 val = reg >> 8;
5764
5765 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5766 if ((error) || (newp == NULL))
5767 return (error);
5768
5769 return (0);
5770 } /* ixgbe_sysctl_phy_temp */
5771
5772 /************************************************************************
5773 * ixgbe_sysctl_phy_overtemp_occurred
5774 *
5775 * Reports (directly from the PHY) whether the current PHY
5776 * temperature is over the overtemp threshold.
5777 ************************************************************************/
5778 static int
5779 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5780 {
5781 struct sysctlnode node = *rnode;
5782 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5783 struct ixgbe_hw *hw = &adapter->hw;
5784 int val, error;
5785 u16 reg;
5786
5787 if (ixgbe_fw_recovery_mode_swflag(adapter))
5788 return (EPERM);
5789
5790 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5791 device_printf(adapter->dev,
5792 "Device has no supported external thermal sensor.\n");
5793 return (ENODEV);
5794 }
5795
5796 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5797 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5798 device_printf(adapter->dev,
5799 "Error reading from PHY's temperature status register\n");
5800 return (EAGAIN);
5801 }
5802
5803 node.sysctl_data = &val;
5804
5805 /* Get occurrence bit */
5806 val = !!(reg & 0x4000);
5807
5808 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5809 if ((error) || (newp == NULL))
5810 return (error);
5811
5812 return (0);
5813 } /* ixgbe_sysctl_phy_overtemp_occurred */
5814
5815 /************************************************************************
5816 * ixgbe_sysctl_eee_state
5817 *
5818 * Sysctl to set EEE power saving feature
5819 * Values:
5820 * 0 - disable EEE
5821 * 1 - enable EEE
5822 * (none) - get current device EEE state
5823 ************************************************************************/
5824 static int
5825 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5826 {
5827 struct sysctlnode node = *rnode;
5828 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5829 struct ifnet *ifp = adapter->ifp;
5830 device_t dev = adapter->dev;
5831 int curr_eee, new_eee, error = 0;
5832 s32 retval;
5833
5834 if (ixgbe_fw_recovery_mode_swflag(adapter))
5835 return (EPERM);
5836
5837 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5838 node.sysctl_data = &new_eee;
5839 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5840 if ((error) || (newp == NULL))
5841 return (error);
5842
5843 /* Nothing to do */
5844 if (new_eee == curr_eee)
5845 return (0);
5846
5847 /* Not supported */
5848 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5849 return (EINVAL);
5850
5851 /* Bounds checking */
5852 if ((new_eee < 0) || (new_eee > 1))
5853 return (EINVAL);
5854
5855 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5856 if (retval) {
5857 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5858 return (EINVAL);
5859 }
5860
5861 /* Restart auto-neg */
5862 ifp->if_init(ifp);
5863
5864 device_printf(dev, "New EEE state: %d\n", new_eee);
5865
5866 /* Cache new value */
5867 if (new_eee)
5868 adapter->feat_en |= IXGBE_FEATURE_EEE;
5869 else
5870 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5871
5872 return (error);
5873 } /* ixgbe_sysctl_eee_state */
5874
/*
 * PRINTQS(adapter, regname):
 *   Print the value of per-queue register "regname" for every queue,
 *   all on one line (first value preceded by a tab, the rest by a
 *   space).  The register offset is computed by token-pasting, i.e.
 *   IXGBE_<regname>(queue_index).  Debug helper used by
 *   ixgbe_print_debug_info().
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5888
5889 /************************************************************************
5890 * ixgbe_print_debug_info
5891 *
5892 * Called only when em_display_debug_stats is enabled.
5893 * Provides a way to take a look at important statistics
5894 * maintained by the driver and hardware.
5895 ************************************************************************/
5896 static void
5897 ixgbe_print_debug_info(struct adapter *adapter)
5898 {
5899 device_t dev = adapter->dev;
5900 struct ixgbe_hw *hw = &adapter->hw;
5901 int table_size;
5902 int i;
5903
5904 switch (adapter->hw.mac.type) {
5905 case ixgbe_mac_X550:
5906 case ixgbe_mac_X550EM_x:
5907 case ixgbe_mac_X550EM_a:
5908 table_size = 128;
5909 break;
5910 default:
5911 table_size = 32;
5912 break;
5913 }
5914
5915 device_printf(dev, "[E]RETA:\n");
5916 for (i = 0; i < table_size; i++) {
5917 if (i < 32)
5918 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5919 IXGBE_RETA(i)));
5920 else
5921 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5922 IXGBE_ERETA(i - 32)));
5923 }
5924
5925 device_printf(dev, "queue:");
5926 for (i = 0; i < adapter->num_queues; i++) {
5927 printf((i == 0) ? "\t" : " ");
5928 printf("%8d", i);
5929 }
5930 printf("\n");
5931 PRINTQS(adapter, RDBAL);
5932 PRINTQS(adapter, RDBAH);
5933 PRINTQS(adapter, RDLEN);
5934 PRINTQS(adapter, SRRCTL);
5935 PRINTQS(adapter, RDH);
5936 PRINTQS(adapter, RDT);
5937 PRINTQS(adapter, RXDCTL);
5938
5939 device_printf(dev, "RQSMR:");
5940 for (i = 0; i < adapter->num_queues / 4; i++) {
5941 printf((i == 0) ? "\t" : " ");
5942 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
5943 }
5944 printf("\n");
5945
5946 device_printf(dev, "disabled_count:");
5947 for (i = 0; i < adapter->num_queues; i++) {
5948 printf((i == 0) ? "\t" : " ");
5949 printf("%8d", adapter->queues[i].disabled_count);
5950 }
5951 printf("\n");
5952
5953 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
5954 if (hw->mac.type != ixgbe_mac_82598EB) {
5955 device_printf(dev, "EIMS_EX(0):\t%08x\n",
5956 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
5957 device_printf(dev, "EIMS_EX(1):\t%08x\n",
5958 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
5959 }
5960 } /* ixgbe_print_debug_info */
5961
5962 /************************************************************************
5963 * ixgbe_sysctl_debug
5964 ************************************************************************/
5965 static int
5966 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5967 {
5968 struct sysctlnode node = *rnode;
5969 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5970 int error, result = 0;
5971
5972 if (ixgbe_fw_recovery_mode_swflag(adapter))
5973 return (EPERM);
5974
5975 node.sysctl_data = &result;
5976 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5977
5978 if (error || newp == NULL)
5979 return error;
5980
5981 if (result == 1)
5982 ixgbe_print_debug_info(adapter);
5983
5984 return 0;
5985 } /* ixgbe_sysctl_debug */
5986
5987 /************************************************************************
5988 * ixgbe_init_device_features
5989 ************************************************************************/
5990 static void
5991 ixgbe_init_device_features(struct adapter *adapter)
5992 {
5993 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5994 | IXGBE_FEATURE_RSS
5995 | IXGBE_FEATURE_MSI
5996 | IXGBE_FEATURE_MSIX
5997 | IXGBE_FEATURE_LEGACY_IRQ
5998 | IXGBE_FEATURE_LEGACY_TX;
5999
6000 /* Set capabilities first... */
6001 switch (adapter->hw.mac.type) {
6002 case ixgbe_mac_82598EB:
6003 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6004 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6005 break;
6006 case ixgbe_mac_X540:
6007 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6008 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6009 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6010 (adapter->hw.bus.func == 0))
6011 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6012 break;
6013 case ixgbe_mac_X550:
6014 /*
6015 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6016 * NVM Image version.
6017 */
6018 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6019 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6020 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6021 break;
6022 case ixgbe_mac_X550EM_x:
6023 /*
6024 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6025 * NVM Image version.
6026 */
6027 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6028 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6029 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
6030 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6031 break;
6032 case ixgbe_mac_X550EM_a:
6033 /*
6034 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6035 * NVM Image version.
6036 */
6037 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6038 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6039 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6040 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6041 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6042 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6043 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6044 }
6045 break;
6046 case ixgbe_mac_82599EB:
6047 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6048 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6049 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6050 (adapter->hw.bus.func == 0))
6051 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6052 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6053 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6054 break;
6055 default:
6056 break;
6057 }
6058
6059 /* Enabled by default... */
6060 /* Fan failure detection */
6061 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6062 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6063 /* Netmap */
6064 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6065 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6066 /* EEE */
6067 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6068 adapter->feat_en |= IXGBE_FEATURE_EEE;
6069 /* Thermal Sensor */
6070 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6071 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6072 /*
6073 * Recovery mode:
6074 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6075 * NVM Image version.
6076 */
6077
6078 /* Enabled via global sysctl... */
6079 /* Flow Director */
6080 if (ixgbe_enable_fdir) {
6081 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6082 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6083 else
6084 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6085 }
6086 /* Legacy (single queue) transmit */
6087 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6088 ixgbe_enable_legacy_tx)
6089 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6090 /*
6091 * Message Signal Interrupts - Extended (MSI-X)
6092 * Normal MSI is only enabled if MSI-X calls fail.
6093 */
6094 if (!ixgbe_enable_msix)
6095 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6096 /* Receive-Side Scaling (RSS) */
6097 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6098 adapter->feat_en |= IXGBE_FEATURE_RSS;
6099
6100 /* Disable features with unmet dependencies... */
6101 /* No MSI-X */
6102 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6103 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6104 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6105 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6106 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6107 }
6108 } /* ixgbe_init_device_features */
6109
6110 /************************************************************************
6111 * ixgbe_probe - Device identification routine
6112 *
6113 * Determines if the driver should be loaded on
6114 * adapter based on its PCI vendor/device ID.
6115 *
6116 * return BUS_PROBE_DEFAULT on success, positive on failure
6117 ************************************************************************/
6118 static int
6119 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6120 {
6121 const struct pci_attach_args *pa = aux;
6122
6123 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6124 }
6125
6126 static const ixgbe_vendor_info_t *
6127 ixgbe_lookup(const struct pci_attach_args *pa)
6128 {
6129 const ixgbe_vendor_info_t *ent;
6130 pcireg_t subid;
6131
6132 INIT_DEBUGOUT("ixgbe_lookup: begin");
6133
6134 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6135 return NULL;
6136
6137 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6138
6139 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6140 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6141 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6142 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6143 (ent->subvendor_id == 0)) &&
6144 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6145 (ent->subdevice_id == 0))) {
6146 return ent;
6147 }
6148 }
6149 return NULL;
6150 }
6151
6152 static int
6153 ixgbe_ifflags_cb(struct ethercom *ec)
6154 {
6155 struct ifnet *ifp = &ec->ec_if;
6156 struct adapter *adapter = ifp->if_softc;
6157 int change, rc = 0;
6158
6159 IXGBE_CORE_LOCK(adapter);
6160
6161 change = ifp->if_flags ^ adapter->if_flags;
6162 if (change != 0)
6163 adapter->if_flags = ifp->if_flags;
6164
6165 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
6166 rc = ENETRESET;
6167 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
6168 ixgbe_set_promisc(adapter);
6169
6170 /* Set up VLAN support and filter */
6171 ixgbe_setup_vlan_hw_support(adapter);
6172
6173 IXGBE_CORE_UNLOCK(adapter);
6174
6175 return rc;
6176 }
6177
/************************************************************************
 * ixgbe_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 * Dispatches in two passes: the first switch only logs the request
 * (except SIOCZIFDATA, which also clears counters); the second switch
 * actually handles it, mostly by deferring to ether_ioctl() and
 * reinitializing on ENETRESET.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities toggle as a single unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* No configuration changes are accepted in FW recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* First pass: debug tracing (plus counter clearing for ZIFDATA). */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both the hardware and the driver counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second pass: actually service the request. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		/* Read a byte range from an SFP module's I2C EEPROM. */
		struct ixgbe_i2c_req i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP EEPROM/diag addresses are valid. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common Ethernet code do the heavy lifting. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: a reinit may be required if we are running. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* Interface is down: nothing further to do. */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6322
6323 /************************************************************************
6324 * ixgbe_check_fan_failure
6325 ************************************************************************/
6326 static void
6327 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6328 {
6329 u32 mask;
6330
6331 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6332 IXGBE_ESDP_SDP1;
6333
6334 if (reg & mask)
6335 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6336 } /* ixgbe_check_fan_failure */
6337
/************************************************************************
 * ixgbe_handle_que
 *
 * Deferred (softint/workqueue) service routine for one Tx/Rx queue
 * pair: drains Rx and Tx completions, restarts transmission, then
 * either reschedules itself (more work pending) or re-enables the
 * queue's interrupt.
 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;	/* true when rxeof/txeof left work behind */

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue Tx path: flush anything queued on this ring. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Still work to do: run again rather than unmask now. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/*
		 * Re-enable this interrupt.
		 * que->res is set when a per-queue MSI-X vector was
		 * established, so only that queue's interrupt is unmasked.
		 */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* Legacy/MSI: there is only one interrupt to re-enable. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6378
/************************************************************************
 * ixgbe_handle_que_work
 *
 * Workqueue wrapper around ixgbe_handle_que(): recover the owning
 * queue from the embedded work item and run the common handler.
 ************************************************************************/
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	/* The work item is embedded in the queue; map back to the queue. */
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
6393
6394 /************************************************************************
6395 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6396 ************************************************************************/
6397 static int
6398 ixgbe_allocate_legacy(struct adapter *adapter,
6399 const struct pci_attach_args *pa)
6400 {
6401 device_t dev = adapter->dev;
6402 struct ix_queue *que = adapter->queues;
6403 struct tx_ring *txr = adapter->tx_rings;
6404 int counts[PCI_INTR_TYPE_SIZE];
6405 pci_intr_type_t intr_type, max_type;
6406 char intrbuf[PCI_INTRSTR_LEN];
6407 const char *intrstr = NULL;
6408
6409 /* We allocate a single interrupt resource */
6410 max_type = PCI_INTR_TYPE_MSI;
6411 counts[PCI_INTR_TYPE_MSIX] = 0;
6412 counts[PCI_INTR_TYPE_MSI] =
6413 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6414 /* Check not feat_en but feat_cap to fallback to INTx */
6415 counts[PCI_INTR_TYPE_INTX] =
6416 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6417
6418 alloc_retry:
6419 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6420 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6421 return ENXIO;
6422 }
6423 adapter->osdep.nintrs = 1;
6424 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6425 intrbuf, sizeof(intrbuf));
6426 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6427 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6428 device_xname(dev));
6429 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6430 if (adapter->osdep.ihs[0] == NULL) {
6431 aprint_error_dev(dev,"unable to establish %s\n",
6432 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6433 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6434 adapter->osdep.intrs = NULL;
6435 switch (intr_type) {
6436 case PCI_INTR_TYPE_MSI:
6437 /* The next try is for INTx: Disable MSI */
6438 max_type = PCI_INTR_TYPE_INTX;
6439 counts[PCI_INTR_TYPE_INTX] = 1;
6440 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6441 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6442 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6443 goto alloc_retry;
6444 } else
6445 break;
6446 case PCI_INTR_TYPE_INTX:
6447 default:
6448 /* See below */
6449 break;
6450 }
6451 }
6452 if (intr_type == PCI_INTR_TYPE_INTX) {
6453 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6454 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6455 }
6456 if (adapter->osdep.ihs[0] == NULL) {
6457 aprint_error_dev(dev,
6458 "couldn't establish interrupt%s%s\n",
6459 intrstr ? " at " : "", intrstr ? intrstr : "");
6460 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6461 adapter->osdep.intrs = NULL;
6462 return ENXIO;
6463 }
6464 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6465 /*
6466 * Try allocating a fast interrupt and the associated deferred
6467 * processing contexts.
6468 */
6469 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6470 txr->txr_si =
6471 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6472 ixgbe_deferred_mq_start, txr);
6473 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6474 ixgbe_handle_que, que);
6475
6476 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6477 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6478 aprint_error_dev(dev,
6479 "could not establish software interrupts\n");
6480
6481 return ENXIO;
6482 }
6483 /* For simplicity in the handlers */
6484 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6485
6486 return (0);
6487 } /* ixgbe_allocate_legacy */
6488
6489 /************************************************************************
6490 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6491 ************************************************************************/
6492 static int
6493 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6494 {
6495 device_t dev = adapter->dev;
6496 struct ix_queue *que = adapter->queues;
6497 struct tx_ring *txr = adapter->tx_rings;
6498 pci_chipset_tag_t pc;
6499 char intrbuf[PCI_INTRSTR_LEN];
6500 char intr_xname[32];
6501 char wqname[MAXCOMLEN];
6502 const char *intrstr = NULL;
6503 int error, vector = 0;
6504 int cpu_id = 0;
6505 kcpuset_t *affinity;
6506 #ifdef RSS
6507 unsigned int rss_buckets = 0;
6508 kcpuset_t cpu_mask;
6509 #endif
6510
6511 pc = adapter->osdep.pc;
6512 #ifdef RSS
6513 /*
6514 * If we're doing RSS, the number of queues needs to
6515 * match the number of RSS buckets that are configured.
6516 *
6517 * + If there's more queues than RSS buckets, we'll end
6518 * up with queues that get no traffic.
6519 *
6520 * + If there's more RSS buckets than queues, we'll end
6521 * up having multiple RSS buckets map to the same queue,
6522 * so there'll be some contention.
6523 */
6524 rss_buckets = rss_getnumbuckets();
6525 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6526 (adapter->num_queues != rss_buckets)) {
6527 device_printf(dev,
6528 "%s: number of queues (%d) != number of RSS buckets (%d)"
6529 "; performance will be impacted.\n",
6530 __func__, adapter->num_queues, rss_buckets);
6531 }
6532 #endif
6533
6534 adapter->osdep.nintrs = adapter->num_queues + 1;
6535 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6536 adapter->osdep.nintrs) != 0) {
6537 aprint_error_dev(dev,
6538 "failed to allocate MSI-X interrupt\n");
6539 return (ENXIO);
6540 }
6541
6542 kcpuset_create(&affinity, false);
6543 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6544 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6545 device_xname(dev), i);
6546 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6547 sizeof(intrbuf));
6548 #ifdef IXGBE_MPSAFE
6549 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6550 true);
6551 #endif
6552 /* Set the handler function */
6553 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6554 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6555 intr_xname);
6556 if (que->res == NULL) {
6557 aprint_error_dev(dev,
6558 "Failed to register QUE handler\n");
6559 error = ENXIO;
6560 goto err_out;
6561 }
6562 que->msix = vector;
6563 adapter->active_queues |= (u64)(1 << que->msix);
6564
6565 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6566 #ifdef RSS
6567 /*
6568 * The queue ID is used as the RSS layer bucket ID.
6569 * We look up the queue ID -> RSS CPU ID and select
6570 * that.
6571 */
6572 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6573 CPU_SETOF(cpu_id, &cpu_mask);
6574 #endif
6575 } else {
6576 /*
6577 * Bind the MSI-X vector, and thus the
6578 * rings to the corresponding CPU.
6579 *
6580 * This just happens to match the default RSS
6581 * round-robin bucket -> queue -> CPU allocation.
6582 */
6583 if (adapter->num_queues > 1)
6584 cpu_id = i;
6585 }
6586 /* Round-robin affinity */
6587 kcpuset_zero(affinity);
6588 kcpuset_set(affinity, cpu_id % ncpu);
6589 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6590 NULL);
6591 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6592 intrstr);
6593 if (error == 0) {
6594 #if 1 /* def IXGBE_DEBUG */
6595 #ifdef RSS
6596 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6597 cpu_id % ncpu);
6598 #else
6599 aprint_normal(", bound queue %d to cpu %d", i,
6600 cpu_id % ncpu);
6601 #endif
6602 #endif /* IXGBE_DEBUG */
6603 }
6604 aprint_normal("\n");
6605
6606 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6607 txr->txr_si = softint_establish(
6608 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6609 ixgbe_deferred_mq_start, txr);
6610 if (txr->txr_si == NULL) {
6611 aprint_error_dev(dev,
6612 "couldn't establish software interrupt\n");
6613 error = ENXIO;
6614 goto err_out;
6615 }
6616 }
6617 que->que_si
6618 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6619 ixgbe_handle_que, que);
6620 if (que->que_si == NULL) {
6621 aprint_error_dev(dev,
6622 "couldn't establish software interrupt\n");
6623 error = ENXIO;
6624 goto err_out;
6625 }
6626 }
6627 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6628 error = workqueue_create(&adapter->txr_wq, wqname,
6629 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6630 IXGBE_WORKQUEUE_FLAGS);
6631 if (error) {
6632 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6633 goto err_out;
6634 }
6635 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6636
6637 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6638 error = workqueue_create(&adapter->que_wq, wqname,
6639 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6640 IXGBE_WORKQUEUE_FLAGS);
6641 if (error) {
6642 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6643 goto err_out;
6644 }
6645
6646 /* and Link */
6647 cpu_id++;
6648 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6649 adapter->vector = vector;
6650 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6651 sizeof(intrbuf));
6652 #ifdef IXGBE_MPSAFE
6653 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6654 true);
6655 #endif
6656 /* Set the link handler function */
6657 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6658 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6659 intr_xname);
6660 if (adapter->osdep.ihs[vector] == NULL) {
6661 aprint_error_dev(dev, "Failed to register LINK handler\n");
6662 error = ENXIO;
6663 goto err_out;
6664 }
6665 /* Round-robin affinity */
6666 kcpuset_zero(affinity);
6667 kcpuset_set(affinity, cpu_id % ncpu);
6668 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6669 NULL);
6670
6671 aprint_normal_dev(dev,
6672 "for link, interrupting at %s", intrstr);
6673 if (error == 0)
6674 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6675 else
6676 aprint_normal("\n");
6677
6678 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6679 adapter->mbx_si =
6680 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6681 ixgbe_handle_mbx, adapter);
6682 if (adapter->mbx_si == NULL) {
6683 aprint_error_dev(dev,
6684 "could not establish software interrupts\n");
6685
6686 error = ENXIO;
6687 goto err_out;
6688 }
6689 }
6690
6691 kcpuset_destroy(affinity);
6692 aprint_normal_dev(dev,
6693 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6694
6695 return (0);
6696
6697 err_out:
6698 kcpuset_destroy(affinity);
6699 ixgbe_free_softint(adapter);
6700 ixgbe_free_pciintr_resources(adapter);
6701 return (error);
6702 } /* ixgbe_allocate_msix */
6703
6704 /************************************************************************
6705 * ixgbe_configure_interrupts
6706 *
6707 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6708 * This will also depend on user settings.
6709 ************************************************************************/
6710 static int
6711 ixgbe_configure_interrupts(struct adapter *adapter)
6712 {
6713 device_t dev = adapter->dev;
6714 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6715 int want, queues, msgs;
6716
6717 /* Default to 1 queue if MSI-X setup fails */
6718 adapter->num_queues = 1;
6719
6720 /* Override by tuneable */
6721 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6722 goto msi;
6723
6724 /*
6725 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6726 * interrupt slot.
6727 */
6728 if (ncpu == 1)
6729 goto msi;
6730
6731 /* First try MSI-X */
6732 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6733 msgs = MIN(msgs, IXG_MAX_NINTR);
6734 if (msgs < 2)
6735 goto msi;
6736
6737 adapter->msix_mem = (void *)1; /* XXX */
6738
6739 /* Figure out a reasonable auto config value */
6740 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6741
6742 #ifdef RSS
6743 /* If we're doing RSS, clamp at the number of RSS buckets */
6744 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6745 queues = uimin(queues, rss_getnumbuckets());
6746 #endif
6747 if (ixgbe_num_queues > queues) {
6748 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6749 ixgbe_num_queues = queues;
6750 }
6751
6752 if (ixgbe_num_queues != 0)
6753 queues = ixgbe_num_queues;
6754 else
6755 queues = uimin(queues,
6756 uimin(mac->max_tx_queues, mac->max_rx_queues));
6757
6758 /* reflect correct sysctl value */
6759 ixgbe_num_queues = queues;
6760
6761 /*
6762 * Want one vector (RX/TX pair) per queue
6763 * plus an additional for Link.
6764 */
6765 want = queues + 1;
6766 if (msgs >= want)
6767 msgs = want;
6768 else {
6769 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6770 "%d vectors but %d queues wanted!\n",
6771 msgs, want);
6772 goto msi;
6773 }
6774 adapter->num_queues = queues;
6775 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6776 return (0);
6777
6778 /*
6779 * MSI-X allocation failed or provided us with
6780 * less vectors than needed. Free MSI-X resources
6781 * and we'll try enabling MSI.
6782 */
6783 msi:
6784 /* Without MSI-X, some features are no longer supported */
6785 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6786 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6787 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6788 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6789
6790 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6791 adapter->msix_mem = NULL; /* XXX */
6792 if (msgs > 1)
6793 msgs = 1;
6794 if (msgs != 0) {
6795 msgs = 1;
6796 adapter->feat_en |= IXGBE_FEATURE_MSI;
6797 return (0);
6798 }
6799
6800 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6801 aprint_error_dev(dev,
6802 "Device does not support legacy interrupts.\n");
6803 return 1;
6804 }
6805
6806 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6807
6808 return (0);
6809 } /* ixgbe_configure_interrupts */
6810
6811
6812 /************************************************************************
6813 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6814 *
6815 * Done outside of interrupt context since the driver might sleep
6816 ************************************************************************/
6817 static void
6818 ixgbe_handle_link(void *context)
6819 {
6820 struct adapter *adapter = context;
6821 struct ixgbe_hw *hw = &adapter->hw;
6822
6823 IXGBE_CORE_LOCK(adapter);
6824 ++adapter->link_sicount.ev_count;
6825 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6826 ixgbe_update_link_status(adapter);
6827
6828 /* Re-enable link interrupts */
6829 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6830
6831 IXGBE_CORE_UNLOCK(adapter);
6832 } /* ixgbe_handle_link */
6833
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 * Set the interrupt-cause bits (EICS) for the queues given in the
 * 64-bit bitmap.  Currently compiled out.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 exposes a single cause-set register */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		    (u32)(IXGBE_EIMS_RTX_QUEUE & queues));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Later MACs split the 64 queue bits across two registers */
		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
		    (u32)(queues & 0xFFFFFFFF));
		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
		    (u32)(queues >> 32));
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
6863