ixgbe.c revision 1.177 1 /* $NetBSD: ixgbe.c,v 1.177 2019/03/13 10:02:13 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84 /* XXX NetBSD: + 3.3.6 */
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static void ixgbe_recovery_mode_timer(void *);
188 static int ixgbe_setup_interface(device_t, struct adapter *);
189 static void ixgbe_config_gpie(struct adapter *);
190 static void ixgbe_config_dmac(struct adapter *);
191 static void ixgbe_config_delay_values(struct adapter *);
192 static void ixgbe_config_link(struct adapter *);
193 static void ixgbe_check_wol_support(struct adapter *);
194 static int ixgbe_setup_low_power_mode(struct adapter *);
195 #if 0
196 static void ixgbe_rearm_queues(struct adapter *, u64);
197 #endif
198
199 static void ixgbe_initialize_transmit_units(struct adapter *);
200 static void ixgbe_initialize_receive_units(struct adapter *);
201 static void ixgbe_enable_rx_drop(struct adapter *);
202 static void ixgbe_disable_rx_drop(struct adapter *);
203 static void ixgbe_initialize_rss_mapping(struct adapter *);
204
205 static void ixgbe_enable_intr(struct adapter *);
206 static void ixgbe_disable_intr(struct adapter *);
207 static void ixgbe_update_stats_counters(struct adapter *);
208 static void ixgbe_set_promisc(struct adapter *);
209 static void ixgbe_set_multi(struct adapter *);
210 static void ixgbe_update_link_status(struct adapter *);
211 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
212 static void ixgbe_configure_ivars(struct adapter *);
213 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
214 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
215
216 static void ixgbe_setup_vlan_hw_support(struct adapter *);
217 #if 0
218 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
219 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
220 #endif
221
222 static void ixgbe_add_device_sysctls(struct adapter *);
223 static void ixgbe_add_hw_stats(struct adapter *);
224 static void ixgbe_clear_evcnt(struct adapter *);
225 static int ixgbe_set_flowcntl(struct adapter *, int);
226 static int ixgbe_set_advertise(struct adapter *, int);
227 static int ixgbe_get_advertise(struct adapter *);
228
229 /* Sysctl handlers */
230 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
231 const char *, int *, int);
232 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
235 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
236 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
237 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
238 #ifdef IXGBE_DEBUG
239 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
241 #endif
242 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
250 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
251
252 /* Support for pluggable optic modules */
253 static bool ixgbe_sfp_probe(struct adapter *);
254
255 /* Legacy (single vector) interrupt handler */
256 static int ixgbe_legacy_irq(void *);
257
258 /* The MSI/MSI-X Interrupt handlers */
259 static int ixgbe_msix_que(void *);
260 static int ixgbe_msix_link(void *);
261
262 /* Software interrupts for deferred work */
263 static void ixgbe_handle_que(void *);
264 static void ixgbe_handle_link(void *);
265 static void ixgbe_handle_msf(void *);
266 static void ixgbe_handle_mod(void *);
267 static void ixgbe_handle_phy(void *);
268
269 /* Workqueue handler for deferred work */
270 static void ixgbe_handle_que_work(struct work *, void *);
271
272 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
273
274 /************************************************************************
275 * NetBSD Device Interface Entry Points
276 ************************************************************************/
277 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
278 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
279 DVF_DETACH_SHUTDOWN);
280
281 #if 0
282 devclass_t ix_devclass;
283 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
284
285 MODULE_DEPEND(ix, pci, 1, 1, 1);
286 MODULE_DEPEND(ix, ether, 1, 1, 1);
287 #ifdef DEV_NETMAP
288 MODULE_DEPEND(ix, netmap, 1, 1, 1);
289 #endif
290 #endif
291
292 /*
293 * TUNEABLE PARAMETERS:
294 */
295
296 /*
297 * AIM: Adaptive Interrupt Moderation
298 * which means that the interrupt rate
299 * is varied over time based on the
300 * traffic for that interrupt vector
301 */
302 static bool ixgbe_enable_aim = true;
303 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
304 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
305 "Enable adaptive interrupt moderation");
306
307 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
308 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
309 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
310
311 /* How many packets rxeof tries to clean at a time */
312 static int ixgbe_rx_process_limit = 256;
313 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
314 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
315
316 /* How many packets txeof tries to clean at a time */
317 static int ixgbe_tx_process_limit = 256;
318 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
319 &ixgbe_tx_process_limit, 0,
320 "Maximum number of sent packets to process at a time, -1 means unlimited");
321
322 /* Flow control setting, default to full */
323 static int ixgbe_flow_control = ixgbe_fc_full;
324 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
325 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
326
327 /* Whether packet processing uses workqueue or softint */
328 static bool ixgbe_txrx_workqueue = false;
329
330 /*
331 * Smart speed setting, default to on
332 * this only works as a compile option
333 * right now as its during attach, set
334 * this to 'ixgbe_smart_speed_off' to
335 * disable.
336 */
337 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
338
339 /*
340 * MSI-X should be the default for best performance,
341 * but this allows it to be forced off for testing.
342 */
343 static int ixgbe_enable_msix = 1;
344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
345 "Enable MSI-X interrupts");
346
347 /*
348 * Number of Queues, can be set to 0,
349 * it then autoconfigures based on the
350 * number of cpus with a max of 8. This
351 * can be overridden manually here.
352 */
353 static int ixgbe_num_queues = 0;
354 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
355 "Number of queues to configure, 0 indicates autoconfigure");
356
357 /*
358 * Number of TX descriptors per ring,
359 * setting higher than RX as this seems
360 * the better performing choice.
361 */
362 static int ixgbe_txd = PERFORM_TXD;
363 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
364 "Number of transmit descriptors per queue");
365
366 /* Number of RX descriptors per ring */
367 static int ixgbe_rxd = PERFORM_RXD;
368 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
369 "Number of receive descriptors per queue");
370
371 /*
372 * Defining this on will allow the use
373 * of unsupported SFP+ modules, note that
374 * doing so you are on your own :)
375 */
376 static int allow_unsupported_sfp = false;
377 #define TUNABLE_INT(__x, __y)
378 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
379
380 /*
381 * Not sure if Flow Director is fully baked,
382 * so we'll default to turning it off.
383 */
384 static int ixgbe_enable_fdir = 0;
385 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
386 "Enable Flow Director");
387
388 /* Legacy Transmit (single queue) */
389 static int ixgbe_enable_legacy_tx = 0;
390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
391 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
392
393 /* Receive-Side Scaling */
394 static int ixgbe_enable_rss = 1;
395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
396 "Enable Receive-Side Scaling (RSS)");
397
398 #if 0
399 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
400 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
401 #endif
402
403 #ifdef NET_MPSAFE
404 #define IXGBE_MPSAFE 1
405 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
406 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
407 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
408 #else
409 #define IXGBE_CALLOUT_FLAGS 0
410 #define IXGBE_SOFTINFT_FLAGS 0
411 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
412 #endif
413 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
414
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the hardware RSS (Receive-Side Scaling) state: the
 *   redirection table (RETA, plus ERETA on X550-class MACs), the
 *   40-byte hash key (RSSRK0-9), and the hash-type selection bits in
 *   MRQC, so that received flows are distributed across the adapter's
 *   RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always uses the system-wide key from rss_getkey(). */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/*
	 * Set multiplier for RETA setup and table size based on MAC.
	 * 82598 spaces queue indices by 0x11; the X550 family has a
	 * 512-entry table (entries >= 128 live in ERETA), all other
	 * MACs use a 128-entry table.
	 */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles 0 .. num_queues-1 for round-robin assignment */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Four 8-bit entries are packed per 32-bit register, so
		 * one register is flushed every fourth iteration; entries
		 * 128 and above go to the extended (ERETA) registers.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32 bits = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the selected hash types into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in the multiqueue mode required by the current IOV config */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
531
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Disables the RX engine, programs broadcast/jumbo/CRC-strip policy,
 *   then per queue sets up the descriptor ring base/length, SRRCTL
 *   buffer sizing, statistics mapping and head/tail pointers, and
 *   finally configures RSS and RX checksum offload bits.
 ************************************************************************/
/* Round a buffer size up to the SRRCTL BSIZEPKT granularity */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: set DPF and PMCF in FCTRL as well */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size expressed in SRRCTL BSIZEPKT units */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue ring setup; j is the hardware ring index (rxr->me) */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Program PSRTYPE header-type bits (skipped on 82598) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
661
662 /************************************************************************
663 * ixgbe_initialize_transmit_units - Enable transmit units.
664 ************************************************************************/
665 static void
666 ixgbe_initialize_transmit_units(struct adapter *adapter)
667 {
668 struct tx_ring *txr = adapter->tx_rings;
669 struct ixgbe_hw *hw = &adapter->hw;
670 int i;
671
672 /* Setup the Base and Length of the Tx Descriptor Ring */
673 for (i = 0; i < adapter->num_queues; i++, txr++) {
674 u64 tdba = txr->txdma.dma_paddr;
675 u32 txctrl = 0;
676 u32 tqsmreg, reg;
677 int regnum = i / 4; /* 1 register per 4 queues */
678 int regshift = i % 4; /* 4 bits per 1 queue */
679 int j = txr->me;
680
681 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
682 (tdba & 0x00000000ffffffffULL));
683 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
684 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
685 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
686
687 /*
688 * Set TQSMR (Transmit Queue Statistic Mapping) register.
689 * Register location is different between 82598 and others.
690 */
691 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
692 tqsmreg = IXGBE_TQSMR(regnum);
693 else
694 tqsmreg = IXGBE_TQSM(regnum);
695 reg = IXGBE_READ_REG(hw, tqsmreg);
696 reg &= ~(0x000000ff << (regshift * 8));
697 reg |= i << (regshift * 8);
698 IXGBE_WRITE_REG(hw, tqsmreg, reg);
699
700 /* Setup the HW Tx Head and Tail descriptor pointers */
701 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
702 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
703
704 /* Cache the tail address */
705 txr->tail = IXGBE_TDT(j);
706
707 txr->txr_no_space = false;
708
709 /* Disable Head Writeback */
710 /*
711 * Note: for X550 series devices, these registers are actually
712 * prefixed with TPH_ isntead of DCA_, but the addresses and
713 * fields remain the same.
714 */
715 switch (hw->mac.type) {
716 case ixgbe_mac_82598EB:
717 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
718 break;
719 default:
720 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
721 break;
722 }
723 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
724 switch (hw->mac.type) {
725 case ixgbe_mac_82598EB:
726 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
727 break;
728 default:
729 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
730 break;
731 }
732
733 }
734
735 if (hw->mac.type != ixgbe_mac_82598EB) {
736 u32 dmatxctl, rttdcs;
737
738 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
739 dmatxctl |= IXGBE_DMATXCTL_TE;
740 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
741 /* Disable arbiter to set MTQC */
742 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
743 rttdcs |= IXGBE_RTTDCS_ARBDIS;
744 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
745 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
746 ixgbe_get_mtqc(adapter->iov_mode));
747 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
748 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
749 }
750
751 return;
752 } /* ixgbe_initialize_transmit_units */
753
754 /************************************************************************
755 * ixgbe_attach - Device initialization routine
756 *
757 * Called when the driver is being loaded.
758 * Identifies the type of hardware, allocates all resources
759 * and initializes the hardware.
760 *
761 * return 0 on success, positive on failure
762 ************************************************************************/
763 static void
764 ixgbe_attach(device_t parent, device_t dev, void *aux)
765 {
766 struct adapter *adapter;
767 struct ixgbe_hw *hw;
768 int error = -1;
769 u32 ctrl_ext;
770 u16 high, low, nvmreg;
771 pcireg_t id, subid;
772 const ixgbe_vendor_info_t *ent;
773 struct pci_attach_args *pa = aux;
774 const char *str;
775 char buf[256];
776
777 INIT_DEBUGOUT("ixgbe_attach: begin");
778
779 /* Allocate, clear, and link in our adapter structure */
780 adapter = device_private(dev);
781 adapter->hw.back = adapter;
782 adapter->dev = dev;
783 hw = &adapter->hw;
784 adapter->osdep.pc = pa->pa_pc;
785 adapter->osdep.tag = pa->pa_tag;
786 if (pci_dma64_available(pa))
787 adapter->osdep.dmat = pa->pa_dmat64;
788 else
789 adapter->osdep.dmat = pa->pa_dmat;
790 adapter->osdep.attached = false;
791
792 ent = ixgbe_lookup(pa);
793
794 KASSERT(ent != NULL);
795
796 aprint_normal(": %s, Version - %s\n",
797 ixgbe_strings[ent->index], ixgbe_driver_version);
798
799 /* Core Lock Init*/
800 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
801
802 /* Set up the timer callout */
803 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
804
805 /* Determine hardware revision */
806 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
807 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
808
809 hw->vendor_id = PCI_VENDOR(id);
810 hw->device_id = PCI_PRODUCT(id);
811 hw->revision_id =
812 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
813 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
814 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
815
816 /*
817 * Make sure BUSMASTER is set
818 */
819 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
820
821 /* Do base PCI setup - map BAR0 */
822 if (ixgbe_allocate_pci_resources(adapter, pa)) {
823 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
824 error = ENXIO;
825 goto err_out;
826 }
827
828 /* let hardware know driver is loaded */
829 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
830 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
831 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
832
833 /*
834 * Initialize the shared code
835 */
836 if (ixgbe_init_shared_code(hw) != 0) {
837 aprint_error_dev(dev, "Unable to initialize the shared code\n");
838 error = ENXIO;
839 goto err_out;
840 }
841
842 switch (hw->mac.type) {
843 case ixgbe_mac_82598EB:
844 str = "82598EB";
845 break;
846 case ixgbe_mac_82599EB:
847 str = "82599EB";
848 break;
849 case ixgbe_mac_X540:
850 str = "X540";
851 break;
852 case ixgbe_mac_X550:
853 str = "X550";
854 break;
855 case ixgbe_mac_X550EM_x:
856 str = "X550EM";
857 break;
858 case ixgbe_mac_X550EM_a:
859 str = "X550EM A";
860 break;
861 default:
862 str = "Unknown";
863 break;
864 }
865 aprint_normal_dev(dev, "device %s\n", str);
866
867 if (hw->mbx.ops.init_params)
868 hw->mbx.ops.init_params(hw);
869
870 hw->allow_unsupported_sfp = allow_unsupported_sfp;
871
872 /* Pick up the 82599 settings */
873 if (hw->mac.type != ixgbe_mac_82598EB) {
874 hw->phy.smart_speed = ixgbe_smart_speed;
875 adapter->num_segs = IXGBE_82599_SCATTER;
876 } else
877 adapter->num_segs = IXGBE_82598_SCATTER;
878
879 /* Ensure SW/FW semaphore is free */
880 ixgbe_init_swfw_semaphore(hw);
881
882 hw->mac.ops.set_lan_id(hw);
883 ixgbe_init_device_features(adapter);
884
885 if (ixgbe_configure_interrupts(adapter)) {
886 error = ENXIO;
887 goto err_out;
888 }
889
890 /* Allocate multicast array memory. */
891 adapter->mta = malloc(sizeof(*adapter->mta) *
892 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
893 if (adapter->mta == NULL) {
894 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
895 error = ENOMEM;
896 goto err_out;
897 }
898
899 /* Enable WoL (if supported) */
900 ixgbe_check_wol_support(adapter);
901
902 /* Verify adapter fan is still functional (if applicable) */
903 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
904 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
905 ixgbe_check_fan_failure(adapter, esdp, FALSE);
906 }
907
908 /* Set an initial default flow control value */
909 hw->fc.requested_mode = ixgbe_flow_control;
910
911 /* Sysctls for limiting the amount of work done in the taskqueues */
912 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
913 "max number of rx packets to process",
914 &adapter->rx_process_limit, ixgbe_rx_process_limit);
915
916 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
917 "max number of tx packets to process",
918 &adapter->tx_process_limit, ixgbe_tx_process_limit);
919
920 /* Do descriptor calc and sanity checks */
921 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
922 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
923 aprint_error_dev(dev, "TXD config issue, using default!\n");
924 adapter->num_tx_desc = DEFAULT_TXD;
925 } else
926 adapter->num_tx_desc = ixgbe_txd;
927
928 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
929 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
930 aprint_error_dev(dev, "RXD config issue, using default!\n");
931 adapter->num_rx_desc = DEFAULT_RXD;
932 } else
933 adapter->num_rx_desc = ixgbe_rxd;
934
935 /* Allocate our TX/RX Queues */
936 if (ixgbe_allocate_queues(adapter)) {
937 error = ENOMEM;
938 goto err_out;
939 }
940
941 hw->phy.reset_if_overtemp = TRUE;
942 error = ixgbe_reset_hw(hw);
943 hw->phy.reset_if_overtemp = FALSE;
944 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
945 /*
946 * No optics in this port, set up
947 * so the timer routine will probe
948 * for later insertion.
949 */
950 adapter->sfp_probe = TRUE;
951 error = IXGBE_SUCCESS;
952 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
953 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
954 error = EIO;
955 goto err_late;
956 } else if (error) {
957 aprint_error_dev(dev, "Hardware initialization failed\n");
958 error = EIO;
959 goto err_late;
960 }
961
962 /* Make sure we have a good EEPROM before we read from it */
963 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
964 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
965 error = EIO;
966 goto err_late;
967 }
968
969 aprint_normal("%s:", device_xname(dev));
970 /* NVM Image Version */
971 high = low = 0;
972 switch (hw->mac.type) {
973 case ixgbe_mac_X540:
974 case ixgbe_mac_X550EM_a:
975 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
976 if (nvmreg == 0xffff)
977 break;
978 high = (nvmreg >> 12) & 0x0f;
979 low = (nvmreg >> 4) & 0xff;
980 id = nvmreg & 0x0f;
981 aprint_normal(" NVM Image Version %u.", high);
982 if (hw->mac.type == ixgbe_mac_X540)
983 str = "%x";
984 else
985 str = "%02x";
986 aprint_normal(str, low);
987 aprint_normal(" ID 0x%x,", id);
988 break;
989 case ixgbe_mac_X550EM_x:
990 case ixgbe_mac_X550:
991 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
992 if (nvmreg == 0xffff)
993 break;
994 high = (nvmreg >> 12) & 0x0f;
995 low = nvmreg & 0xff;
996 aprint_normal(" NVM Image Version %u.%02x,", high, low);
997 break;
998 default:
999 break;
1000 }
1001 hw->eeprom.nvm_image_ver_high = high;
1002 hw->eeprom.nvm_image_ver_low = low;
1003
1004 /* PHY firmware revision */
1005 switch (hw->mac.type) {
1006 case ixgbe_mac_X540:
1007 case ixgbe_mac_X550:
1008 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1009 if (nvmreg == 0xffff)
1010 break;
1011 high = (nvmreg >> 12) & 0x0f;
1012 low = (nvmreg >> 4) & 0xff;
1013 id = nvmreg & 0x000f;
1014 aprint_normal(" PHY FW Revision %u.", high);
1015 if (hw->mac.type == ixgbe_mac_X540)
1016 str = "%x";
1017 else
1018 str = "%02x";
1019 aprint_normal(str, low);
1020 aprint_normal(" ID 0x%x,", id);
1021 break;
1022 default:
1023 break;
1024 }
1025
1026 /* NVM Map version & OEM NVM Image version */
1027 switch (hw->mac.type) {
1028 case ixgbe_mac_X550:
1029 case ixgbe_mac_X550EM_x:
1030 case ixgbe_mac_X550EM_a:
1031 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1032 if (nvmreg != 0xffff) {
1033 high = (nvmreg >> 12) & 0x0f;
1034 low = nvmreg & 0x00ff;
1035 aprint_normal(" NVM Map version %u.%02x,", high, low);
1036 }
1037 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1038 if (nvmreg != 0xffff) {
1039 high = (nvmreg >> 12) & 0x0f;
1040 low = nvmreg & 0x00ff;
1041 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1042 low);
1043 }
1044 break;
1045 default:
1046 break;
1047 }
1048
1049 /* Print the ETrackID */
1050 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1051 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1052 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1053
1054 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1055 error = ixgbe_allocate_msix(adapter, pa);
1056 if (error) {
1057 /* Free allocated queue structures first */
1058 ixgbe_free_transmit_structures(adapter);
1059 ixgbe_free_receive_structures(adapter);
1060 free(adapter->queues, M_DEVBUF);
1061
1062 /* Fallback to legacy interrupt */
1063 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1064 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1065 adapter->feat_en |= IXGBE_FEATURE_MSI;
1066 adapter->num_queues = 1;
1067
1068 /* Allocate our TX/RX Queues again */
1069 if (ixgbe_allocate_queues(adapter)) {
1070 error = ENOMEM;
1071 goto err_out;
1072 }
1073 }
1074 }
1075 /* Recovery mode */
1076 switch (adapter->hw.mac.type) {
1077 case ixgbe_mac_X550:
1078 case ixgbe_mac_X550EM_x:
1079 case ixgbe_mac_X550EM_a:
1080 /* >= 2.00 */
1081 if (hw->eeprom.nvm_image_ver_high >= 2) {
1082 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1083 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1084 }
1085 break;
1086 default:
1087 break;
1088 }
1089
1090 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1091 error = ixgbe_allocate_legacy(adapter, pa);
1092 if (error)
1093 goto err_late;
1094
1095 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1096 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1097 ixgbe_handle_link, adapter);
1098 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1099 ixgbe_handle_mod, adapter);
1100 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_handle_msf, adapter);
1102 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1103 ixgbe_handle_phy, adapter);
1104 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1105 adapter->fdir_si =
1106 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_reinit_fdir, adapter);
1108 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1109 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1110 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1111 && (adapter->fdir_si == NULL))) {
1112 aprint_error_dev(dev,
1113 "could not establish software interrupts ()\n");
1114 goto err_out;
1115 }
1116
1117 error = ixgbe_start_hw(hw);
1118 switch (error) {
1119 case IXGBE_ERR_EEPROM_VERSION:
1120 aprint_error_dev(dev, "This device is a pre-production adapter/"
1121 "LOM. Please be aware there may be issues associated "
1122 "with your hardware.\nIf you are experiencing problems "
1123 "please contact your Intel or hardware representative "
1124 "who provided you with this hardware.\n");
1125 break;
1126 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1127 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1128 error = EIO;
1129 goto err_late;
1130 case IXGBE_ERR_SFP_NOT_PRESENT:
1131 aprint_error_dev(dev, "No SFP+ Module found\n");
1132 /* falls thru */
1133 default:
1134 break;
1135 }
1136
1137 /* Setup OS specific network interface */
1138 if (ixgbe_setup_interface(dev, adapter) != 0)
1139 goto err_late;
1140
1141 /*
1142 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1143 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1144 */
1145 if (hw->phy.media_type == ixgbe_media_type_copper) {
1146 uint16_t id1, id2;
1147 int oui, model, rev;
1148 const char *descr;
1149
1150 id1 = hw->phy.id >> 16;
1151 id2 = hw->phy.id & 0xffff;
1152 oui = MII_OUI(id1, id2);
1153 model = MII_MODEL(id2);
1154 rev = MII_REV(id2);
1155 if ((descr = mii_get_descr(oui, model)) != NULL)
1156 aprint_normal_dev(dev,
1157 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1158 descr, oui, model, rev);
1159 else
1160 aprint_normal_dev(dev,
1161 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1162 oui, model, rev);
1163 }
1164
1165 /* Enable the optics for 82599 SFP+ fiber */
1166 ixgbe_enable_tx_laser(hw);
1167
1168 /* Enable EEE power saving */
1169 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1170 hw->mac.ops.setup_eee(hw,
1171 adapter->feat_en & IXGBE_FEATURE_EEE);
1172
1173 /* Enable power to the phy. */
1174 ixgbe_set_phy_power(hw, TRUE);
1175
1176 /* Initialize statistics */
1177 ixgbe_update_stats_counters(adapter);
1178
1179 /* Check PCIE slot type/speed/width */
1180 ixgbe_get_slot_info(adapter);
1181
1182 /*
1183 * Do time init and sysctl init here, but
1184 * only on the first port of a bypass adapter.
1185 */
1186 ixgbe_bypass_init(adapter);
1187
1188 /* Set an initial dmac value */
1189 adapter->dmac = 0;
1190 /* Set initial advertised speeds (if applicable) */
1191 adapter->advertise = ixgbe_get_advertise(adapter);
1192
1193 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1194 ixgbe_define_iov_schemas(dev, &error);
1195
1196 /* Add sysctls */
1197 ixgbe_add_device_sysctls(adapter);
1198 ixgbe_add_hw_stats(adapter);
1199
1200 /* For Netmap */
1201 adapter->init_locked = ixgbe_init_locked;
1202 adapter->stop_locked = ixgbe_stop;
1203
1204 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1205 ixgbe_netmap_attach(adapter);
1206
1207 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1208 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1209 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1210 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1211
1212 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1213 pmf_class_network_register(dev, adapter->ifp);
1214 else
1215 aprint_error_dev(dev, "couldn't establish power handler\n");
1216
1217 /* Init recovery mode timer and state variable */
1218 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1219 adapter->recovery_mode = 0;
1220
1221 /* Set up the timer callout */
1222 callout_init(&adapter->recovery_mode_timer,
1223 IXGBE_CALLOUT_FLAGS);
1224
1225 /* Start the task */
1226 callout_reset(&adapter->recovery_mode_timer, hz,
1227 ixgbe_recovery_mode_timer, adapter);
1228 }
1229
1230 INIT_DEBUGOUT("ixgbe_attach: end");
1231 adapter->osdep.attached = true;
1232
1233 return;
1234
1235 err_late:
1236 ixgbe_free_transmit_structures(adapter);
1237 ixgbe_free_receive_structures(adapter);
1238 free(adapter->queues, M_DEVBUF);
1239 err_out:
1240 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1241 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1242 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1243 ixgbe_free_softint(adapter);
1244 ixgbe_free_pci_resources(adapter);
1245 if (adapter->mta != NULL)
1246 free(adapter->mta, M_DEVBUF);
1247 IXGBE_CORE_LOCK_DESTROY(adapter);
1248
1249 return;
1250 } /* ixgbe_attach */
1251
1252 /************************************************************************
1253 * ixgbe_check_wol_support
1254 *
1255 * Checks whether the adapter's ports are capable of
1256 * Wake On LAN by reading the adapter's NVM.
1257 *
1258 * Sets each port's hw->wol_enabled value depending
1259 * on the value read here.
1260 ************************************************************************/
1261 static void
1262 ixgbe_check_wol_support(struct adapter *adapter)
1263 {
1264 struct ixgbe_hw *hw = &adapter->hw;
1265 u16 dev_caps = 0;
1266
1267 /* Find out WoL support for port */
1268 adapter->wol_support = hw->wol_enabled = 0;
1269 ixgbe_get_device_caps(hw, &dev_caps);
1270 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1271 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1272 hw->bus.func == 0))
1273 adapter->wol_support = hw->wol_enabled = 1;
1274
1275 /* Save initial wake up filter configuration */
1276 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1277
1278 return;
1279 } /* ixgbe_check_wol_support */
1280
1281 /************************************************************************
1282 * ixgbe_setup_interface
1283 *
1284 * Setup networking device structure and register an interface.
1285 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; no separate allocation. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Allow the network stack to call us without KERNEL_LOCK */
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only; dead code on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Multiqueue transmit (if_transmit) is installed unless the
	 * legacy-TX feature forces the single-queue if_start path.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	/* if_start is always set as a fallback for the legacy path */
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach order below is the mandatory NetBSD sequence:
	 * if_initialize -> percpuq create -> ether_ifattach -> if_register.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (checksum offload and TSO); all off by default */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1388
1389 /************************************************************************
1390 * ixgbe_add_media_types
1391 ************************************************************************/
1392 static void
1393 ixgbe_add_media_types(struct adapter *adapter)
1394 {
1395 struct ixgbe_hw *hw = &adapter->hw;
1396 device_t dev = adapter->dev;
1397 u64 layer;
1398
1399 layer = adapter->phy_layer;
1400
1401 #define ADD(mm, dd) \
1402 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1403
1404 ADD(IFM_NONE, 0);
1405
1406 /* Media types with matching NetBSD media defines */
1407 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1408 ADD(IFM_10G_T | IFM_FDX, 0);
1409 }
1410 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1411 ADD(IFM_1000_T | IFM_FDX, 0);
1412 }
1413 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1414 ADD(IFM_100_TX | IFM_FDX, 0);
1415 }
1416 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1417 ADD(IFM_10_T | IFM_FDX, 0);
1418 }
1419
1420 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1421 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1422 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1423 }
1424
1425 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1426 ADD(IFM_10G_LR | IFM_FDX, 0);
1427 if (hw->phy.multispeed_fiber) {
1428 ADD(IFM_1000_LX | IFM_FDX, 0);
1429 }
1430 }
1431 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1432 ADD(IFM_10G_SR | IFM_FDX, 0);
1433 if (hw->phy.multispeed_fiber) {
1434 ADD(IFM_1000_SX | IFM_FDX, 0);
1435 }
1436 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1437 ADD(IFM_1000_SX | IFM_FDX, 0);
1438 }
1439 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1440 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1441 }
1442
1443 #ifdef IFM_ETH_XTYPE
1444 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1445 ADD(IFM_10G_KR | IFM_FDX, 0);
1446 }
1447 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1448 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1449 }
1450 #else
1451 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1452 device_printf(dev, "Media supported: 10GbaseKR\n");
1453 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1454 ADD(IFM_10G_SR | IFM_FDX, 0);
1455 }
1456 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1457 device_printf(dev, "Media supported: 10GbaseKX4\n");
1458 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1459 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1460 }
1461 #endif
1462 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1463 ADD(IFM_1000_KX | IFM_FDX, 0);
1464 }
1465 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1466 ADD(IFM_2500_KX | IFM_FDX, 0);
1467 }
1468 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1469 ADD(IFM_2500_T | IFM_FDX, 0);
1470 }
1471 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1472 ADD(IFM_5000_T | IFM_FDX, 0);
1473 }
1474 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1475 device_printf(dev, "Media supported: 1000baseBX\n");
1476 /* XXX no ifmedia_set? */
1477
1478 ADD(IFM_AUTO, 0);
1479
1480 #undef ADD
1481 } /* ixgbe_add_media_types */
1482
1483 /************************************************************************
1484 * ixgbe_is_sfp
1485 ************************************************************************/
1486 static inline bool
1487 ixgbe_is_sfp(struct ixgbe_hw *hw)
1488 {
1489 switch (hw->mac.type) {
1490 case ixgbe_mac_82598EB:
1491 if (hw->phy.type == ixgbe_phy_nl)
1492 return (TRUE);
1493 return (FALSE);
1494 case ixgbe_mac_82599EB:
1495 switch (hw->mac.ops.get_media_type(hw)) {
1496 case ixgbe_media_type_fiber:
1497 case ixgbe_media_type_fiber_qsfp:
1498 return (TRUE);
1499 default:
1500 return (FALSE);
1501 }
1502 case ixgbe_mac_X550EM_x:
1503 case ixgbe_mac_X550EM_a:
1504 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1505 return (TRUE);
1506 return (FALSE);
1507 default:
1508 return (FALSE);
1509 }
1510 } /* ixgbe_is_sfp */
1511
1512 /************************************************************************
1513 * ixgbe_config_link
1514 ************************************************************************/
1515 static void
1516 ixgbe_config_link(struct adapter *adapter)
1517 {
1518 struct ixgbe_hw *hw = &adapter->hw;
1519 u32 autoneg, err = 0;
1520 bool sfp, negotiate = false;
1521
1522 sfp = ixgbe_is_sfp(hw);
1523
1524 if (sfp) {
1525 if (hw->phy.multispeed_fiber) {
1526 ixgbe_enable_tx_laser(hw);
1527 kpreempt_disable();
1528 softint_schedule(adapter->msf_si);
1529 kpreempt_enable();
1530 }
1531 kpreempt_disable();
1532 softint_schedule(adapter->mod_si);
1533 kpreempt_enable();
1534 } else {
1535 struct ifmedia *ifm = &adapter->media;
1536
1537 if (hw->mac.ops.check_link)
1538 err = ixgbe_check_link(hw, &adapter->link_speed,
1539 &adapter->link_up, FALSE);
1540 if (err)
1541 return;
1542
1543 /*
1544 * Check if it's the first call. If it's the first call,
1545 * get value for auto negotiation.
1546 */
1547 autoneg = hw->phy.autoneg_advertised;
1548 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1549 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1550 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1551 &negotiate);
1552 if (err)
1553 return;
1554 if (hw->mac.ops.setup_link)
1555 err = hw->mac.ops.setup_link(hw, autoneg,
1556 adapter->link_up);
1557 }
1558
1559 } /* ixgbe_config_link */
1560
1561 /************************************************************************
1562 * ixgbe_update_stats_counters - Update board statistics counters.
1563 ************************************************************************/
1564 static void
1565 ixgbe_update_stats_counters(struct adapter *adapter)
1566 {
1567 struct ifnet *ifp = adapter->ifp;
1568 struct ixgbe_hw *hw = &adapter->hw;
1569 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1570 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1571 u64 total_missed_rx = 0;
1572 uint64_t crcerrs, rlec;
1573 unsigned int queue_counters;
1574 int i;
1575
1576 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1577 stats->crcerrs.ev_count += crcerrs;
1578 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1579 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1580 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1581 if (hw->mac.type == ixgbe_mac_X550)
1582 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1583
1584 /* 16 registers exist */
1585 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1586 for (i = 0; i < queue_counters; i++) {
1587 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1588 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1589 if (hw->mac.type >= ixgbe_mac_82599EB) {
1590 stats->qprdc[i].ev_count
1591 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1592 }
1593 }
1594
1595 /* 8 registers exist */
1596 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1597 uint32_t mp;
1598
1599 /* MPC */
1600 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1601 /* global total per queue */
1602 stats->mpc[i].ev_count += mp;
1603 /* running comprehensive total for stats display */
1604 total_missed_rx += mp;
1605
1606 if (hw->mac.type == ixgbe_mac_82598EB)
1607 stats->rnbc[i].ev_count
1608 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1609
1610 stats->pxontxc[i].ev_count
1611 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1612 stats->pxofftxc[i].ev_count
1613 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1614 if (hw->mac.type >= ixgbe_mac_82599EB) {
1615 stats->pxonrxc[i].ev_count
1616 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1617 stats->pxoffrxc[i].ev_count
1618 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1619 stats->pxon2offc[i].ev_count
1620 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1621 } else {
1622 stats->pxonrxc[i].ev_count
1623 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1624 stats->pxoffrxc[i].ev_count
1625 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1626 }
1627 }
1628 stats->mpctotal.ev_count += total_missed_rx;
1629
1630 /* Document says M[LR]FC are valid when link is up and 10Gbps */
1631 if ((adapter->link_active == LINK_STATE_UP)
1632 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1633 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1634 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1635 }
1636 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1637 stats->rlec.ev_count += rlec;
1638
1639 /* Hardware workaround, gprc counts missed packets */
1640 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1641
1642 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1643 stats->lxontxc.ev_count += lxon;
1644 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1645 stats->lxofftxc.ev_count += lxoff;
1646 total = lxon + lxoff;
1647
1648 if (hw->mac.type != ixgbe_mac_82598EB) {
1649 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1650 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1651 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1652 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1653 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1654 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1655 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1656 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1657 } else {
1658 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1659 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1660 /* 82598 only has a counter in the high register */
1661 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1662 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1663 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1664 }
1665
1666 /*
1667 * Workaround: mprc hardware is incorrectly counting
1668 * broadcasts, so for now we subtract those.
1669 */
1670 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1671 stats->bprc.ev_count += bprc;
1672 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1673 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1674
1675 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1676 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1677 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1678 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1679 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1680 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1681
1682 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1683 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1684 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1685
1686 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1687 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1688 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1689 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1690 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1691 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1692 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1693 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1694 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1695 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1696 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1697 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1698 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1699 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1700 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1701 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1702 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1703 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1704 /* Only read FCOE on 82599 */
1705 if (hw->mac.type != ixgbe_mac_82598EB) {
1706 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1707 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1708 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1709 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1710 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1711 }
1712
1713 /* Fill out the OS statistics structure */
1714 /*
1715 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
1716 * adapter->stats counters. It's required to make ifconfig -z
1717 * (SOICZIFDATA) work.
1718 */
1719 ifp->if_collisions = 0;
1720
1721 /* Rx Errors */
1722 ifp->if_iqdrops += total_missed_rx;
1723 ifp->if_ierrors += crcerrs + rlec;
1724 } /* ixgbe_update_stats_counters */
1725
1726 /************************************************************************
1727 * ixgbe_add_hw_stats
1728 *
1729 * Add sysctl variables, one per statistic, to the system.
1730 ************************************************************************/
1731 static void
1732 ixgbe_add_hw_stats(struct adapter *adapter)
1733 {
1734 device_t dev = adapter->dev;
1735 const struct sysctlnode *rnode, *cnode;
1736 struct sysctllog **log = &adapter->sysctllog;
1737 struct tx_ring *txr = adapter->tx_rings;
1738 struct rx_ring *rxr = adapter->rx_rings;
1739 struct ixgbe_hw *hw = &adapter->hw;
1740 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1741 const char *xname = device_xname(dev);
1742 int i;
1743
1744 /* Driver Statistics */
1745 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1746 NULL, xname, "Driver tx dma soft fail EFBIG");
1747 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1748 NULL, xname, "m_defrag() failed");
1749 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1750 NULL, xname, "Driver tx dma hard fail EFBIG");
1751 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1752 NULL, xname, "Driver tx dma hard fail EINVAL");
1753 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1754 NULL, xname, "Driver tx dma hard fail other");
1755 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1756 NULL, xname, "Driver tx dma soft fail EAGAIN");
1757 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1758 NULL, xname, "Driver tx dma soft fail ENOMEM");
1759 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1760 NULL, xname, "Watchdog timeouts");
1761 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1762 NULL, xname, "TSO errors");
1763 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1764 NULL, xname, "Link MSI-X IRQ Handled");
1765 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1766 NULL, xname, "Link softint");
1767 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1768 NULL, xname, "module softint");
1769 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1770 NULL, xname, "multimode softint");
1771 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1772 NULL, xname, "external PHY softint");
1773
1774 /* Max number of traffic class is 8 */
1775 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1776 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1777 snprintf(adapter->tcs[i].evnamebuf,
1778 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1779 xname, i);
1780 if (i < __arraycount(stats->mpc)) {
1781 evcnt_attach_dynamic(&stats->mpc[i],
1782 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1783 "RX Missed Packet Count");
1784 if (hw->mac.type == ixgbe_mac_82598EB)
1785 evcnt_attach_dynamic(&stats->rnbc[i],
1786 EVCNT_TYPE_MISC, NULL,
1787 adapter->tcs[i].evnamebuf,
1788 "Receive No Buffers");
1789 }
1790 if (i < __arraycount(stats->pxontxc)) {
1791 evcnt_attach_dynamic(&stats->pxontxc[i],
1792 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1793 "pxontxc");
1794 evcnt_attach_dynamic(&stats->pxonrxc[i],
1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1796 "pxonrxc");
1797 evcnt_attach_dynamic(&stats->pxofftxc[i],
1798 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1799 "pxofftxc");
1800 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1801 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1802 "pxoffrxc");
1803 if (hw->mac.type >= ixgbe_mac_82599EB)
1804 evcnt_attach_dynamic(&stats->pxon2offc[i],
1805 EVCNT_TYPE_MISC, NULL,
1806 adapter->tcs[i].evnamebuf,
1807 "pxon2offc");
1808 }
1809 }
1810
1811 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1812 #ifdef LRO
1813 struct lro_ctrl *lro = &rxr->lro;
1814 #endif /* LRO */
1815
1816 snprintf(adapter->queues[i].evnamebuf,
1817 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1818 xname, i);
1819 snprintf(adapter->queues[i].namebuf,
1820 sizeof(adapter->queues[i].namebuf), "q%d", i);
1821
1822 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1823 aprint_error_dev(dev, "could not create sysctl root\n");
1824 break;
1825 }
1826
1827 if (sysctl_createv(log, 0, &rnode, &rnode,
1828 0, CTLTYPE_NODE,
1829 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1830 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1831 break;
1832
1833 if (sysctl_createv(log, 0, &rnode, &cnode,
1834 CTLFLAG_READWRITE, CTLTYPE_INT,
1835 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1836 ixgbe_sysctl_interrupt_rate_handler, 0,
1837 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1838 break;
1839
1840 if (sysctl_createv(log, 0, &rnode, &cnode,
1841 CTLFLAG_READONLY, CTLTYPE_INT,
1842 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1843 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1844 0, CTL_CREATE, CTL_EOL) != 0)
1845 break;
1846
1847 if (sysctl_createv(log, 0, &rnode, &cnode,
1848 CTLFLAG_READONLY, CTLTYPE_INT,
1849 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1850 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1851 0, CTL_CREATE, CTL_EOL) != 0)
1852 break;
1853
1854 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1855 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1856 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1857 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1858 "Handled queue in softint");
1859 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1860 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1861 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1862 NULL, adapter->queues[i].evnamebuf, "TSO");
1863 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1864 NULL, adapter->queues[i].evnamebuf,
1865 "Queue No Descriptor Available");
1866 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1867 NULL, adapter->queues[i].evnamebuf,
1868 "Queue Packets Transmitted");
1869 #ifndef IXGBE_LEGACY_TX
1870 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1871 NULL, adapter->queues[i].evnamebuf,
1872 "Packets dropped in pcq");
1873 #endif
1874
1875 if (sysctl_createv(log, 0, &rnode, &cnode,
1876 CTLFLAG_READONLY,
1877 CTLTYPE_INT,
1878 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1879 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1880 CTL_CREATE, CTL_EOL) != 0)
1881 break;
1882
1883 if (sysctl_createv(log, 0, &rnode, &cnode,
1884 CTLFLAG_READONLY,
1885 CTLTYPE_INT,
1886 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1887 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1888 CTL_CREATE, CTL_EOL) != 0)
1889 break;
1890
1891 if (sysctl_createv(log, 0, &rnode, &cnode,
1892 CTLFLAG_READONLY,
1893 CTLTYPE_INT,
1894 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1895 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1896 CTL_CREATE, CTL_EOL) != 0)
1897 break;
1898
1899 if (i < __arraycount(stats->qprc)) {
1900 evcnt_attach_dynamic(&stats->qprc[i],
1901 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1902 "qprc");
1903 evcnt_attach_dynamic(&stats->qptc[i],
1904 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1905 "qptc");
1906 evcnt_attach_dynamic(&stats->qbrc[i],
1907 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1908 "qbrc");
1909 evcnt_attach_dynamic(&stats->qbtc[i],
1910 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1911 "qbtc");
1912 if (hw->mac.type >= ixgbe_mac_82599EB)
1913 evcnt_attach_dynamic(&stats->qprdc[i],
1914 EVCNT_TYPE_MISC, NULL,
1915 adapter->queues[i].evnamebuf, "qprdc");
1916 }
1917
1918 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1919 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1920 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1921 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1922 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1923 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1924 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1925 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1926 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1927 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1928 #ifdef LRO
1929 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1930 CTLFLAG_RD, &lro->lro_queued, 0,
1931 "LRO Queued");
1932 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1933 CTLFLAG_RD, &lro->lro_flushed, 0,
1934 "LRO Flushed");
1935 #endif /* LRO */
1936 }
1937
1938 /* MAC stats get their own sub node */
1939
1940 snprintf(stats->namebuf,
1941 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1942
1943 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "rx csum offload - IP");
1945 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "rx csum offload - L4");
1947 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "rx csum offload - IP bad");
1949 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "rx csum offload - L4 bad");
1951 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "Interrupt conditions zero");
1953 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Legacy interrupts");
1955
1956 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "CRC Errors");
1958 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "Illegal Byte Errors");
1960 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Byte Errors");
1962 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "MAC Short Packets Discarded");
1964 if (hw->mac.type >= ixgbe_mac_X550)
1965 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "Bad SFD");
1967 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Total Packets Missed");
1969 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "MAC Local Faults");
1971 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "MAC Remote Faults");
1973 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Receive Length Errors");
1975 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Link XON Transmitted");
1977 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "Link XON Received");
1979 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "Link XOFF Transmitted");
1981 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Link XOFF Received");
1983
1984 /* Packet Reception Stats */
1985 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Total Octets Received");
1987 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Good Octets Received");
1989 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Total Packets Received");
1991 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Good Packets Received");
1993 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Multicast Packets Received");
1995 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Broadcast Packets Received");
1997 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "64 byte frames received ");
1999 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "65-127 byte frames received");
2001 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "128-255 byte frames received");
2003 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "256-511 byte frames received");
2005 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "512-1023 byte frames received");
2007 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "1023-1522 byte frames received");
2009 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Receive Undersized");
2011 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Fragmented Packets Received ");
2013 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Oversized Packets Received");
2015 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Received Jabber");
2017 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Management Packets Received");
2019 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Management Packets Dropped");
2021 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Checksum Errors");
2023
2024 /* Packet Transmission Stats */
2025 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Good Octets Transmitted");
2027 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Total Packets Transmitted");
2029 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Good Packets Transmitted");
2031 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Broadcast Packets Transmitted");
2033 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "Multicast Packets Transmitted");
2035 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "Management Packets Transmitted");
2037 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "64 byte frames transmitted ");
2039 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "65-127 byte frames transmitted");
2041 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "128-255 byte frames transmitted");
2043 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "256-511 byte frames transmitted");
2045 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "512-1023 byte frames transmitted");
2047 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2048 stats->namebuf, "1024-1522 byte frames transmitted");
2049 } /* ixgbe_add_hw_stats */
2050
2051 static void
2052 ixgbe_clear_evcnt(struct adapter *adapter)
2053 {
2054 struct tx_ring *txr = adapter->tx_rings;
2055 struct rx_ring *rxr = adapter->rx_rings;
2056 struct ixgbe_hw *hw = &adapter->hw;
2057 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2058 int i;
2059
2060 adapter->efbig_tx_dma_setup.ev_count = 0;
2061 adapter->mbuf_defrag_failed.ev_count = 0;
2062 adapter->efbig2_tx_dma_setup.ev_count = 0;
2063 adapter->einval_tx_dma_setup.ev_count = 0;
2064 adapter->other_tx_dma_setup.ev_count = 0;
2065 adapter->eagain_tx_dma_setup.ev_count = 0;
2066 adapter->enomem_tx_dma_setup.ev_count = 0;
2067 adapter->tso_err.ev_count = 0;
2068 adapter->watchdog_events.ev_count = 0;
2069 adapter->link_irq.ev_count = 0;
2070 adapter->link_sicount.ev_count = 0;
2071 adapter->mod_sicount.ev_count = 0;
2072 adapter->msf_sicount.ev_count = 0;
2073 adapter->phy_sicount.ev_count = 0;
2074
2075 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2076 if (i < __arraycount(stats->mpc)) {
2077 stats->mpc[i].ev_count = 0;
2078 if (hw->mac.type == ixgbe_mac_82598EB)
2079 stats->rnbc[i].ev_count = 0;
2080 }
2081 if (i < __arraycount(stats->pxontxc)) {
2082 stats->pxontxc[i].ev_count = 0;
2083 stats->pxonrxc[i].ev_count = 0;
2084 stats->pxofftxc[i].ev_count = 0;
2085 stats->pxoffrxc[i].ev_count = 0;
2086 if (hw->mac.type >= ixgbe_mac_82599EB)
2087 stats->pxon2offc[i].ev_count = 0;
2088 }
2089 }
2090
2091 txr = adapter->tx_rings;
2092 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2093 adapter->queues[i].irqs.ev_count = 0;
2094 adapter->queues[i].handleq.ev_count = 0;
2095 adapter->queues[i].req.ev_count = 0;
2096 txr->no_desc_avail.ev_count = 0;
2097 txr->total_packets.ev_count = 0;
2098 txr->tso_tx.ev_count = 0;
2099 #ifndef IXGBE_LEGACY_TX
2100 txr->pcq_drops.ev_count = 0;
2101 #endif
2102 txr->q_efbig_tx_dma_setup = 0;
2103 txr->q_mbuf_defrag_failed = 0;
2104 txr->q_efbig2_tx_dma_setup = 0;
2105 txr->q_einval_tx_dma_setup = 0;
2106 txr->q_other_tx_dma_setup = 0;
2107 txr->q_eagain_tx_dma_setup = 0;
2108 txr->q_enomem_tx_dma_setup = 0;
2109 txr->q_tso_err = 0;
2110
2111 if (i < __arraycount(stats->qprc)) {
2112 stats->qprc[i].ev_count = 0;
2113 stats->qptc[i].ev_count = 0;
2114 stats->qbrc[i].ev_count = 0;
2115 stats->qbtc[i].ev_count = 0;
2116 if (hw->mac.type >= ixgbe_mac_82599EB)
2117 stats->qprdc[i].ev_count = 0;
2118 }
2119
2120 rxr->rx_packets.ev_count = 0;
2121 rxr->rx_bytes.ev_count = 0;
2122 rxr->rx_copies.ev_count = 0;
2123 rxr->no_jmbuf.ev_count = 0;
2124 rxr->rx_discarded.ev_count = 0;
2125 }
2126 stats->ipcs.ev_count = 0;
2127 stats->l4cs.ev_count = 0;
2128 stats->ipcs_bad.ev_count = 0;
2129 stats->l4cs_bad.ev_count = 0;
2130 stats->intzero.ev_count = 0;
2131 stats->legint.ev_count = 0;
2132 stats->crcerrs.ev_count = 0;
2133 stats->illerrc.ev_count = 0;
2134 stats->errbc.ev_count = 0;
2135 stats->mspdc.ev_count = 0;
2136 stats->mbsdc.ev_count = 0;
2137 stats->mpctotal.ev_count = 0;
2138 stats->mlfc.ev_count = 0;
2139 stats->mrfc.ev_count = 0;
2140 stats->rlec.ev_count = 0;
2141 stats->lxontxc.ev_count = 0;
2142 stats->lxonrxc.ev_count = 0;
2143 stats->lxofftxc.ev_count = 0;
2144 stats->lxoffrxc.ev_count = 0;
2145
2146 /* Packet Reception Stats */
2147 stats->tor.ev_count = 0;
2148 stats->gorc.ev_count = 0;
2149 stats->tpr.ev_count = 0;
2150 stats->gprc.ev_count = 0;
2151 stats->mprc.ev_count = 0;
2152 stats->bprc.ev_count = 0;
2153 stats->prc64.ev_count = 0;
2154 stats->prc127.ev_count = 0;
2155 stats->prc255.ev_count = 0;
2156 stats->prc511.ev_count = 0;
2157 stats->prc1023.ev_count = 0;
2158 stats->prc1522.ev_count = 0;
2159 stats->ruc.ev_count = 0;
2160 stats->rfc.ev_count = 0;
2161 stats->roc.ev_count = 0;
2162 stats->rjc.ev_count = 0;
2163 stats->mngprc.ev_count = 0;
2164 stats->mngpdc.ev_count = 0;
2165 stats->xec.ev_count = 0;
2166
2167 /* Packet Transmission Stats */
2168 stats->gotc.ev_count = 0;
2169 stats->tpt.ev_count = 0;
2170 stats->gptc.ev_count = 0;
2171 stats->bptc.ev_count = 0;
2172 stats->mptc.ev_count = 0;
2173 stats->mngptc.ev_count = 0;
2174 stats->ptc64.ev_count = 0;
2175 stats->ptc127.ev_count = 0;
2176 stats->ptc255.ev_count = 0;
2177 stats->ptc511.ev_count = 0;
2178 stats->ptc1023.ev_count = 0;
2179 stats->ptc1522.ev_count = 0;
2180 }
2181
2182 /************************************************************************
2183 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2184 *
2185 * Retrieves the TDH value from the hardware
2186 ************************************************************************/
2187 static int
2188 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2189 {
2190 struct sysctlnode node = *rnode;
2191 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2192 struct adapter *adapter;
2193 uint32_t val;
2194
2195 if (!txr)
2196 return (0);
2197
2198 adapter = txr->adapter;
2199 if (ixgbe_fw_recovery_mode_swflag(adapter))
2200 return (EPERM);
2201
2202 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2203 node.sysctl_data = &val;
2204 return sysctl_lookup(SYSCTLFN_CALL(&node));
2205 } /* ixgbe_sysctl_tdh_handler */
2206
2207 /************************************************************************
2208 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2209 *
2210 * Retrieves the TDT value from the hardware
2211 ************************************************************************/
2212 static int
2213 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2214 {
2215 struct sysctlnode node = *rnode;
2216 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2217 struct adapter *adapter;
2218 uint32_t val;
2219
2220 if (!txr)
2221 return (0);
2222
2223 adapter = txr->adapter;
2224 if (ixgbe_fw_recovery_mode_swflag(adapter))
2225 return (EPERM);
2226
2227 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2228 node.sysctl_data = &val;
2229 return sysctl_lookup(SYSCTLFN_CALL(&node));
2230 } /* ixgbe_sysctl_tdt_handler */
2231
2232 /************************************************************************
2233 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2234 * handler function
2235 *
2236 * Retrieves the next_to_check value
2237 ************************************************************************/
2238 static int
2239 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2240 {
2241 struct sysctlnode node = *rnode;
2242 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2243 struct adapter *adapter;
2244 uint32_t val;
2245
2246 if (!rxr)
2247 return (0);
2248
2249 adapter = rxr->adapter;
2250 if (ixgbe_fw_recovery_mode_swflag(adapter))
2251 return (EPERM);
2252
2253 val = rxr->next_to_check;
2254 node.sysctl_data = &val;
2255 return sysctl_lookup(SYSCTLFN_CALL(&node));
2256 } /* ixgbe_sysctl_next_to_check_handler */
2257
2258 /************************************************************************
2259 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2260 *
2261 * Retrieves the RDH value from the hardware
2262 ************************************************************************/
2263 static int
2264 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2265 {
2266 struct sysctlnode node = *rnode;
2267 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2268 struct adapter *adapter;
2269 uint32_t val;
2270
2271 if (!rxr)
2272 return (0);
2273
2274 adapter = rxr->adapter;
2275 if (ixgbe_fw_recovery_mode_swflag(adapter))
2276 return (EPERM);
2277
2278 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2279 node.sysctl_data = &val;
2280 return sysctl_lookup(SYSCTLFN_CALL(&node));
2281 } /* ixgbe_sysctl_rdh_handler */
2282
2283 /************************************************************************
2284 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2285 *
2286 * Retrieves the RDT value from the hardware
2287 ************************************************************************/
2288 static int
2289 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2290 {
2291 struct sysctlnode node = *rnode;
2292 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2293 struct adapter *adapter;
2294 uint32_t val;
2295
2296 if (!rxr)
2297 return (0);
2298
2299 adapter = rxr->adapter;
2300 if (ixgbe_fw_recovery_mode_swflag(adapter))
2301 return (EPERM);
2302
2303 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2304 node.sysctl_data = &val;
2305 return sysctl_lookup(SYSCTLFN_CALL(&node));
2306 } /* ixgbe_sysctl_rdt_handler */
2307
2308 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2309 /************************************************************************
2310 * ixgbe_register_vlan
2311 *
2312 * Run via vlan config EVENT, it enables us to use the
2313 * HW Filter table since we can get the vlan id. This
2314 * just creates the entry in the soft version of the
2315 * VFTA, init will repopulate the real table.
2316 ************************************************************************/
2317 static void
2318 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2319 {
2320 struct adapter *adapter = ifp->if_softc;
2321 u16 index, bit;
2322
2323 if (ifp->if_softc != arg) /* Not our event */
2324 return;
2325
2326 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2327 return;
2328
2329 IXGBE_CORE_LOCK(adapter);
2330 index = (vtag >> 5) & 0x7F;
2331 bit = vtag & 0x1F;
2332 adapter->shadow_vfta[index] |= (1 << bit);
2333 ixgbe_setup_vlan_hw_support(adapter);
2334 IXGBE_CORE_UNLOCK(adapter);
2335 } /* ixgbe_register_vlan */
2336
2337 /************************************************************************
2338 * ixgbe_unregister_vlan
2339 *
2340 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2341 ************************************************************************/
2342 static void
2343 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2344 {
2345 struct adapter *adapter = ifp->if_softc;
2346 u16 index, bit;
2347
2348 if (ifp->if_softc != arg)
2349 return;
2350
2351 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2352 return;
2353
2354 IXGBE_CORE_LOCK(adapter);
2355 index = (vtag >> 5) & 0x7F;
2356 bit = vtag & 0x1F;
2357 adapter->shadow_vfta[index] &= ~(1 << bit);
2358 /* Re-init to load the changes */
2359 ixgbe_setup_vlan_hw_support(adapter);
2360 IXGBE_CORE_UNLOCK(adapter);
2361 } /* ixgbe_unregister_vlan */
2362 #endif
2363
/*
 * ixgbe_setup_vlan_hw_support - program VLAN tag stripping and filtering
 *
 * Pushes the current ethercom VLAN capabilities and the software
 * shadow VFTA into the hardware.
 */
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;
	bool hwtagging;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(&adapter->osdep.ec);

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			if (hwtagging)
				ctrl |= IXGBE_RXDCTL_VME;
			else
				ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		/* Remember per-ring whether hardware strips the tag */
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	/* VLAN hw tagging for 82598 (global, not per-queue as on 82599+) */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hwtagging)
			ctrl |= IXGBE_VLNCTRL_VME;
		else
			ctrl &= ~IXGBE_VLNCTRL_VME;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
2422
2423 /************************************************************************
2424 * ixgbe_get_slot_info
2425 *
2426 * Get the width and transaction speed of
2427 * the slot this adapter is plugged into.
2428 ************************************************************************/
2429 static void
2430 ixgbe_get_slot_info(struct adapter *adapter)
2431 {
2432 device_t dev = adapter->dev;
2433 struct ixgbe_hw *hw = &adapter->hw;
2434 u32 offset;
2435 u16 link;
2436 int bus_info_valid = TRUE;
2437
2438 /* Some devices are behind an internal bridge */
2439 switch (hw->device_id) {
2440 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2441 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2442 goto get_parent_info;
2443 default:
2444 break;
2445 }
2446
2447 ixgbe_get_bus_info(hw);
2448
2449 /*
2450 * Some devices don't use PCI-E, but there is no need
2451 * to display "Unknown" for bus speed and width.
2452 */
2453 switch (hw->mac.type) {
2454 case ixgbe_mac_X550EM_x:
2455 case ixgbe_mac_X550EM_a:
2456 return;
2457 default:
2458 goto display;
2459 }
2460
2461 get_parent_info:
2462 /*
2463 * For the Quad port adapter we need to parse back
2464 * up the PCI tree to find the speed of the expansion
2465 * slot into which this adapter is plugged. A bit more work.
2466 */
2467 dev = device_parent(device_parent(dev));
2468 #if 0
2469 #ifdef IXGBE_DEBUG
2470 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2471 pci_get_slot(dev), pci_get_function(dev));
2472 #endif
2473 dev = device_parent(device_parent(dev));
2474 #ifdef IXGBE_DEBUG
2475 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2476 pci_get_slot(dev), pci_get_function(dev));
2477 #endif
2478 #endif
2479 /* Now get the PCI Express Capabilities offset */
2480 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2481 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2482 /*
2483 * Hmm...can't get PCI-Express capabilities.
2484 * Falling back to default method.
2485 */
2486 bus_info_valid = FALSE;
2487 ixgbe_get_bus_info(hw);
2488 goto display;
2489 }
2490 /* ...and read the Link Status Register */
2491 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2492 offset + PCIE_LCSR) >> 16;
2493 ixgbe_set_pci_config_data_generic(hw, link);
2494
2495 display:
2496 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2497 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2498 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2499 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2500 "Unknown"),
2501 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2502 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2503 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2504 "Unknown"));
2505
2506 if (bus_info_valid) {
2507 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2508 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2509 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2510 device_printf(dev, "PCI-Express bandwidth available"
2511 " for this card\n is not sufficient for"
2512 " optimal performance.\n");
2513 device_printf(dev, "For optimal performance a x8 "
2514 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2515 }
2516 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2517 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2518 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2519 device_printf(dev, "PCI-Express bandwidth available"
2520 " for this card\n is not sufficient for"
2521 " optimal performance.\n");
2522 device_printf(dev, "For optimal performance a x8 "
2523 "PCIE Gen3 slot is required.\n");
2524 }
2525 } else
2526 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2527
2528 return;
2529 } /* ixgbe_get_slot_info */
2530
2531 /************************************************************************
2532 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2533 ************************************************************************/
2534 static inline void
2535 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2536 {
2537 struct ixgbe_hw *hw = &adapter->hw;
2538 struct ix_queue *que = &adapter->queues[vector];
2539 u64 queue = (u64)(1ULL << vector);
2540 u32 mask;
2541
2542 mutex_enter(&que->dc_mtx);
2543 if (que->disabled_count > 0 && --que->disabled_count > 0)
2544 goto out;
2545
2546 if (hw->mac.type == ixgbe_mac_82598EB) {
2547 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2548 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2549 } else {
2550 mask = (queue & 0xFFFFFFFF);
2551 if (mask)
2552 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2553 mask = (queue >> 32);
2554 if (mask)
2555 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2556 }
2557 out:
2558 mutex_exit(&que->dc_mtx);
2559 } /* ixgbe_enable_queue */
2560
2561 /************************************************************************
2562 * ixgbe_disable_queue_internal
2563 ************************************************************************/
2564 static inline void
2565 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2566 {
2567 struct ixgbe_hw *hw = &adapter->hw;
2568 struct ix_queue *que = &adapter->queues[vector];
2569 u64 queue = (u64)(1ULL << vector);
2570 u32 mask;
2571
2572 mutex_enter(&que->dc_mtx);
2573
2574 if (que->disabled_count > 0) {
2575 if (nestok)
2576 que->disabled_count++;
2577 goto out;
2578 }
2579 que->disabled_count++;
2580
2581 if (hw->mac.type == ixgbe_mac_82598EB) {
2582 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2583 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2584 } else {
2585 mask = (queue & 0xFFFFFFFF);
2586 if (mask)
2587 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2588 mask = (queue >> 32);
2589 if (mask)
2590 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2591 }
2592 out:
2593 mutex_exit(&que->dc_mtx);
2594 } /* ixgbe_disable_queue_internal */
2595
2596 /************************************************************************
2597 * ixgbe_disable_queue
2598 ************************************************************************/
2599 static inline void
2600 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2601 {
2602
2603 ixgbe_disable_queue_internal(adapter, vector, true);
2604 } /* ixgbe_disable_queue */
2605
2606 /************************************************************************
2607 * ixgbe_sched_handle_que - schedule deferred packet processing
2608 ************************************************************************/
2609 static inline void
2610 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2611 {
2612
2613 if(que->txrx_use_workqueue) {
2614 /*
2615 * adapter->que_wq is bound to each CPU instead of
2616 * each NIC queue to reduce workqueue kthread. As we
2617 * should consider about interrupt affinity in this
2618 * function, the workqueue kthread must be WQ_PERCPU.
2619 * If create WQ_PERCPU workqueue kthread for each NIC
2620 * queue, that number of created workqueue kthread is
2621 * (number of used NIC queue) * (number of CPUs) =
2622 * (number of CPUs) ^ 2 most often.
2623 *
2624 * The same NIC queue's interrupts are avoided by
2625 * masking the queue's interrupt. And different
2626 * NIC queue's interrupts use different struct work
2627 * (que->wq_cookie). So, "enqueued flag" to avoid
2628 * twice workqueue_enqueue() is not required .
2629 */
2630 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2631 } else {
2632 softint_schedule(que->que_si);
2633 }
2634 }
2635
2636 /************************************************************************
2637 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2638 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt; deferred processing re-enables it */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context; always defer RX */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	/* TX completions are cheap enough to reap here, under the TX lock */
	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet over the last interval (TX and RX) */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* More work (deferred RX, or leftover TX/RX)?  Schedule it;
	 * otherwise unmask this queue's interrupt again. */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2737
2738 /************************************************************************
2739 * ixgbe_media_status - Media Ioctl callback
2740 *
2741 * Called whenever the user queries the status of
2742 * the interface using ifconfig.
2743 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T/TX) layers: subtype chosen by negotiated speed */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (SFP+ DAC) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2893
2894 /************************************************************************
2895 * ixgbe_media_change - Media Ioctl callback
2896 *
2897 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
2899 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; user changes are not permitted */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the hardware reports */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		/* NOTE(review): leaves speed == 0 for setup_link below */
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Re-encode the selection into adapter->advertise as a bitmask:
	 * bit0=100M, bit1=1G, bit2=10G, bit3=10M, bit4=2.5G, bit5=5G
	 * (presumably the same encoding as the advertise_speed sysctl —
	 * verify against ixgbe_sysctl_advertise).  0 means autoselect.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
3002
3003 /************************************************************************
3004 * ixgbe_set_promisc
3005 ************************************************************************/
3006 static void
3007 ixgbe_set_promisc(struct adapter *adapter)
3008 {
3009 struct ifnet *ifp = adapter->ifp;
3010 int mcnt = 0;
3011 u32 rctl;
3012 struct ether_multi *enm;
3013 struct ether_multistep step;
3014 struct ethercom *ec = &adapter->osdep.ec;
3015
3016 KASSERT(mutex_owned(&adapter->core_mtx));
3017 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3018 rctl &= (~IXGBE_FCTRL_UPE);
3019 if (ifp->if_flags & IFF_ALLMULTI)
3020 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
3021 else {
3022 ETHER_LOCK(ec);
3023 ETHER_FIRST_MULTI(step, ec, enm);
3024 while (enm != NULL) {
3025 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3026 break;
3027 mcnt++;
3028 ETHER_NEXT_MULTI(step, enm);
3029 }
3030 ETHER_UNLOCK(ec);
3031 }
3032 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
3033 rctl &= (~IXGBE_FCTRL_MPE);
3034 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
3035
3036 if (ifp->if_flags & IFF_PROMISC) {
3037 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
3039 } else if (ifp->if_flags & IFF_ALLMULTI) {
3040 rctl |= IXGBE_FCTRL_MPE;
3041 rctl &= ~IXGBE_FCTRL_UPE;
3042 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
3043 }
3044 } /* ixgbe_set_promisc */
3045
3046 /************************************************************************
3047 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3048 ************************************************************************/
3049 static int
3050 ixgbe_msix_link(void *arg)
3051 {
3052 struct adapter *adapter = arg;
3053 struct ixgbe_hw *hw = &adapter->hw;
3054 u32 eicr, eicr_mask;
3055 s32 retval;
3056
3057 ++adapter->link_irq.ev_count;
3058
3059 /* Pause other interrupts */
3060 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3061
3062 /* First get the cause */
3063 /*
3064 * The specifications of 82598, 82599, X540 and X550 say EICS register
3065 * is write only. However, Linux says it is a workaround for silicon
3066 * errata to read EICS instead of EICR to get interrupt cause. It seems
3067 * there is a problem about read clear mechanism for EICR register.
3068 */
3069 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3070 /* Be sure the queue bits are not cleared */
3071 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3072 /* Clear interrupt with write */
3073 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3074
3075 /* Link status change */
3076 if (eicr & IXGBE_EICR_LSC) {
3077 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3078 softint_schedule(adapter->link_si);
3079 }
3080
3081 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3082 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3083 (eicr & IXGBE_EICR_FLOW_DIR)) {
3084 /* This is probably overkill :) */
3085 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3086 return 1;
3087 /* Disable the interrupt */
3088 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3089 softint_schedule(adapter->fdir_si);
3090 }
3091
3092 if (eicr & IXGBE_EICR_ECC) {
3093 device_printf(adapter->dev,
3094 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3095 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3096 }
3097
3098 /* Check for over temp condition */
3099 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3100 switch (adapter->hw.mac.type) {
3101 case ixgbe_mac_X550EM_a:
3102 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3103 break;
3104 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3105 IXGBE_EICR_GPI_SDP0_X550EM_a);
3106 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3107 IXGBE_EICR_GPI_SDP0_X550EM_a);
3108 retval = hw->phy.ops.check_overtemp(hw);
3109 if (retval != IXGBE_ERR_OVERTEMP)
3110 break;
3111 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3112 device_printf(adapter->dev, "System shutdown required!\n");
3113 break;
3114 default:
3115 if (!(eicr & IXGBE_EICR_TS))
3116 break;
3117 retval = hw->phy.ops.check_overtemp(hw);
3118 if (retval != IXGBE_ERR_OVERTEMP)
3119 break;
3120 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3121 device_printf(adapter->dev, "System shutdown required!\n");
3122 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3123 break;
3124 }
3125 }
3126
3127 /* Check for VF message */
3128 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3129 (eicr & IXGBE_EICR_MAILBOX))
3130 softint_schedule(adapter->mbx_si);
3131 }
3132
3133 if (ixgbe_is_sfp(hw)) {
3134 /* Pluggable optics-related interrupt */
3135 if (hw->mac.type >= ixgbe_mac_X540)
3136 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3137 else
3138 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3139
3140 if (eicr & eicr_mask) {
3141 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3142 softint_schedule(adapter->mod_si);
3143 }
3144
3145 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3146 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3147 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3148 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3149 softint_schedule(adapter->msf_si);
3150 }
3151 }
3152
3153 /* Check for fan failure */
3154 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3155 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3156 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3157 }
3158
3159 /* External PHY interrupt */
3160 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3161 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3162 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3163 softint_schedule(adapter->phy_si);
3164 }
3165
3166 /* Re-enable other interrupts */
3167 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3168 return 1;
3169 } /* ixgbe_msix_link */
3170
3171 static void
3172 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3173 {
3174
3175 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3176 itr |= itr << 16;
3177 else
3178 itr |= IXGBE_EITR_CNT_WDIS;
3179
3180 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3181 }
3182
3183
3184 /************************************************************************
3185 * ixgbe_sysctl_interrupt_rate_handler
3186 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No tuning while the firmware is in recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Derive the current rate from the EITR interval field (bits 3-11) */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;	/* 0 = interval not set, i.e. no limitation */
	/* Hand the value to sysctl; on a write, "rate" gets the new value */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* clamp to the minimum usable rate */
		/* Encode the new interval back into the 0xff8 field */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3237
3238 const struct sysctlnode *
3239 ixgbe_sysctl_instance(struct adapter *adapter)
3240 {
3241 const char *dvname;
3242 struct sysctllog **log;
3243 int rc;
3244 const struct sysctlnode *rnode;
3245
3246 if (adapter->sysctltop != NULL)
3247 return adapter->sysctltop;
3248
3249 log = &adapter->sysctllog;
3250 dvname = device_xname(adapter->dev);
3251
3252 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3253 0, CTLTYPE_NODE, dvname,
3254 SYSCTL_DESCR("ixgbe information and settings"),
3255 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3256 goto err;
3257
3258 return rnode;
3259 err:
3260 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3261 return NULL;
3262 }
3263
3264 /************************************************************************
3265 * ixgbe_add_device_sysctls
3266 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-device root created here */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* AIM default comes from the driver-global ixgbe_enable_aim */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" were changed in a sysctl
	 * handler, it would cause flip-flopping between softint and
	 * workqueue mode within one deferred processing pass.  Avoiding
	 * that in ixgbe_sched_handle_que() would require
	 * preempt_disable()/preempt_enable() around softint_schedule().
	 * Copying "adapter->txrx_use_workqueue" into each queue in the
	 * interrupt handler is cheaper than doing
	 * preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que() call.
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3420
3421 /************************************************************************
3422 * ixgbe_allocate_pci_resources
3423 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* BAR0 holds the device registers; accept 32- or 64-bit memory BARs */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Device registers must not be mapped prefetchable */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped for teardown */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3470
3471 static void
3472 ixgbe_free_softint(struct adapter *adapter)
3473 {
3474 struct ix_queue *que = adapter->queues;
3475 struct tx_ring *txr = adapter->tx_rings;
3476 int i;
3477
3478 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3479 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3480 if (txr->txr_si != NULL)
3481 softint_disestablish(txr->txr_si);
3482 }
3483 if (que->que_si != NULL)
3484 softint_disestablish(que->que_si);
3485 }
3486 if (adapter->txr_wq != NULL)
3487 workqueue_destroy(adapter->txr_wq);
3488 if (adapter->txr_wq_enqueued != NULL)
3489 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3490 if (adapter->que_wq != NULL)
3491 workqueue_destroy(adapter->que_wq);
3492
3493 /* Drain the Link queue */
3494 if (adapter->link_si != NULL) {
3495 softint_disestablish(adapter->link_si);
3496 adapter->link_si = NULL;
3497 }
3498 if (adapter->mod_si != NULL) {
3499 softint_disestablish(adapter->mod_si);
3500 adapter->mod_si = NULL;
3501 }
3502 if (adapter->msf_si != NULL) {
3503 softint_disestablish(adapter->msf_si);
3504 adapter->msf_si = NULL;
3505 }
3506 if (adapter->phy_si != NULL) {
3507 softint_disestablish(adapter->phy_si);
3508 adapter->phy_si = NULL;
3509 }
3510 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3511 if (adapter->fdir_si != NULL) {
3512 softint_disestablish(adapter->fdir_si);
3513 adapter->fdir_si = NULL;
3514 }
3515 }
3516 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3517 if (adapter->mbx_si != NULL) {
3518 softint_disestablish(adapter->mbx_si);
3519 adapter->mbx_si = NULL;
3520 }
3521 }
3522 } /* ixgbe_free_softint */
3523
3524 /************************************************************************
3525 * ixgbe_detach - Device removal routine
3526 *
3527 * Called when the driver is being removed.
3528 * Stops the adapter and deallocates all the resources
3529 * that were allocated for driver operation.
3530 *
3531 * return 0 on success, positive on failure
3532 ************************************************************************/
3533 static int
3534 ixgbe_detach(device_t dev, int flags)
3535 {
3536 struct adapter *adapter = device_private(dev);
3537 struct rx_ring *rxr = adapter->rx_rings;
3538 struct tx_ring *txr = adapter->tx_rings;
3539 struct ixgbe_hw *hw = &adapter->hw;
3540 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3541 u32 ctrl_ext;
3542 int i;
3543
3544 INIT_DEBUGOUT("ixgbe_detach: begin");
3545 if (adapter->osdep.attached == false)
3546 return 0;
3547
3548 if (ixgbe_pci_iov_detach(dev) != 0) {
3549 device_printf(dev, "SR-IOV in use; detach first.\n");
3550 return (EBUSY);
3551 }
3552
3553 /* Stop the interface. Callouts are stopped in it. */
3554 ixgbe_ifstop(adapter->ifp, 1);
3555 #if NVLAN > 0
3556 /* Make sure VLANs are not using driver */
3557 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3558 ; /* nothing to do: no VLANs */
3559 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
3560 vlan_ifdetach(adapter->ifp);
3561 else {
3562 aprint_error_dev(dev, "VLANs in use, detach first\n");
3563 return (EBUSY);
3564 }
3565 #endif
3566
3567 pmf_device_deregister(dev);
3568
3569 ether_ifdetach(adapter->ifp);
3570 /* Stop the adapter */
3571 IXGBE_CORE_LOCK(adapter);
3572 ixgbe_setup_low_power_mode(adapter);
3573 IXGBE_CORE_UNLOCK(adapter);
3574
3575 ixgbe_free_softint(adapter);
3576
3577 /* let hardware know driver is unloading */
3578 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3579 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3580 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3581
3582 callout_halt(&adapter->timer, NULL);
3583 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3584 callout_halt(&adapter->recovery_mode_timer, NULL);
3585
3586 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3587 netmap_detach(adapter->ifp);
3588
3589 ixgbe_free_pci_resources(adapter);
3590 #if 0 /* XXX the NetBSD port is probably missing something here */
3591 bus_generic_detach(dev);
3592 #endif
3593 if_detach(adapter->ifp);
3594 if_percpuq_destroy(adapter->ipq);
3595
3596 sysctl_teardown(&adapter->sysctllog);
3597 evcnt_detach(&adapter->efbig_tx_dma_setup);
3598 evcnt_detach(&adapter->mbuf_defrag_failed);
3599 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3600 evcnt_detach(&adapter->einval_tx_dma_setup);
3601 evcnt_detach(&adapter->other_tx_dma_setup);
3602 evcnt_detach(&adapter->eagain_tx_dma_setup);
3603 evcnt_detach(&adapter->enomem_tx_dma_setup);
3604 evcnt_detach(&adapter->watchdog_events);
3605 evcnt_detach(&adapter->tso_err);
3606 evcnt_detach(&adapter->link_irq);
3607 evcnt_detach(&adapter->link_sicount);
3608 evcnt_detach(&adapter->mod_sicount);
3609 evcnt_detach(&adapter->msf_sicount);
3610 evcnt_detach(&adapter->phy_sicount);
3611
3612 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3613 if (i < __arraycount(stats->mpc)) {
3614 evcnt_detach(&stats->mpc[i]);
3615 if (hw->mac.type == ixgbe_mac_82598EB)
3616 evcnt_detach(&stats->rnbc[i]);
3617 }
3618 if (i < __arraycount(stats->pxontxc)) {
3619 evcnt_detach(&stats->pxontxc[i]);
3620 evcnt_detach(&stats->pxonrxc[i]);
3621 evcnt_detach(&stats->pxofftxc[i]);
3622 evcnt_detach(&stats->pxoffrxc[i]);
3623 if (hw->mac.type >= ixgbe_mac_82599EB)
3624 evcnt_detach(&stats->pxon2offc[i]);
3625 }
3626 }
3627
3628 txr = adapter->tx_rings;
3629 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3630 evcnt_detach(&adapter->queues[i].irqs);
3631 evcnt_detach(&adapter->queues[i].handleq);
3632 evcnt_detach(&adapter->queues[i].req);
3633 evcnt_detach(&txr->no_desc_avail);
3634 evcnt_detach(&txr->total_packets);
3635 evcnt_detach(&txr->tso_tx);
3636 #ifndef IXGBE_LEGACY_TX
3637 evcnt_detach(&txr->pcq_drops);
3638 #endif
3639
3640 if (i < __arraycount(stats->qprc)) {
3641 evcnt_detach(&stats->qprc[i]);
3642 evcnt_detach(&stats->qptc[i]);
3643 evcnt_detach(&stats->qbrc[i]);
3644 evcnt_detach(&stats->qbtc[i]);
3645 if (hw->mac.type >= ixgbe_mac_82599EB)
3646 evcnt_detach(&stats->qprdc[i]);
3647 }
3648
3649 evcnt_detach(&rxr->rx_packets);
3650 evcnt_detach(&rxr->rx_bytes);
3651 evcnt_detach(&rxr->rx_copies);
3652 evcnt_detach(&rxr->no_jmbuf);
3653 evcnt_detach(&rxr->rx_discarded);
3654 }
3655 evcnt_detach(&stats->ipcs);
3656 evcnt_detach(&stats->l4cs);
3657 evcnt_detach(&stats->ipcs_bad);
3658 evcnt_detach(&stats->l4cs_bad);
3659 evcnt_detach(&stats->intzero);
3660 evcnt_detach(&stats->legint);
3661 evcnt_detach(&stats->crcerrs);
3662 evcnt_detach(&stats->illerrc);
3663 evcnt_detach(&stats->errbc);
3664 evcnt_detach(&stats->mspdc);
3665 if (hw->mac.type >= ixgbe_mac_X550)
3666 evcnt_detach(&stats->mbsdc);
3667 evcnt_detach(&stats->mpctotal);
3668 evcnt_detach(&stats->mlfc);
3669 evcnt_detach(&stats->mrfc);
3670 evcnt_detach(&stats->rlec);
3671 evcnt_detach(&stats->lxontxc);
3672 evcnt_detach(&stats->lxonrxc);
3673 evcnt_detach(&stats->lxofftxc);
3674 evcnt_detach(&stats->lxoffrxc);
3675
3676 /* Packet Reception Stats */
3677 evcnt_detach(&stats->tor);
3678 evcnt_detach(&stats->gorc);
3679 evcnt_detach(&stats->tpr);
3680 evcnt_detach(&stats->gprc);
3681 evcnt_detach(&stats->mprc);
3682 evcnt_detach(&stats->bprc);
3683 evcnt_detach(&stats->prc64);
3684 evcnt_detach(&stats->prc127);
3685 evcnt_detach(&stats->prc255);
3686 evcnt_detach(&stats->prc511);
3687 evcnt_detach(&stats->prc1023);
3688 evcnt_detach(&stats->prc1522);
3689 evcnt_detach(&stats->ruc);
3690 evcnt_detach(&stats->rfc);
3691 evcnt_detach(&stats->roc);
3692 evcnt_detach(&stats->rjc);
3693 evcnt_detach(&stats->mngprc);
3694 evcnt_detach(&stats->mngpdc);
3695 evcnt_detach(&stats->xec);
3696
3697 /* Packet Transmission Stats */
3698 evcnt_detach(&stats->gotc);
3699 evcnt_detach(&stats->tpt);
3700 evcnt_detach(&stats->gptc);
3701 evcnt_detach(&stats->bptc);
3702 evcnt_detach(&stats->mptc);
3703 evcnt_detach(&stats->mngptc);
3704 evcnt_detach(&stats->ptc64);
3705 evcnt_detach(&stats->ptc127);
3706 evcnt_detach(&stats->ptc255);
3707 evcnt_detach(&stats->ptc511);
3708 evcnt_detach(&stats->ptc1023);
3709 evcnt_detach(&stats->ptc1522);
3710
3711 ixgbe_free_transmit_structures(adapter);
3712 ixgbe_free_receive_structures(adapter);
3713 for (i = 0; i < adapter->num_queues; i++) {
3714 struct ix_queue * que = &adapter->queues[i];
3715 mutex_destroy(&que->dc_mtx);
3716 }
3717 free(adapter->queues, M_DEVBUF);
3718 free(adapter->mta, M_DEVBUF);
3719
3720 IXGBE_CORE_LOCK_DESTROY(adapter);
3721
3722 return (0);
3723 } /* ixgbe_detach */
3724
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU (Low Power Link Up) and/or
 * Wake-on-LAN before a suspend/shutdown transition.
 *
 * adapter: softc; the caller must hold core_mtx (asserted below).
 *
 * Returns 0 on success, or the error from the PHY enter_lplu operation.
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY up across the stop so LPLU can be entered. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3784
/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 *
 * Compiled out: FreeBSD registers this as a device shutdown method;
 * the NetBSD port would need to hook it through pmf(9) instead.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Drop into the low-power/WoL state under the core lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3804
3805 /************************************************************************
3806 * ixgbe_suspend
3807 *
3808 * From D0 to D3
3809 ************************************************************************/
3810 static bool
3811 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3812 {
3813 struct adapter *adapter = device_private(dev);
3814 int error = 0;
3815
3816 INIT_DEBUGOUT("ixgbe_suspend: begin");
3817
3818 IXGBE_CORE_LOCK(adapter);
3819
3820 error = ixgbe_setup_low_power_mode(adapter);
3821
3822 IXGBE_CORE_UNLOCK(adapter);
3823
3824 return (error);
3825 } /* ixgbe_suspend */
3826
3827 /************************************************************************
3828 * ixgbe_resume
3829 *
3830 * From D3 to D0
3831 ************************************************************************/
3832 static bool
3833 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3834 {
3835 struct adapter *adapter = device_private(dev);
3836 struct ifnet *ifp = adapter->ifp;
3837 struct ixgbe_hw *hw = &adapter->hw;
3838 u32 wus;
3839
3840 INIT_DEBUGOUT("ixgbe_resume: begin");
3841
3842 IXGBE_CORE_LOCK(adapter);
3843
3844 /* Read & clear WUS register */
3845 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3846 if (wus)
3847 device_printf(dev, "Woken up by (WUS): %#010x\n",
3848 IXGBE_READ_REG(hw, IXGBE_WUS));
3849 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3850 /* And clear WUFC until next low-power transition */
3851 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3852
3853 /*
3854 * Required after D3->D0 transition;
3855 * will re-advertise all previous advertised speeds
3856 */
3857 if (ifp->if_flags & IFF_UP)
3858 ixgbe_init_locked(adapter);
3859
3860 IXGBE_CORE_UNLOCK(adapter);
3861
3862 return true;
3863 } /* ixgbe_resume */
3864
/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * NOTE(review): intentionally empty in this port -- presumably the
	 * NetBSD port handles offload capability negotiation elsewhere and
	 * keeps this stub to mirror the FreeBSD driver structure; confirm
	 * before removing.
	 */
}
3877
/************************************************************************
 * ixgbe_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init
 * entry point in network interface structure. It is also
 * used by the driver as a hw/sw initialization routine to
 * get to a consistent state.
 *
 * Caller must hold core_mtx (asserted below).  Returns nothing; on a
 * setup failure the error is logged and the adapter is stopped.
 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and local timer before reconfiguring. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to ~10ms for the enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the 1Hz housekeeping/watchdog timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4131
4132 /************************************************************************
4133 * ixgbe_init
4134 ************************************************************************/
4135 static int
4136 ixgbe_init(struct ifnet *ifp)
4137 {
4138 struct adapter *adapter = ifp->if_softc;
4139
4140 IXGBE_CORE_LOCK(adapter);
4141 ixgbe_init_locked(adapter);
4142 IXGBE_CORE_UNLOCK(adapter);
4143
4144 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4145 } /* ixgbe_init */
4146
/************************************************************************
 * ixgbe_set_ivar
 *
 * Setup the correct IVAR register for a particular MSI-X interrupt
 * (yes this is all very magic and confusing :)
 *   - entry is the register array entry
 *   - vector is the MSI-X vector for this queue
 *   - type is RX/TX/MISC: 0 = RX, 1 = TX, -1 = other causes (link etc.)
 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the entry valid in the hardware table. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: RX and TX entries live in one flat table, TX
		 * offset by 64; each 32-bit IVAR register holds four
		 * 8-bit entries (entry >> 2 selects the register,
		 * entry & 0x3 the byte lane).
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* Dedicated misc register; two 8-bit entries. */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			/*
			 * Each register covers two queues; RX occupies
			 * the low byte and TX the next byte of each
			 * 16-bit half (hence 16 * (entry & 1) + 8 * type).
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
4199
4200 /************************************************************************
4201 * ixgbe_configure_ivars
4202 ************************************************************************/
4203 static void
4204 ixgbe_configure_ivars(struct adapter *adapter)
4205 {
4206 struct ix_queue *que = adapter->queues;
4207 u32 newitr;
4208
4209 if (ixgbe_max_interrupt_rate > 0)
4210 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4211 else {
4212 /*
4213 * Disable DMA coalescing if interrupt moderation is
4214 * disabled.
4215 */
4216 adapter->dmac = 0;
4217 newitr = 0;
4218 }
4219
4220 for (int i = 0; i < adapter->num_queues; i++, que++) {
4221 struct rx_ring *rxr = &adapter->rx_rings[i];
4222 struct tx_ring *txr = &adapter->tx_rings[i];
4223 /* First the RX queue entry */
4224 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4225 /* ... and the TX */
4226 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4227 /* Set an Initial EITR value */
4228 ixgbe_eitr_write(adapter, que->msix, newitr);
4229 /*
4230 * To eliminate influence of the previous state.
4231 * At this point, Tx/Rx interrupt handler
4232 * (ixgbe_msix_que()) cannot be called, so both
4233 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4234 */
4235 que->eitr_setting = 0;
4236 }
4237
4238 /* For the Link interrupt */
4239 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4240 } /* ixgbe_configure_ivars */
4241
4242 /************************************************************************
4243 * ixgbe_config_gpie
4244 ************************************************************************/
4245 static void
4246 ixgbe_config_gpie(struct adapter *adapter)
4247 {
4248 struct ixgbe_hw *hw = &adapter->hw;
4249 u32 gpie;
4250
4251 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4252
4253 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4254 /* Enable Enhanced MSI-X mode */
4255 gpie |= IXGBE_GPIE_MSIX_MODE
4256 | IXGBE_GPIE_EIAME
4257 | IXGBE_GPIE_PBA_SUPPORT
4258 | IXGBE_GPIE_OCD;
4259 }
4260
4261 /* Fan Failure Interrupt */
4262 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4263 gpie |= IXGBE_SDP1_GPIEN;
4264
4265 /* Thermal Sensor Interrupt */
4266 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4267 gpie |= IXGBE_SDP0_GPIEN_X540;
4268
4269 /* Link detection */
4270 switch (hw->mac.type) {
4271 case ixgbe_mac_82599EB:
4272 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4273 break;
4274 case ixgbe_mac_X550EM_x:
4275 case ixgbe_mac_X550EM_a:
4276 gpie |= IXGBE_SDP0_GPIEN_X540;
4277 break;
4278 default:
4279 break;
4280 }
4281
4282 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4283
4284 } /* ixgbe_config_gpie */
4285
4286 /************************************************************************
4287 * ixgbe_config_delay_values
4288 *
4289 * Requires adapter->max_frame_size to be set.
4290 ************************************************************************/
4291 static void
4292 ixgbe_config_delay_values(struct adapter *adapter)
4293 {
4294 struct ixgbe_hw *hw = &adapter->hw;
4295 u32 rxpb, frame, size, tmp;
4296
4297 frame = adapter->max_frame_size;
4298
4299 /* Calculate High Water */
4300 switch (hw->mac.type) {
4301 case ixgbe_mac_X540:
4302 case ixgbe_mac_X550:
4303 case ixgbe_mac_X550EM_x:
4304 case ixgbe_mac_X550EM_a:
4305 tmp = IXGBE_DV_X540(frame, frame);
4306 break;
4307 default:
4308 tmp = IXGBE_DV(frame, frame);
4309 break;
4310 }
4311 size = IXGBE_BT2KB(tmp);
4312 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4313 hw->fc.high_water[0] = rxpb - size;
4314
4315 /* Now calculate Low Water */
4316 switch (hw->mac.type) {
4317 case ixgbe_mac_X540:
4318 case ixgbe_mac_X550:
4319 case ixgbe_mac_X550EM_x:
4320 case ixgbe_mac_X550EM_a:
4321 tmp = IXGBE_LOW_DV_X540(frame);
4322 break;
4323 default:
4324 tmp = IXGBE_LOW_DV(frame);
4325 break;
4326 }
4327 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4328
4329 hw->fc.pause_time = IXGBE_FC_PAUSE;
4330 hw->fc.send_xon = TRUE;
4331 } /* ixgbe_config_delay_values */
4332
/************************************************************************
 * ixgbe_set_multi - Multicast Update
 *
 * Called whenever multicast address list is updated.
 *
 * Rebuilds the adapter's multicast filter table from the ethercom
 * multicast list.  Falls back to IFF_ALLMULTI (hardware accepts all
 * multicast) when the list overflows MAX_NUM_MULTICAST_ADDRESSES or
 * contains an address range.  Caller must hold core_mtx.
 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Too many groups, or a range entry (addrlo != addrhi)
		 * that the perfect-filter table cannot express: accept
		 * all multicast instead.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Reflect promiscuous/allmulti state in the filter control reg. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Program the perfect filters only when the list fit. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4391
4392 /************************************************************************
4393 * ixgbe_mc_array_itr
4394 *
4395 * An iterator function needed by the multicast shared code.
4396 * It feeds the shared code routine the addresses in the
4397 * array of ixgbe_set_multi() one by one.
4398 ************************************************************************/
4399 static u8 *
4400 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4401 {
4402 struct ixgbe_mc_addr *mta;
4403
4404 mta = (struct ixgbe_mc_addr *)*update_ptr;
4405 *vmdq = mta->vmdq;
4406
4407 *update_ptr = (u8*)(mta + 1);
4408
4409 return (mta->addr);
4410 } /* ixgbe_mc_array_itr */
4411
/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 * Callout entry point: serializes the real work in
 * ixgbe_local_timer1() (link status, statistics, watchdog) under
 * the core lock.
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4427
/*
 * Locked body of the 1Hz housekeeping timer: probes for inserted SFP
 * optics, refreshes link status and statistics, aggregates per-queue
 * TX error counters into the adapter-wide event counters, performs
 * TX-hang detection, and reschedules itself.  Resets the adapter if
 * every queue reports hung.  Caller must hold core_mtx.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmask of queues with pending TX work */
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;		/* number of queues declared hung */
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	/* Sum the per-queue TX DMA setup error counts across all queues. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *   - mark hung queues so we don't schedule on them
	 *   - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next tick. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, mark the interface down, and reinit. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4532
4533 /************************************************************************
4534 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4535 ************************************************************************/
4536 static void
4537 ixgbe_recovery_mode_timer(void *arg)
4538 {
4539 struct adapter *adapter = arg;
4540 struct ixgbe_hw *hw = &adapter->hw;
4541
4542 IXGBE_CORE_LOCK(adapter);
4543 if (ixgbe_fw_recovery_mode(hw)) {
4544 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4545 /* Firmware error detected, entering recovery mode */
4546 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4547
4548 if (hw->adapter_stopped == FALSE)
4549 ixgbe_stop(adapter);
4550 }
4551 } else
4552 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4553
4554 callout_reset(&adapter->recovery_mode_timer, hz,
4555 ixgbe_recovery_mode_timer, adapter);
4556 IXGBE_CORE_UNLOCK(adapter);
4557 } /* ixgbe_recovery_mode_timer */
4558
4559 /************************************************************************
4560 * ixgbe_sfp_probe
4561 *
4562 * Determine if a port had optics inserted.
4563 ************************************************************************/
4564 static bool
4565 ixgbe_sfp_probe(struct adapter *adapter)
4566 {
4567 struct ixgbe_hw *hw = &adapter->hw;
4568 device_t dev = adapter->dev;
4569 bool result = FALSE;
4570
4571 if ((hw->phy.type == ixgbe_phy_nl) &&
4572 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4573 s32 ret = hw->phy.ops.identify_sfp(hw);
4574 if (ret)
4575 goto out;
4576 ret = hw->phy.ops.reset(hw);
4577 adapter->sfp_probe = FALSE;
4578 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4579 device_printf(dev,"Unsupported SFP+ module detected!");
4580 device_printf(dev,
4581 "Reload driver with supported module.\n");
4582 goto out;
4583 } else
4584 device_printf(dev, "SFP+ module detected!\n");
4585 /* We now have supported optics */
4586 result = TRUE;
4587 }
4588 out:
4589
4590 return (result);
4591 } /* ixgbe_sfp_probe */
4592
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 * Softint handler run on module insertion/removal: identifies the
 * new module, performs the MAC-specific SFP setup, and then kicks
 * the multispeed-fiber softint to renegotiate the link.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk errata: confirm via the SDP cage-presence
		 * pin that a module is really inserted before trying
		 * to identify it (the pin differs per MAC type).
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		/* Cage empty: spurious interrupt, nothing to set up. */
		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 resets the PHY; newer MACs run the SFP setup op. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Hand off to the MSF tasklet to renegotiate link speed. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4643
4644
4645 /************************************************************************
4646 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4647 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If nothing specific is advertised, fall back to everything the
	 * hardware reports as capable.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */
4675
4676 /************************************************************************
4677 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4678 ************************************************************************/
4679 static void
4680 ixgbe_handle_phy(void *context)
4681 {
4682 struct adapter *adapter = context;
4683 struct ixgbe_hw *hw = &adapter->hw;
4684 int error;
4685
4686 ++adapter->phy_sicount.ev_count;
4687 error = hw->phy.ops.handle_lasi(hw);
4688 if (error == IXGBE_ERR_OVERTEMP)
4689 device_printf(adapter->dev,
4690 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4691 " PHY will downshift to lower power state!\n");
4692 else if (error)
4693 device_printf(adapter->dev,
4694 "Error handling LASI interrupt: %d\n", error);
4695 } /* ixgbe_handle_phy */
4696
/*
 * if_stop handler: bring the interface down under the core lock.
 * The "disable" argument is ignored; a full stop is always performed.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	/* ixgbe_stop() asserts that the core lock is held */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
4706
4707 /************************************************************************
4708 * ixgbe_stop - Stop the hardware
4709 *
4710 * Disables all traffic on the adapter by issuing a
4711 * global reset on the MAC and deallocates TX/RX buffers.
4712 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * NOTE(review): adapter_stopped is cleared between the reset and
	 * the stop so that ixgbe_stop_adapter() is not skipped as a no-op
	 * — this assumes ixgbe_reset_hw() marks the adapter stopped;
	 * confirm against the shared ixgbe common code.
	 */
	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4748
4749 /************************************************************************
4750 * ixgbe_update_link_status - Update OS on link state
4751 *
4752 * Note: Only updates the OS on the cached link state.
4753 * The real check of the hardware only happens with
4754 * a link interrupt.
4755 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a transition into the UP state. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued work now that the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4842
4843 /************************************************************************
4844 * ixgbe_config_dmac - Configure DMA Coalescing
4845 ************************************************************************/
4846 static void
4847 ixgbe_config_dmac(struct adapter *adapter)
4848 {
4849 struct ixgbe_hw *hw = &adapter->hw;
4850 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4851
4852 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4853 return;
4854
4855 if (dcfg->watchdog_timer ^ adapter->dmac ||
4856 dcfg->link_speed ^ adapter->link_speed) {
4857 dcfg->watchdog_timer = adapter->dmac;
4858 dcfg->fcoe_en = false;
4859 dcfg->link_speed = adapter->link_speed;
4860 dcfg->num_tcs = 1;
4861
4862 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4863 dcfg->watchdog_timer, dcfg->link_speed);
4864
4865 hw->mac.ops.dmac_config(hw);
4866 }
4867 } /* ixgbe_config_dmac */
4868
4869 /************************************************************************
4870 * ixgbe_enable_intr
4871 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything except the per-queue RX/TX causes. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4952
4953 /************************************************************************
4954 * ixgbe_disable_intr_internal
4955 ************************************************************************/
4956 static void
4957 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
4958 {
4959 struct ix_queue *que = adapter->queues;
4960
4961 /* disable interrupts other than queues */
4962 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4963
4964 if (adapter->msix_mem)
4965 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4966
4967 for (int i = 0; i < adapter->num_queues; i++, que++)
4968 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
4969
4970 IXGBE_WRITE_FLUSH(&adapter->hw);
4971
4972 } /* ixgbe_do_disable_intr_internal */
4973
4974 /************************************************************************
4975 * ixgbe_disable_intr
4976 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/* nestok=true: see ixgbe_disable_queue_internal for the semantics */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4983
4984 /************************************************************************
4985 * ixgbe_ensure_disabled_intr
4986 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/* nestok=false: see ixgbe_disable_queue_internal for the semantics */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4993
4994 /************************************************************************
4995 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4996 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Shared interrupt that wasn't ours; re-arm and bail. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack the cause, defer to tasklet */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber: ack and defer likewise */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Either defer RX/TX completion work or re-enable interrupts now. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
5087
5088 /************************************************************************
5089 * ixgbe_free_pciintr_resources
5090 ************************************************************************/
5091 static void
5092 ixgbe_free_pciintr_resources(struct adapter *adapter)
5093 {
5094 struct ix_queue *que = adapter->queues;
5095 int rid;
5096
5097 /*
5098 * Release all msix queue resources:
5099 */
5100 for (int i = 0; i < adapter->num_queues; i++, que++) {
5101 if (que->res != NULL) {
5102 pci_intr_disestablish(adapter->osdep.pc,
5103 adapter->osdep.ihs[i]);
5104 adapter->osdep.ihs[i] = NULL;
5105 }
5106 }
5107
5108 /* Clean the Legacy or Link interrupt last */
5109 if (adapter->vector) /* we are doing MSIX */
5110 rid = adapter->vector;
5111 else
5112 rid = 0;
5113
5114 if (adapter->osdep.ihs[rid] != NULL) {
5115 pci_intr_disestablish(adapter->osdep.pc,
5116 adapter->osdep.ihs[rid]);
5117 adapter->osdep.ihs[rid] = NULL;
5118 }
5119
5120 if (adapter->osdep.intrs != NULL) {
5121 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5122 adapter->osdep.nintrs);
5123 adapter->osdep.intrs = NULL;
5124 }
5125 } /* ixgbe_free_pciintr_resources */
5126
5127 /************************************************************************
5128 * ixgbe_free_pci_resources
5129 ************************************************************************/
5130 static void
5131 ixgbe_free_pci_resources(struct adapter *adapter)
5132 {
5133
5134 ixgbe_free_pciintr_resources(adapter);
5135
5136 if (adapter->osdep.mem_size != 0) {
5137 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5138 adapter->osdep.mem_bus_space_handle,
5139 adapter->osdep.mem_size);
5140 }
5141
5142 } /* ixgbe_free_pci_resources */
5143
5144 /************************************************************************
5145 * ixgbe_set_sysctl_value
5146 ************************************************************************/
5147 static void
5148 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5149 const char *description, int *limit, int value)
5150 {
5151 device_t dev = adapter->dev;
5152 struct sysctllog **log;
5153 const struct sysctlnode *rnode, *cnode;
5154
5155 /*
5156 * It's not required to check recovery mode because this function never
5157 * touches hardware.
5158 */
5159
5160 log = &adapter->sysctllog;
5161 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5162 aprint_error_dev(dev, "could not create sysctl root\n");
5163 return;
5164 }
5165 if (sysctl_createv(log, 0, &rnode, &cnode,
5166 CTLFLAG_READWRITE, CTLTYPE_INT,
5167 name, SYSCTL_DESCR(description),
5168 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5169 aprint_error_dev(dev, "could not create sysctl\n");
5170 *limit = value;
5171 } /* ixgbe_set_sysctl_value */
5172
5173 /************************************************************************
5174 * ixgbe_sysctl_flowcntl
5175 *
5176 * SYSCTL wrapper around setting Flow Control
5177 ************************************************************************/
5178 static int
5179 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5180 {
5181 struct sysctlnode node = *rnode;
5182 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5183 int error, fc;
5184
5185 if (ixgbe_fw_recovery_mode_swflag(adapter))
5186 return (EPERM);
5187
5188 fc = adapter->hw.fc.current_mode;
5189 node.sysctl_data = &fc;
5190 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5191 if (error != 0 || newp == NULL)
5192 return error;
5193
5194 /* Don't bother if it's not changed */
5195 if (fc == adapter->hw.fc.current_mode)
5196 return (0);
5197
5198 return ixgbe_set_flowcntl(adapter, fc);
5199 } /* ixgbe_sysctl_flowcntl */
5200
5201 /************************************************************************
5202 * ixgbe_set_flowcntl - Set flow control
5203 *
5204 * Flow control values:
5205 * 0 - off
5206 * 1 - rx pause
5207 * 2 - tx pause
5208 * 3 - full
5209 ************************************************************************/
5210 static int
5211 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5212 {
5213 switch (fc) {
5214 case ixgbe_fc_rx_pause:
5215 case ixgbe_fc_tx_pause:
5216 case ixgbe_fc_full:
5217 adapter->hw.fc.requested_mode = fc;
5218 if (adapter->num_queues > 1)
5219 ixgbe_disable_rx_drop(adapter);
5220 break;
5221 case ixgbe_fc_none:
5222 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5223 if (adapter->num_queues > 1)
5224 ixgbe_enable_rx_drop(adapter);
5225 break;
5226 default:
5227 return (EINVAL);
5228 }
5229
5230 #if 0 /* XXX NetBSD */
5231 /* Don't autoneg if forcing a value */
5232 adapter->hw.fc.disable_fc_autoneg = TRUE;
5233 #endif
5234 ixgbe_fc_enable(&adapter->hw);
5235
5236 return (0);
5237 } /* ixgbe_set_flowcntl */
5238
5239 /************************************************************************
5240 * ixgbe_enable_rx_drop
5241 *
5242 * Enable the hardware to drop packets when the buffer is
5243 * full. This is useful with multiqueue, so that no single
5244 * queue being full stalls the entire RX engine. We only
5245 * enable this when Multiqueue is enabled AND Flow Control
5246 * is disabled.
5247 ************************************************************************/
5248 static void
5249 ixgbe_enable_rx_drop(struct adapter *adapter)
5250 {
5251 struct ixgbe_hw *hw = &adapter->hw;
5252 struct rx_ring *rxr;
5253 u32 srrctl;
5254
5255 for (int i = 0; i < adapter->num_queues; i++) {
5256 rxr = &adapter->rx_rings[i];
5257 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5258 srrctl |= IXGBE_SRRCTL_DROP_EN;
5259 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5260 }
5261
5262 /* enable drop for each vf */
5263 for (int i = 0; i < adapter->num_vfs; i++) {
5264 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5265 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5266 IXGBE_QDE_ENABLE));
5267 }
5268 } /* ixgbe_enable_rx_drop */
5269
5270 /************************************************************************
5271 * ixgbe_disable_rx_drop
5272 ************************************************************************/
5273 static void
5274 ixgbe_disable_rx_drop(struct adapter *adapter)
5275 {
5276 struct ixgbe_hw *hw = &adapter->hw;
5277 struct rx_ring *rxr;
5278 u32 srrctl;
5279
5280 for (int i = 0; i < adapter->num_queues; i++) {
5281 rxr = &adapter->rx_rings[i];
5282 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5283 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5284 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5285 }
5286
5287 /* disable drop for each vf */
5288 for (int i = 0; i < adapter->num_vfs; i++) {
5289 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5290 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5291 }
5292 } /* ixgbe_disable_rx_drop */
5293
5294 /************************************************************************
5295 * ixgbe_sysctl_advertise
5296 *
5297 * SYSCTL wrapper around setting advertised speed
5298 ************************************************************************/
5299 static int
5300 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5301 {
5302 struct sysctlnode node = *rnode;
5303 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5304 int error = 0, advertise;
5305
5306 if (ixgbe_fw_recovery_mode_swflag(adapter))
5307 return (EPERM);
5308
5309 advertise = adapter->advertise;
5310 node.sysctl_data = &advertise;
5311 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5312 if (error != 0 || newp == NULL)
5313 return error;
5314
5315 return ixgbe_set_advertise(adapter, advertise);
5316 } /* ixgbe_sysctl_advertise */
5317
5318 /************************************************************************
5319 * ixgbe_set_advertise - Control advertised link speed
5320 *
5321 * Flags:
5322 * 0x00 - Default (all capable link speed)
5323 * 0x01 - advertise 100 Mb
5324 * 0x02 - advertise 1G
5325 * 0x04 - advertise 10G
5326 * 0x08 - advertise 10 Mb
5327 * 0x10 - advertise 2.5G
5328 * 0x20 - advertise 5G
5329 ************************************************************************/
5330 static int
5331 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5332 {
5333 device_t dev;
5334 struct ixgbe_hw *hw;
5335 ixgbe_link_speed speed = 0;
5336 ixgbe_link_speed link_caps = 0;
5337 s32 err = IXGBE_NOT_IMPLEMENTED;
5338 bool negotiate = FALSE;
5339
5340 /* Checks to validate new value */
5341 if (adapter->advertise == advertise) /* no change */
5342 return (0);
5343
5344 dev = adapter->dev;
5345 hw = &adapter->hw;
5346
5347 /* No speed changes for backplane media */
5348 if (hw->phy.media_type == ixgbe_media_type_backplane)
5349 return (ENODEV);
5350
5351 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5352 (hw->phy.multispeed_fiber))) {
5353 device_printf(dev,
5354 "Advertised speed can only be set on copper or "
5355 "multispeed fiber media types.\n");
5356 return (EINVAL);
5357 }
5358
5359 if (advertise < 0x0 || advertise > 0x2f) {
5360 device_printf(dev,
5361 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5362 return (EINVAL);
5363 }
5364
5365 if (hw->mac.ops.get_link_capabilities) {
5366 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5367 &negotiate);
5368 if (err != IXGBE_SUCCESS) {
5369 device_printf(dev, "Unable to determine supported advertise speeds\n");
5370 return (ENODEV);
5371 }
5372 }
5373
5374 /* Set new value and report new advertised mode */
5375 if (advertise & 0x1) {
5376 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5377 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5378 return (EINVAL);
5379 }
5380 speed |= IXGBE_LINK_SPEED_100_FULL;
5381 }
5382 if (advertise & 0x2) {
5383 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5384 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5385 return (EINVAL);
5386 }
5387 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5388 }
5389 if (advertise & 0x4) {
5390 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5391 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5392 return (EINVAL);
5393 }
5394 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5395 }
5396 if (advertise & 0x8) {
5397 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5398 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5399 return (EINVAL);
5400 }
5401 speed |= IXGBE_LINK_SPEED_10_FULL;
5402 }
5403 if (advertise & 0x10) {
5404 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5405 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5406 return (EINVAL);
5407 }
5408 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5409 }
5410 if (advertise & 0x20) {
5411 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5412 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5413 return (EINVAL);
5414 }
5415 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5416 }
5417 if (advertise == 0)
5418 speed = link_caps; /* All capable link speed */
5419
5420 hw->mac.autotry_restart = TRUE;
5421 hw->mac.ops.setup_link(hw, speed, TRUE);
5422 adapter->advertise = advertise;
5423
5424 return (0);
5425 } /* ixgbe_set_advertise */
5426
5427 /************************************************************************
5428 * ixgbe_get_advertise - Get current advertised speed settings
5429 *
5430 * Formatted for sysctl usage.
5431 * Flags:
5432 * 0x01 - advertise 100 Mb
5433 * 0x02 - advertise 1G
5434 * 0x04 - advertise 10G
5435 * 0x08 - advertise 10 Mb (yes, Mb)
5436 * 0x10 - advertise 2.5G
5437 * 0x20 - advertise 5G
5438 ************************************************************************/
5439 static int
5440 ixgbe_get_advertise(struct adapter *adapter)
5441 {
5442 struct ixgbe_hw *hw = &adapter->hw;
5443 int speed;
5444 ixgbe_link_speed link_caps = 0;
5445 s32 err;
5446 bool negotiate = FALSE;
5447
5448 /*
5449 * Advertised speed means nothing unless it's copper or
5450 * multi-speed fiber
5451 */
5452 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5453 !(hw->phy.multispeed_fiber))
5454 return (0);
5455
5456 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5457 if (err != IXGBE_SUCCESS)
5458 return (0);
5459
5460 speed =
5461 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5462 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5463 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5464 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5465 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5466 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5467
5468 return speed;
5469 } /* ixgbe_get_advertise */
5470
5471 /************************************************************************
5472 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5473 *
5474 * Control values:
5475 * 0/1 - off / on (use default value of 1000)
5476 *
5477 * Legal timer values are:
5478 * 50,100,250,500,1000,2000,5000,10000
5479 *
5480 * Turning off interrupt moderation will also turn this off.
5481 ************************************************************************/
5482 static int
5483 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5484 {
5485 struct sysctlnode node = *rnode;
5486 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5487 struct ifnet *ifp = adapter->ifp;
5488 int error;
5489 int newval;
5490
5491 if (ixgbe_fw_recovery_mode_swflag(adapter))
5492 return (EPERM);
5493
5494 newval = adapter->dmac;
5495 node.sysctl_data = &newval;
5496 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5497 if ((error) || (newp == NULL))
5498 return (error);
5499
5500 switch (newval) {
5501 case 0:
5502 /* Disabled */
5503 adapter->dmac = 0;
5504 break;
5505 case 1:
5506 /* Enable and use default */
5507 adapter->dmac = 1000;
5508 break;
5509 case 50:
5510 case 100:
5511 case 250:
5512 case 500:
5513 case 1000:
5514 case 2000:
5515 case 5000:
5516 case 10000:
5517 /* Legal values - allow */
5518 adapter->dmac = newval;
5519 break;
5520 default:
5521 /* Do nothing, illegal value */
5522 return (EINVAL);
5523 }
5524
5525 /* Re-initialize hardware if it's already running */
5526 if (ifp->if_flags & IFF_RUNNING)
5527 ifp->if_init(ifp);
5528
5529 return (0);
5530 }
5531
5532 #ifdef IXGBE_DEBUG
5533 /************************************************************************
5534 * ixgbe_sysctl_power_state
5535 *
5536 * Sysctl to test power states
5537 * Values:
5538 * 0 - set device to D0
5539 * 3 - set device to D3
5540 * (none) - get current device power state
5541 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this #ifdef notyet body is unported FreeBSD code —
	 * e.g. "req->newp" is the FreeBSD sysctl idiom; the NetBSD
	 * SYSCTLFN_ARGS form would use plain "newp".  Fix when enabling.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Not implemented on NetBSD yet: report success, change nothing. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5577 #endif
5578
5579 /************************************************************************
5580 * ixgbe_sysctl_wol_enable
5581 *
5582 * Sysctl to enable/disable the WoL capability,
5583 * if supported by the adapter.
5584 *
5585 * Values:
5586 * 0 - disabled
5587 * 1 - enabled
5588 ************************************************************************/
5589 static int
5590 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5591 {
5592 struct sysctlnode node = *rnode;
5593 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5594 struct ixgbe_hw *hw = &adapter->hw;
5595 bool new_wol_enabled;
5596 int error = 0;
5597
5598 /*
5599 * It's not required to check recovery mode because this function never
5600 * touches hardware.
5601 */
5602 new_wol_enabled = hw->wol_enabled;
5603 node.sysctl_data = &new_wol_enabled;
5604 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5605 if ((error) || (newp == NULL))
5606 return (error);
5607 if (new_wol_enabled == hw->wol_enabled)
5608 return (0);
5609
5610 if (new_wol_enabled && !adapter->wol_support)
5611 return (ENODEV);
5612 else
5613 hw->wol_enabled = new_wol_enabled;
5614
5615 return (0);
5616 } /* ixgbe_sysctl_wol_enable */
5617
5618 /************************************************************************
5619 * ixgbe_sysctl_wufc - Wake Up Filter Control
5620 *
5621 * Sysctl to enable/disable the types of packets that the
5622 * adapter will wake up on upon receipt.
5623 * Flags:
5624 * 0x1 - Link Status Change
5625 * 0x2 - Magic Packet
5626 * 0x4 - Direct Exact
5627 * 0x8 - Directed Multicast
5628 * 0x10 - Broadcast
5629 * 0x20 - ARP/IPv4 Request Packet
5630 * 0x40 - Direct IPv4 Packet
5631 * 0x80 - Direct IPv6 Packet
5632 *
5633 * Settings not listed above will cause the sysctl to return an error.
5634 ************************************************************************/
5635 static int
5636 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5637 {
5638 struct sysctlnode node = *rnode;
5639 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5640 int error = 0;
5641 u32 new_wufc;
5642
5643 /*
5644 * It's not required to check recovery mode because this function never
5645 * touches hardware.
5646 */
5647 new_wufc = adapter->wufc;
5648 node.sysctl_data = &new_wufc;
5649 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5650 if ((error) || (newp == NULL))
5651 return (error);
5652 if (new_wufc == adapter->wufc)
5653 return (0);
5654
5655 if (new_wufc & 0xffffff00)
5656 return (EINVAL);
5657
5658 new_wufc &= 0xff;
5659 new_wufc |= (0xffffff & adapter->wufc);
5660 adapter->wufc = new_wufc;
5661
5662 return (0);
5663 } /* ixgbe_sysctl_wufc */
5664
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 * Debug sysctl intended to dump the RSS redirection table ([E]RETA).
 * The whole body is compiled out under "notyet" on NetBSD because it
 * uses FreeBSD-only facilities (sbuf, the sysctl "req" argument); the
 * handler currently just returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Reading RETA registers touches hardware: refuse in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has 128 redirection entries (RETA + ERETA). */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* First 32 entries live in RETA, the rest in ERETA. */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5726
5727 /************************************************************************
5728 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5729 *
5730 * For X552/X557-AT devices using an external PHY
5731 ************************************************************************/
5732 static int
5733 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5734 {
5735 struct sysctlnode node = *rnode;
5736 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5737 struct ixgbe_hw *hw = &adapter->hw;
5738 int val;
5739 u16 reg;
5740 int error;
5741
5742 if (ixgbe_fw_recovery_mode_swflag(adapter))
5743 return (EPERM);
5744
5745 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5746 device_printf(adapter->dev,
5747 "Device has no supported external thermal sensor.\n");
5748 return (ENODEV);
5749 }
5750
5751 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5752 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5753 device_printf(adapter->dev,
5754 "Error reading from PHY's current temperature register\n");
5755 return (EAGAIN);
5756 }
5757
5758 node.sysctl_data = &val;
5759
5760 /* Shift temp for output */
5761 val = reg >> 8;
5762
5763 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5764 if ((error) || (newp == NULL))
5765 return (error);
5766
5767 return (0);
5768 } /* ixgbe_sysctl_phy_temp */
5769
5770 /************************************************************************
5771 * ixgbe_sysctl_phy_overtemp_occurred
5772 *
5773 * Reports (directly from the PHY) whether the current PHY
5774 * temperature is over the overtemp threshold.
5775 ************************************************************************/
5776 static int
5777 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5778 {
5779 struct sysctlnode node = *rnode;
5780 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5781 struct ixgbe_hw *hw = &adapter->hw;
5782 int val, error;
5783 u16 reg;
5784
5785 if (ixgbe_fw_recovery_mode_swflag(adapter))
5786 return (EPERM);
5787
5788 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5789 device_printf(adapter->dev,
5790 "Device has no supported external thermal sensor.\n");
5791 return (ENODEV);
5792 }
5793
5794 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5795 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5796 device_printf(adapter->dev,
5797 "Error reading from PHY's temperature status register\n");
5798 return (EAGAIN);
5799 }
5800
5801 node.sysctl_data = &val;
5802
5803 /* Get occurrence bit */
5804 val = !!(reg & 0x4000);
5805
5806 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5807 if ((error) || (newp == NULL))
5808 return (error);
5809
5810 return (0);
5811 } /* ixgbe_sysctl_phy_overtemp_occurred */
5812
5813 /************************************************************************
5814 * ixgbe_sysctl_eee_state
5815 *
5816 * Sysctl to set EEE power saving feature
5817 * Values:
5818 * 0 - disable EEE
5819 * 1 - enable EEE
5820 * (none) - get current device EEE state
5821 ************************************************************************/
5822 static int
5823 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5824 {
5825 struct sysctlnode node = *rnode;
5826 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5827 struct ifnet *ifp = adapter->ifp;
5828 device_t dev = adapter->dev;
5829 int curr_eee, new_eee, error = 0;
5830 s32 retval;
5831
5832 if (ixgbe_fw_recovery_mode_swflag(adapter))
5833 return (EPERM);
5834
5835 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5836 node.sysctl_data = &new_eee;
5837 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5838 if ((error) || (newp == NULL))
5839 return (error);
5840
5841 /* Nothing to do */
5842 if (new_eee == curr_eee)
5843 return (0);
5844
5845 /* Not supported */
5846 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5847 return (EINVAL);
5848
5849 /* Bounds checking */
5850 if ((new_eee < 0) || (new_eee > 1))
5851 return (EINVAL);
5852
5853 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5854 if (retval) {
5855 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5856 return (EINVAL);
5857 }
5858
5859 /* Restart auto-neg */
5860 ifp->if_init(ifp);
5861
5862 device_printf(dev, "New EEE state: %d\n", new_eee);
5863
5864 /* Cache new value */
5865 if (new_eee)
5866 adapter->feat_en |= IXGBE_FEATURE_EEE;
5867 else
5868 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5869
5870 return (error);
5871 } /* ixgbe_sysctl_eee_state */
5872
/*
 * PRINTQS(adapter, regname): print one line containing the value of the
 * per-queue register "regname" for every queue of the adapter, prefixed
 * with the device name (tab before the first value, space between the
 * rest).  Helper for ixgbe_print_debug_info() below.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
5886
5887 /************************************************************************
5888 * ixgbe_print_debug_info
5889 *
5890 * Called only when em_display_debug_stats is enabled.
5891 * Provides a way to take a look at important statistics
5892 * maintained by the driver and hardware.
5893 ************************************************************************/
5894 static void
5895 ixgbe_print_debug_info(struct adapter *adapter)
5896 {
5897 device_t dev = adapter->dev;
5898 struct ixgbe_hw *hw = &adapter->hw;
5899 int table_size;
5900 int i;
5901
5902 switch (adapter->hw.mac.type) {
5903 case ixgbe_mac_X550:
5904 case ixgbe_mac_X550EM_x:
5905 case ixgbe_mac_X550EM_a:
5906 table_size = 128;
5907 break;
5908 default:
5909 table_size = 32;
5910 break;
5911 }
5912
5913 device_printf(dev, "[E]RETA:\n");
5914 for (i = 0; i < table_size; i++) {
5915 if (i < 32)
5916 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5917 IXGBE_RETA(i)));
5918 else
5919 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5920 IXGBE_ERETA(i - 32)));
5921 }
5922
5923 device_printf(dev, "queue:");
5924 for (i = 0; i < adapter->num_queues; i++) {
5925 printf((i == 0) ? "\t" : " ");
5926 printf("%8d", i);
5927 }
5928 printf("\n");
5929 PRINTQS(adapter, RDBAL);
5930 PRINTQS(adapter, RDBAH);
5931 PRINTQS(adapter, RDLEN);
5932 PRINTQS(adapter, SRRCTL);
5933 PRINTQS(adapter, RDH);
5934 PRINTQS(adapter, RDT);
5935 PRINTQS(adapter, RXDCTL);
5936
5937 device_printf(dev, "RQSMR:");
5938 for (i = 0; i < adapter->num_queues / 4; i++) {
5939 printf((i == 0) ? "\t" : " ");
5940 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
5941 }
5942 printf("\n");
5943
5944 device_printf(dev, "disabled_count:");
5945 for (i = 0; i < adapter->num_queues; i++) {
5946 printf((i == 0) ? "\t" : " ");
5947 printf("%8d", adapter->queues[i].disabled_count);
5948 }
5949 printf("\n");
5950
5951 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
5952 if (hw->mac.type != ixgbe_mac_82598EB) {
5953 device_printf(dev, "EIMS_EX(0):\t%08x\n",
5954 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
5955 device_printf(dev, "EIMS_EX(1):\t%08x\n",
5956 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
5957 }
5958 } /* ixgbe_print_debug_info */
5959
5960 /************************************************************************
5961 * ixgbe_sysctl_debug
5962 ************************************************************************/
5963 static int
5964 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5965 {
5966 struct sysctlnode node = *rnode;
5967 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5968 int error, result = 0;
5969
5970 if (ixgbe_fw_recovery_mode_swflag(adapter))
5971 return (EPERM);
5972
5973 node.sysctl_data = &result;
5974 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5975
5976 if (error || newp == NULL)
5977 return error;
5978
5979 if (result == 1)
5980 ixgbe_print_debug_info(adapter);
5981
5982 return 0;
5983 } /* ixgbe_sysctl_debug */
5984
5985 /************************************************************************
5986 * ixgbe_init_device_features
5987 ************************************************************************/
5988 static void
5989 ixgbe_init_device_features(struct adapter *adapter)
5990 {
5991 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5992 | IXGBE_FEATURE_RSS
5993 | IXGBE_FEATURE_MSI
5994 | IXGBE_FEATURE_MSIX
5995 | IXGBE_FEATURE_LEGACY_IRQ
5996 | IXGBE_FEATURE_LEGACY_TX;
5997
5998 /* Set capabilities first... */
5999 switch (adapter->hw.mac.type) {
6000 case ixgbe_mac_82598EB:
6001 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6002 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6003 break;
6004 case ixgbe_mac_X540:
6005 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6006 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6007 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6008 (adapter->hw.bus.func == 0))
6009 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6010 break;
6011 case ixgbe_mac_X550:
6012 /*
6013 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6014 * NVM Image version.
6015 */
6016 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6017 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6018 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6019 break;
6020 case ixgbe_mac_X550EM_x:
6021 /*
6022 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6023 * NVM Image version.
6024 */
6025 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6026 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6027 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
6028 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6029 break;
6030 case ixgbe_mac_X550EM_a:
6031 /*
6032 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6033 * NVM Image version.
6034 */
6035 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6036 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6037 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6038 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6039 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6040 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6041 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6042 }
6043 break;
6044 case ixgbe_mac_82599EB:
6045 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6046 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6047 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6048 (adapter->hw.bus.func == 0))
6049 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6050 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6051 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6052 break;
6053 default:
6054 break;
6055 }
6056
6057 /* Enabled by default... */
6058 /* Fan failure detection */
6059 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6060 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6061 /* Netmap */
6062 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6063 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6064 /* EEE */
6065 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6066 adapter->feat_en |= IXGBE_FEATURE_EEE;
6067 /* Thermal Sensor */
6068 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6069 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6070 /*
6071 * Recovery mode:
6072 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6073 * NVM Image version.
6074 */
6075
6076 /* Enabled via global sysctl... */
6077 /* Flow Director */
6078 if (ixgbe_enable_fdir) {
6079 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6080 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6081 else
6082 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
6083 }
6084 /* Legacy (single queue) transmit */
6085 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6086 ixgbe_enable_legacy_tx)
6087 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6088 /*
6089 * Message Signal Interrupts - Extended (MSI-X)
6090 * Normal MSI is only enabled if MSI-X calls fail.
6091 */
6092 if (!ixgbe_enable_msix)
6093 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6094 /* Receive-Side Scaling (RSS) */
6095 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6096 adapter->feat_en |= IXGBE_FEATURE_RSS;
6097
6098 /* Disable features with unmet dependencies... */
6099 /* No MSI-X */
6100 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6101 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6102 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6103 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6104 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6105 }
6106 } /* ixgbe_init_device_features */
6107
6108 /************************************************************************
6109 * ixgbe_probe - Device identification routine
6110 *
6111 * Determines if the driver should be loaded on
6112 * adapter based on its PCI vendor/device ID.
6113 *
6114 * return BUS_PROBE_DEFAULT on success, positive on failure
6115 ************************************************************************/
6116 static int
6117 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6118 {
6119 const struct pci_attach_args *pa = aux;
6120
6121 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6122 }
6123
6124 static const ixgbe_vendor_info_t *
6125 ixgbe_lookup(const struct pci_attach_args *pa)
6126 {
6127 const ixgbe_vendor_info_t *ent;
6128 pcireg_t subid;
6129
6130 INIT_DEBUGOUT("ixgbe_lookup: begin");
6131
6132 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6133 return NULL;
6134
6135 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6136
6137 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6138 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6139 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6140 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6141 (ent->subvendor_id == 0)) &&
6142 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6143 (ent->subdevice_id == 0))) {
6144 return ent;
6145 }
6146 }
6147 return NULL;
6148 }
6149
6150 static int
6151 ixgbe_ifflags_cb(struct ethercom *ec)
6152 {
6153 struct ifnet *ifp = &ec->ec_if;
6154 struct adapter *adapter = ifp->if_softc;
6155 int change, rc = 0;
6156
6157 IXGBE_CORE_LOCK(adapter);
6158
6159 change = ifp->if_flags ^ adapter->if_flags;
6160 if (change != 0)
6161 adapter->if_flags = ifp->if_flags;
6162
6163 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
6164 rc = ENETRESET;
6165 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
6166 ixgbe_set_promisc(adapter);
6167
6168 /* Set up VLAN support and filter */
6169 ixgbe_setup_vlan_hw_support(adapter);
6170
6171 IXGBE_CORE_UNLOCK(adapter);
6172
6173 return rc;
6174 }
6175
6176 /************************************************************************
6177 * ixgbe_ioctl - Ioctl entry point
6178 *
6179 * Called when the user wants to configure the interface.
6180 *
6181 * return 0 on success, positive on failure
6182 ************************************************************************/
6183 static int
6184 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
6185 {
6186 struct adapter *adapter = ifp->if_softc;
6187 struct ixgbe_hw *hw = &adapter->hw;
6188 struct ifcapreq *ifcr = data;
6189 struct ifreq *ifr = data;
6190 int error = 0;
6191 int l4csum_en;
6192 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
6193 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
6194
6195 if (ixgbe_fw_recovery_mode_swflag(adapter))
6196 return (EPERM);
6197
6198 switch (command) {
6199 case SIOCSIFFLAGS:
6200 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6201 break;
6202 case SIOCADDMULTI:
6203 case SIOCDELMULTI:
6204 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6205 break;
6206 case SIOCSIFMEDIA:
6207 case SIOCGIFMEDIA:
6208 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6209 break;
6210 case SIOCSIFCAP:
6211 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6212 break;
6213 case SIOCSIFMTU:
6214 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6215 break;
6216 #ifdef __NetBSD__
6217 case SIOCINITIFADDR:
6218 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6219 break;
6220 case SIOCGIFFLAGS:
6221 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6222 break;
6223 case SIOCGIFAFLAG_IN:
6224 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6225 break;
6226 case SIOCGIFADDR:
6227 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6228 break;
6229 case SIOCGIFMTU:
6230 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6231 break;
6232 case SIOCGIFCAP:
6233 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6234 break;
6235 case SIOCGETHERCAP:
6236 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6237 break;
6238 case SIOCGLIFADDR:
6239 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6240 break;
6241 case SIOCZIFDATA:
6242 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6243 hw->mac.ops.clear_hw_cntrs(hw);
6244 ixgbe_clear_evcnt(adapter);
6245 break;
6246 case SIOCAIFADDR:
6247 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6248 break;
6249 #endif
6250 default:
6251 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6252 break;
6253 }
6254
6255 switch (command) {
6256 case SIOCSIFMEDIA:
6257 case SIOCGIFMEDIA:
6258 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
6259 case SIOCGI2C:
6260 {
6261 struct ixgbe_i2c_req i2c;
6262
6263 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6264 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6265 if (error != 0)
6266 break;
6267 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6268 error = EINVAL;
6269 break;
6270 }
6271 if (i2c.len > sizeof(i2c.data)) {
6272 error = EINVAL;
6273 break;
6274 }
6275
6276 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6277 i2c.dev_addr, i2c.data);
6278 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6279 break;
6280 }
6281 case SIOCSIFCAP:
6282 /* Layer-4 Rx checksum offload has to be turned on and
6283 * off as a unit.
6284 */
6285 l4csum_en = ifcr->ifcr_capenable & l4csum;
6286 if (l4csum_en != l4csum && l4csum_en != 0)
6287 return EINVAL;
6288 /*FALLTHROUGH*/
6289 case SIOCADDMULTI:
6290 case SIOCDELMULTI:
6291 case SIOCSIFFLAGS:
6292 case SIOCSIFMTU:
6293 default:
6294 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6295 return error;
6296 if ((ifp->if_flags & IFF_RUNNING) == 0)
6297 ;
6298 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6299 IXGBE_CORE_LOCK(adapter);
6300 if ((ifp->if_flags & IFF_RUNNING) != 0)
6301 ixgbe_init_locked(adapter);
6302 ixgbe_recalculate_max_frame(adapter);
6303 IXGBE_CORE_UNLOCK(adapter);
6304 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6305 /*
6306 * Multicast list has changed; set the hardware filter
6307 * accordingly.
6308 */
6309 IXGBE_CORE_LOCK(adapter);
6310 ixgbe_disable_intr(adapter);
6311 ixgbe_set_multi(adapter);
6312 ixgbe_enable_intr(adapter);
6313 IXGBE_CORE_UNLOCK(adapter);
6314 }
6315 return 0;
6316 }
6317
6318 return error;
6319 } /* ixgbe_ioctl */
6320
6321 /************************************************************************
6322 * ixgbe_check_fan_failure
6323 ************************************************************************/
6324 static void
6325 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6326 {
6327 u32 mask;
6328
6329 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6330 IXGBE_ESDP_SDP1;
6331
6332 if (reg & mask)
6333 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6334 } /* ixgbe_check_fan_failure */
6335
6336 /************************************************************************
6337 * ixgbe_handle_que
6338 ************************************************************************/
6339 static void
6340 ixgbe_handle_que(void *context)
6341 {
6342 struct ix_queue *que = context;
6343 struct adapter *adapter = que->adapter;
6344 struct tx_ring *txr = que->txr;
6345 struct ifnet *ifp = adapter->ifp;
6346 bool more = false;
6347
6348 que->handleq.ev_count++;
6349
6350 if (ifp->if_flags & IFF_RUNNING) {
6351 more = ixgbe_rxeof(que);
6352 IXGBE_TX_LOCK(txr);
6353 more |= ixgbe_txeof(txr);
6354 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6355 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6356 ixgbe_mq_start_locked(ifp, txr);
6357 /* Only for queue 0 */
6358 /* NetBSD still needs this for CBQ */
6359 if ((&adapter->queues[0] == que)
6360 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6361 ixgbe_legacy_start_locked(ifp, txr);
6362 IXGBE_TX_UNLOCK(txr);
6363 }
6364
6365 if (more) {
6366 que->req.ev_count++;
6367 ixgbe_sched_handle_que(adapter, que);
6368 } else if (que->res != NULL) {
6369 /* Re-enable this interrupt */
6370 ixgbe_enable_queue(adapter, que->msix);
6371 } else
6372 ixgbe_enable_intr(adapter);
6373
6374 return;
6375 } /* ixgbe_handle_que */
6376
6377 /************************************************************************
6378 * ixgbe_handle_que_work
6379 ************************************************************************/
6380 static void
6381 ixgbe_handle_que_work(struct work *wk, void *context)
6382 {
6383 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6384
6385 /*
6386 * "enqueued flag" is not required here.
6387 * See ixgbe_msix_que().
6388 */
6389 ixgbe_handle_que(que);
6390 }
6391
6392 /************************************************************************
6393 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6394 ************************************************************************/
6395 static int
6396 ixgbe_allocate_legacy(struct adapter *adapter,
6397 const struct pci_attach_args *pa)
6398 {
6399 device_t dev = adapter->dev;
6400 struct ix_queue *que = adapter->queues;
6401 struct tx_ring *txr = adapter->tx_rings;
6402 int counts[PCI_INTR_TYPE_SIZE];
6403 pci_intr_type_t intr_type, max_type;
6404 char intrbuf[PCI_INTRSTR_LEN];
6405 const char *intrstr = NULL;
6406
6407 /* We allocate a single interrupt resource */
6408 max_type = PCI_INTR_TYPE_MSI;
6409 counts[PCI_INTR_TYPE_MSIX] = 0;
6410 counts[PCI_INTR_TYPE_MSI] =
6411 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6412 /* Check not feat_en but feat_cap to fallback to INTx */
6413 counts[PCI_INTR_TYPE_INTX] =
6414 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6415
6416 alloc_retry:
6417 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6418 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6419 return ENXIO;
6420 }
6421 adapter->osdep.nintrs = 1;
6422 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6423 intrbuf, sizeof(intrbuf));
6424 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6425 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6426 device_xname(dev));
6427 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6428 if (adapter->osdep.ihs[0] == NULL) {
6429 aprint_error_dev(dev,"unable to establish %s\n",
6430 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6431 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6432 adapter->osdep.intrs = NULL;
6433 switch (intr_type) {
6434 case PCI_INTR_TYPE_MSI:
6435 /* The next try is for INTx: Disable MSI */
6436 max_type = PCI_INTR_TYPE_INTX;
6437 counts[PCI_INTR_TYPE_INTX] = 1;
6438 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6439 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6440 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6441 goto alloc_retry;
6442 } else
6443 break;
6444 case PCI_INTR_TYPE_INTX:
6445 default:
6446 /* See below */
6447 break;
6448 }
6449 }
6450 if (intr_type == PCI_INTR_TYPE_INTX) {
6451 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6452 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6453 }
6454 if (adapter->osdep.ihs[0] == NULL) {
6455 aprint_error_dev(dev,
6456 "couldn't establish interrupt%s%s\n",
6457 intrstr ? " at " : "", intrstr ? intrstr : "");
6458 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6459 adapter->osdep.intrs = NULL;
6460 return ENXIO;
6461 }
6462 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6463 /*
6464 * Try allocating a fast interrupt and the associated deferred
6465 * processing contexts.
6466 */
6467 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6468 txr->txr_si =
6469 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6470 ixgbe_deferred_mq_start, txr);
6471 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6472 ixgbe_handle_que, que);
6473
6474 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6475 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6476 aprint_error_dev(dev,
6477 "could not establish software interrupts\n");
6478
6479 return ENXIO;
6480 }
6481 /* For simplicity in the handlers */
6482 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6483
6484 return (0);
6485 } /* ixgbe_allocate_legacy */
6486
6487 /************************************************************************
6488 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6489 ************************************************************************/
6490 static int
6491 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6492 {
6493 device_t dev = adapter->dev;
6494 struct ix_queue *que = adapter->queues;
6495 struct tx_ring *txr = adapter->tx_rings;
6496 pci_chipset_tag_t pc;
6497 char intrbuf[PCI_INTRSTR_LEN];
6498 char intr_xname[32];
6499 char wqname[MAXCOMLEN];
6500 const char *intrstr = NULL;
6501 int error, vector = 0;
6502 int cpu_id = 0;
6503 kcpuset_t *affinity;
6504 #ifdef RSS
6505 unsigned int rss_buckets = 0;
6506 kcpuset_t cpu_mask;
6507 #endif
6508
6509 pc = adapter->osdep.pc;
6510 #ifdef RSS
6511 /*
6512 * If we're doing RSS, the number of queues needs to
6513 * match the number of RSS buckets that are configured.
6514 *
6515 * + If there's more queues than RSS buckets, we'll end
6516 * up with queues that get no traffic.
6517 *
6518 * + If there's more RSS buckets than queues, we'll end
6519 * up having multiple RSS buckets map to the same queue,
6520 * so there'll be some contention.
6521 */
6522 rss_buckets = rss_getnumbuckets();
6523 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6524 (adapter->num_queues != rss_buckets)) {
6525 device_printf(dev,
6526 "%s: number of queues (%d) != number of RSS buckets (%d)"
6527 "; performance will be impacted.\n",
6528 __func__, adapter->num_queues, rss_buckets);
6529 }
6530 #endif
6531
6532 adapter->osdep.nintrs = adapter->num_queues + 1;
6533 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6534 adapter->osdep.nintrs) != 0) {
6535 aprint_error_dev(dev,
6536 "failed to allocate MSI-X interrupt\n");
6537 return (ENXIO);
6538 }
6539
6540 kcpuset_create(&affinity, false);
6541 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6542 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6543 device_xname(dev), i);
6544 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6545 sizeof(intrbuf));
6546 #ifdef IXGBE_MPSAFE
6547 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6548 true);
6549 #endif
6550 /* Set the handler function */
6551 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6552 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6553 intr_xname);
6554 if (que->res == NULL) {
6555 aprint_error_dev(dev,
6556 "Failed to register QUE handler\n");
6557 error = ENXIO;
6558 goto err_out;
6559 }
6560 que->msix = vector;
6561 adapter->active_queues |= (u64)(1 << que->msix);
6562
6563 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6564 #ifdef RSS
6565 /*
6566 * The queue ID is used as the RSS layer bucket ID.
6567 * We look up the queue ID -> RSS CPU ID and select
6568 * that.
6569 */
6570 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6571 CPU_SETOF(cpu_id, &cpu_mask);
6572 #endif
6573 } else {
6574 /*
6575 * Bind the MSI-X vector, and thus the
6576 * rings to the corresponding CPU.
6577 *
6578 * This just happens to match the default RSS
6579 * round-robin bucket -> queue -> CPU allocation.
6580 */
6581 if (adapter->num_queues > 1)
6582 cpu_id = i;
6583 }
6584 /* Round-robin affinity */
6585 kcpuset_zero(affinity);
6586 kcpuset_set(affinity, cpu_id % ncpu);
6587 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6588 NULL);
6589 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6590 intrstr);
6591 if (error == 0) {
6592 #if 1 /* def IXGBE_DEBUG */
6593 #ifdef RSS
6594 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6595 cpu_id % ncpu);
6596 #else
6597 aprint_normal(", bound queue %d to cpu %d", i,
6598 cpu_id % ncpu);
6599 #endif
6600 #endif /* IXGBE_DEBUG */
6601 }
6602 aprint_normal("\n");
6603
6604 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6605 txr->txr_si = softint_establish(
6606 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6607 ixgbe_deferred_mq_start, txr);
6608 if (txr->txr_si == NULL) {
6609 aprint_error_dev(dev,
6610 "couldn't establish software interrupt\n");
6611 error = ENXIO;
6612 goto err_out;
6613 }
6614 }
6615 que->que_si
6616 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6617 ixgbe_handle_que, que);
6618 if (que->que_si == NULL) {
6619 aprint_error_dev(dev,
6620 "couldn't establish software interrupt\n");
6621 error = ENXIO;
6622 goto err_out;
6623 }
6624 }
6625 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6626 error = workqueue_create(&adapter->txr_wq, wqname,
6627 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6628 IXGBE_WORKQUEUE_FLAGS);
6629 if (error) {
6630 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6631 goto err_out;
6632 }
6633 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6634
6635 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6636 error = workqueue_create(&adapter->que_wq, wqname,
6637 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6638 IXGBE_WORKQUEUE_FLAGS);
6639 if (error) {
6640 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6641 goto err_out;
6642 }
6643
6644 /* and Link */
6645 cpu_id++;
6646 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6647 adapter->vector = vector;
6648 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6649 sizeof(intrbuf));
6650 #ifdef IXGBE_MPSAFE
6651 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6652 true);
6653 #endif
6654 /* Set the link handler function */
6655 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6656 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6657 intr_xname);
6658 if (adapter->osdep.ihs[vector] == NULL) {
6659 aprint_error_dev(dev, "Failed to register LINK handler\n");
6660 error = ENXIO;
6661 goto err_out;
6662 }
6663 /* Round-robin affinity */
6664 kcpuset_zero(affinity);
6665 kcpuset_set(affinity, cpu_id % ncpu);
6666 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6667 NULL);
6668
6669 aprint_normal_dev(dev,
6670 "for link, interrupting at %s", intrstr);
6671 if (error == 0)
6672 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6673 else
6674 aprint_normal("\n");
6675
6676 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6677 adapter->mbx_si =
6678 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6679 ixgbe_handle_mbx, adapter);
6680 if (adapter->mbx_si == NULL) {
6681 aprint_error_dev(dev,
6682 "could not establish software interrupts\n");
6683
6684 error = ENXIO;
6685 goto err_out;
6686 }
6687 }
6688
6689 kcpuset_destroy(affinity);
6690 aprint_normal_dev(dev,
6691 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6692
6693 return (0);
6694
6695 err_out:
6696 kcpuset_destroy(affinity);
6697 ixgbe_free_softint(adapter);
6698 ixgbe_free_pciintr_resources(adapter);
6699 return (error);
6700 } /* ixgbe_allocate_msix */
6701
6702 /************************************************************************
6703 * ixgbe_configure_interrupts
6704 *
6705 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6706 * This will also depend on user settings.
6707 ************************************************************************/
6708 static int
6709 ixgbe_configure_interrupts(struct adapter *adapter)
6710 {
6711 device_t dev = adapter->dev;
6712 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6713 int want, queues, msgs;
6714
6715 /* Default to 1 queue if MSI-X setup fails */
6716 adapter->num_queues = 1;
6717
6718 /* Override by tuneable */
6719 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6720 goto msi;
6721
6722 /*
6723 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6724 * interrupt slot.
6725 */
6726 if (ncpu == 1)
6727 goto msi;
6728
6729 /* First try MSI-X */
6730 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6731 msgs = MIN(msgs, IXG_MAX_NINTR);
6732 if (msgs < 2)
6733 goto msi;
6734
6735 adapter->msix_mem = (void *)1; /* XXX */
6736
6737 /* Figure out a reasonable auto config value */
6738 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6739
6740 #ifdef RSS
6741 /* If we're doing RSS, clamp at the number of RSS buckets */
6742 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6743 queues = uimin(queues, rss_getnumbuckets());
6744 #endif
6745 if (ixgbe_num_queues > queues) {
6746 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6747 ixgbe_num_queues = queues;
6748 }
6749
6750 if (ixgbe_num_queues != 0)
6751 queues = ixgbe_num_queues;
6752 else
6753 queues = uimin(queues,
6754 uimin(mac->max_tx_queues, mac->max_rx_queues));
6755
6756 /* reflect correct sysctl value */
6757 ixgbe_num_queues = queues;
6758
6759 /*
6760 * Want one vector (RX/TX pair) per queue
6761 * plus an additional for Link.
6762 */
6763 want = queues + 1;
6764 if (msgs >= want)
6765 msgs = want;
6766 else {
6767 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6768 "%d vectors but %d queues wanted!\n",
6769 msgs, want);
6770 goto msi;
6771 }
6772 adapter->num_queues = queues;
6773 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6774 return (0);
6775
6776 /*
6777 * MSI-X allocation failed or provided us with
6778 * less vectors than needed. Free MSI-X resources
6779 * and we'll try enabling MSI.
6780 */
6781 msi:
6782 /* Without MSI-X, some features are no longer supported */
6783 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6784 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6785 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6786 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6787
6788 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6789 adapter->msix_mem = NULL; /* XXX */
6790 if (msgs > 1)
6791 msgs = 1;
6792 if (msgs != 0) {
6793 msgs = 1;
6794 adapter->feat_en |= IXGBE_FEATURE_MSI;
6795 return (0);
6796 }
6797
6798 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6799 aprint_error_dev(dev,
6800 "Device does not support legacy interrupts.\n");
6801 return 1;
6802 }
6803
6804 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6805
6806 return (0);
6807 } /* ixgbe_configure_interrupts */
6808
6809
6810 /************************************************************************
6811 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6812 *
6813 * Done outside of interrupt context since the driver might sleep
6814 ************************************************************************/
6815 static void
6816 ixgbe_handle_link(void *context)
6817 {
6818 struct adapter *adapter = context;
6819 struct ixgbe_hw *hw = &adapter->hw;
6820
6821 IXGBE_CORE_LOCK(adapter);
6822 ++adapter->link_sicount.ev_count;
6823 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6824 ixgbe_update_link_status(adapter);
6825
6826 /* Re-enable link interrupts */
6827 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6828
6829 IXGBE_CORE_UNLOCK(adapter);
6830 } /* ixgbe_handle_link */
6831
6832 #if 0
6833 /************************************************************************
6834 * ixgbe_rearm_queues
6835 ************************************************************************/
6836 static __inline void
6837 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6838 {
6839 u32 mask;
6840
6841 switch (adapter->hw.mac.type) {
6842 case ixgbe_mac_82598EB:
6843 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6845 break;
6846 case ixgbe_mac_82599EB:
6847 case ixgbe_mac_X540:
6848 case ixgbe_mac_X550:
6849 case ixgbe_mac_X550EM_x:
6850 case ixgbe_mac_X550EM_a:
6851 mask = (queues & 0xFFFFFFFF);
6852 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6853 mask = (queues >> 32);
6854 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6855 break;
6856 default:
6857 break;
6858 }
6859 } /* ixgbe_rearm_queues */
6860 #endif
6861