1 1.340 msaitoh /*	$NetBSD: ixgbe.c,v 1.340 2023/10/11 09:43:17 msaitoh Exp $ */
2 1.99 msaitoh
3 1.1 dyoung /******************************************************************************
4 1.1 dyoung
5 1.99 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 dyoung All rights reserved.
7 1.99 msaitoh
8 1.99 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 dyoung modification, are permitted provided that the following conditions are met:
10 1.99 msaitoh
11 1.99 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 dyoung this list of conditions and the following disclaimer.
13 1.99 msaitoh
14 1.99 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.99 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 dyoung documentation and/or other materials provided with the distribution.
17 1.99 msaitoh
18 1.99 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.99 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 dyoung this software without specific prior written permission.
21 1.99 msaitoh
22 1.1 dyoung THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.99 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.99 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.99 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.99 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.99 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.99 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.99 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.99 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 dyoung ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 dyoung POSSIBILITY OF SUCH DAMAGE.
33 1.1 dyoung
34 1.1 dyoung ******************************************************************************/
35 1.145 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 1.99 msaitoh
37 1.1 dyoung /*
38 1.1 dyoung * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 1.1 dyoung * All rights reserved.
40 1.1 dyoung *
41 1.1 dyoung * This code is derived from software contributed to The NetBSD Foundation
42 1.1 dyoung * by Coyote Point Systems, Inc.
43 1.1 dyoung *
44 1.1 dyoung * Redistribution and use in source and binary forms, with or without
45 1.1 dyoung * modification, are permitted provided that the following conditions
46 1.1 dyoung * are met:
47 1.1 dyoung * 1. Redistributions of source code must retain the above copyright
48 1.1 dyoung * notice, this list of conditions and the following disclaimer.
49 1.1 dyoung * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 dyoung * notice, this list of conditions and the following disclaimer in the
51 1.1 dyoung * documentation and/or other materials provided with the distribution.
52 1.1 dyoung *
53 1.1 dyoung * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 1.1 dyoung * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 1.1 dyoung * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 1.1 dyoung * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 1.1 dyoung * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 1.1 dyoung * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 1.1 dyoung * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 1.1 dyoung * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 1.1 dyoung * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 1.1 dyoung * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 1.1 dyoung * POSSIBILITY OF SUCH DAMAGE.
64 1.1 dyoung */
65 1.1 dyoung
66 1.281 msaitoh #include <sys/cdefs.h>
67 1.340 msaitoh __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.340 2023/10/11 09:43:17 msaitoh Exp $");
68 1.281 msaitoh
69 1.80 msaitoh #ifdef _KERNEL_OPT
70 1.1 dyoung #include "opt_inet.h"
71 1.22 msaitoh #include "opt_inet6.h"
72 1.80 msaitoh #include "opt_net_mpsafe.h"
73 1.80 msaitoh #endif
74 1.1 dyoung
75 1.1 dyoung #include "ixgbe.h"
76 1.251 msaitoh #include "ixgbe_phy.h"
77 1.135 msaitoh #include "ixgbe_sriov.h"
78 1.1 dyoung
79 1.33 msaitoh #include <sys/cprng.h>
80 1.95 msaitoh #include <dev/mii/mii.h>
81 1.95 msaitoh #include <dev/mii/miivar.h>
82 1.33 msaitoh
83 1.99 msaitoh /************************************************************************
84 1.99 msaitoh * Driver version
85 1.99 msaitoh ************************************************************************/
86 1.159 maxv static const char ixgbe_driver_version[] = "4.0.1-k";
87 1.301 msaitoh /* XXX NetBSD: + 3.3.24 */
88 1.1 dyoung
89 1.99 msaitoh /************************************************************************
90 1.99 msaitoh * PCI Device ID Table
91 1.1 dyoung *
92 1.99 msaitoh * Used by probe to select devices to load on
93 1.99 msaitoh * Last field stores an index into ixgbe_strings
94 1.99 msaitoh * Last entry must be all 0s
95 1.1 dyoung *
96 1.99 msaitoh * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 1.99 msaitoh ************************************************************************/
98 1.159 maxv static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
99 1.1 dyoung {
100 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
101 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
102 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
103 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
104 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
105 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
106 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
107 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
108 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
109 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
110 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
111 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
112 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
113 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
114 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
115 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
116 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
117 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
118 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
119 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
120 1.334 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0},
121 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
122 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
123 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
124 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
125 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
126 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
127 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
128 1.24 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
129 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
130 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
131 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
132 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
133 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
134 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
135 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
136 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
137 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
138 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
139 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
140 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
141 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
142 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
143 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
144 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
145 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
146 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
147 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
148 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
149 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
150 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
151 1.1 dyoung /* required last entry */
152 1.1 dyoung {0, 0, 0, 0, 0}
153 1.1 dyoung };
154 1.1 dyoung
155 1.99 msaitoh /************************************************************************
156 1.99 msaitoh * Table of branding strings
157 1.99 msaitoh ************************************************************************/
158 1.1 dyoung static const char *ixgbe_strings[] = {
159 1.1 dyoung "Intel(R) PRO/10GbE PCI-Express Network Driver"
160 1.1 dyoung };
161 1.1 dyoung
162 1.99 msaitoh /************************************************************************
163 1.99 msaitoh * Function prototypes
164 1.99 msaitoh ************************************************************************/
165 1.186 msaitoh static int ixgbe_probe(device_t, cfdata_t, void *);
166 1.333 msaitoh static void ixgbe_quirks(struct ixgbe_softc *);
167 1.186 msaitoh static void ixgbe_attach(device_t, device_t, void *);
168 1.186 msaitoh static int ixgbe_detach(device_t, int);
169 1.1 dyoung #if 0
170 1.186 msaitoh static int ixgbe_shutdown(device_t);
171 1.1 dyoung #endif
172 1.44 msaitoh static bool ixgbe_suspend(device_t, const pmf_qual_t *);
173 1.44 msaitoh static bool ixgbe_resume(device_t, const pmf_qual_t *);
174 1.98 msaitoh static int ixgbe_ifflags_cb(struct ethercom *);
175 1.186 msaitoh static int ixgbe_ioctl(struct ifnet *, u_long, void *);
176 1.1 dyoung static int ixgbe_init(struct ifnet *);
177 1.333 msaitoh static void ixgbe_init_locked(struct ixgbe_softc *);
178 1.232 msaitoh static void ixgbe_ifstop(struct ifnet *, int);
179 1.252 msaitoh static void ixgbe_stop_locked(void *);
180 1.333 msaitoh static void ixgbe_init_device_features(struct ixgbe_softc *);
181 1.333 msaitoh static int ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
182 1.333 msaitoh static void ixgbe_add_media_types(struct ixgbe_softc *);
183 1.186 msaitoh static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
184 1.186 msaitoh static int ixgbe_media_change(struct ifnet *);
185 1.333 msaitoh static int ixgbe_allocate_pci_resources(struct ixgbe_softc *,
186 1.1 dyoung const struct pci_attach_args *);
187 1.333 msaitoh static void ixgbe_free_deferred_handlers(struct ixgbe_softc *);
188 1.333 msaitoh static void ixgbe_get_slot_info(struct ixgbe_softc *);
189 1.333 msaitoh static int ixgbe_allocate_msix(struct ixgbe_softc *,
190 1.1 dyoung const struct pci_attach_args *);
191 1.333 msaitoh static int ixgbe_allocate_legacy(struct ixgbe_softc *,
192 1.1 dyoung const struct pci_attach_args *);
193 1.333 msaitoh static int ixgbe_configure_interrupts(struct ixgbe_softc *);
194 1.333 msaitoh static void ixgbe_free_pciintr_resources(struct ixgbe_softc *);
195 1.333 msaitoh static void ixgbe_free_pci_resources(struct ixgbe_softc *);
196 1.1 dyoung static void ixgbe_local_timer(void *);
197 1.233 msaitoh static void ixgbe_handle_timer(struct work *, void *);
198 1.186 msaitoh static void ixgbe_recovery_mode_timer(void *);
199 1.233 msaitoh static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
200 1.333 msaitoh static int ixgbe_setup_interface(device_t, struct ixgbe_softc *);
201 1.333 msaitoh static void ixgbe_config_gpie(struct ixgbe_softc *);
202 1.333 msaitoh static void ixgbe_config_dmac(struct ixgbe_softc *);
203 1.333 msaitoh static void ixgbe_config_delay_values(struct ixgbe_softc *);
204 1.333 msaitoh static void ixgbe_schedule_admin_tasklet(struct ixgbe_softc *);
205 1.333 msaitoh static void ixgbe_config_link(struct ixgbe_softc *);
206 1.333 msaitoh static void ixgbe_check_wol_support(struct ixgbe_softc *);
207 1.333 msaitoh static int ixgbe_setup_low_power_mode(struct ixgbe_softc *);
208 1.161 kamil #if 0
209 1.333 msaitoh static void ixgbe_rearm_queues(struct ixgbe_softc *, u64);
210 1.161 kamil #endif
211 1.1 dyoung
212 1.333 msaitoh static void ixgbe_initialize_transmit_units(struct ixgbe_softc *);
213 1.333 msaitoh static void ixgbe_initialize_receive_units(struct ixgbe_softc *);
214 1.333 msaitoh static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
215 1.333 msaitoh static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
216 1.333 msaitoh static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
217 1.333 msaitoh
218 1.333 msaitoh static void ixgbe_enable_intr(struct ixgbe_softc *);
219 1.333 msaitoh static void ixgbe_disable_intr(struct ixgbe_softc *);
220 1.333 msaitoh static void ixgbe_update_stats_counters(struct ixgbe_softc *);
221 1.333 msaitoh static void ixgbe_set_rxfilter(struct ixgbe_softc *);
222 1.333 msaitoh static void ixgbe_update_link_status(struct ixgbe_softc *);
223 1.333 msaitoh static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
224 1.333 msaitoh static void ixgbe_configure_ivars(struct ixgbe_softc *);
225 1.1 dyoung static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
226 1.333 msaitoh static void ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
227 1.1 dyoung
228 1.333 msaitoh static void ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *);
229 1.333 msaitoh static void ixgbe_setup_vlan_hw_support(struct ixgbe_softc *);
230 1.193 msaitoh static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
231 1.333 msaitoh static int ixgbe_register_vlan(struct ixgbe_softc *, u16);
232 1.333 msaitoh static int ixgbe_unregister_vlan(struct ixgbe_softc *, u16);
233 1.1 dyoung
234 1.333 msaitoh static void ixgbe_add_device_sysctls(struct ixgbe_softc *);
235 1.333 msaitoh static void ixgbe_add_hw_stats(struct ixgbe_softc *);
236 1.333 msaitoh static void ixgbe_clear_evcnt(struct ixgbe_softc *);
237 1.333 msaitoh static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
238 1.333 msaitoh static int ixgbe_set_advertise(struct ixgbe_softc *, int);
239 1.333 msaitoh static int ixgbe_get_default_advertise(struct ixgbe_softc *);
240 1.44 msaitoh
241 1.44 msaitoh /* Sysctl handlers */
242 1.52 msaitoh static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
243 1.52 msaitoh static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
244 1.186 msaitoh static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
245 1.44 msaitoh static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
246 1.44 msaitoh static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
247 1.44 msaitoh static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
248 1.48 msaitoh #ifdef IXGBE_DEBUG
249 1.48 msaitoh static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
250 1.48 msaitoh static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
251 1.48 msaitoh #endif
252 1.186 msaitoh static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
253 1.287 msaitoh static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
254 1.186 msaitoh static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
255 1.186 msaitoh static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
256 1.186 msaitoh static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
257 1.186 msaitoh static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
258 1.186 msaitoh static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
259 1.158 msaitoh static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
260 1.286 msaitoh static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
261 1.313 msaitoh static int ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
262 1.313 msaitoh static int ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
263 1.44 msaitoh static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
264 1.44 msaitoh static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
265 1.1 dyoung
266 1.277 msaitoh /* Interrupt functions */
267 1.34 msaitoh static int ixgbe_msix_que(void *);
268 1.233 msaitoh static int ixgbe_msix_admin(void *);
269 1.333 msaitoh static void ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *);
270 1.277 msaitoh static int ixgbe_legacy_irq(void *);
271 1.1 dyoung
272 1.233 msaitoh /* Event handlers running on workqueue */
273 1.1 dyoung static void ixgbe_handle_que(void *);
274 1.1 dyoung static void ixgbe_handle_link(void *);
275 1.233 msaitoh static void ixgbe_handle_msf(void *);
276 1.273 msaitoh static void ixgbe_handle_mod(void *, bool);
277 1.44 msaitoh static void ixgbe_handle_phy(void *);
278 1.1 dyoung
279 1.233 msaitoh /* Deferred workqueue handlers */
280 1.233 msaitoh static void ixgbe_handle_admin(struct work *, void *);
281 1.128 knakahar static void ixgbe_handle_que_work(struct work *, void *);
282 1.128 knakahar
283 1.159 maxv static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
284 1.1 dyoung
285 1.99 msaitoh /************************************************************************
286 1.99 msaitoh * NetBSD Device Interface Entry Points
287 1.99 msaitoh ************************************************************************/
288 1.333 msaitoh CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc),
289 1.1 dyoung ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
290 1.1 dyoung DVF_DETACH_SHUTDOWN);
291 1.1 dyoung
292 1.1 dyoung #if 0
293 1.44 msaitoh devclass_t ix_devclass;
294 1.44 msaitoh DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
295 1.1 dyoung
296 1.44 msaitoh MODULE_DEPEND(ix, pci, 1, 1, 1);
297 1.44 msaitoh MODULE_DEPEND(ix, ether, 1, 1, 1);
298 1.115 msaitoh #ifdef DEV_NETMAP
299 1.115 msaitoh MODULE_DEPEND(ix, netmap, 1, 1, 1);
300 1.115 msaitoh #endif
301 1.1 dyoung #endif
302 1.1 dyoung
303 1.1 dyoung /*
304 1.99 msaitoh * TUNEABLE PARAMETERS:
305 1.99 msaitoh */
306 1.1 dyoung
307 1.1 dyoung /*
308 1.99 msaitoh  * AIM: Adaptive Interrupt Moderation,
309 1.99 msaitoh * which means that the interrupt rate
310 1.99 msaitoh * is varied over time based on the
311 1.99 msaitoh * traffic for that interrupt vector
312 1.99 msaitoh */
313 1.73 msaitoh static bool ixgbe_enable_aim = true;
314 1.52 msaitoh #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
315 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
316 1.52 msaitoh "Enable adaptive interrupt moderation");
317 1.1 dyoung
318 1.22 msaitoh static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
319 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
320 1.52 msaitoh &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
321 1.1 dyoung
322 1.1 dyoung /* How many packets rxeof tries to clean at a time */
323 1.1 dyoung static int ixgbe_rx_process_limit = 256;
324 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
325 1.99 msaitoh &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
326 1.1 dyoung
327 1.28 msaitoh /* How many packets txeof tries to clean at a time */
328 1.28 msaitoh static int ixgbe_tx_process_limit = 256;
329 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
330 1.52 msaitoh &ixgbe_tx_process_limit, 0,
331 1.99 msaitoh "Maximum number of sent packets to process at a time, -1 means unlimited");
332 1.52 msaitoh
333 1.52 msaitoh /* Flow control setting, default to full */
334 1.52 msaitoh static int ixgbe_flow_control = ixgbe_fc_full;
335 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
336 1.52 msaitoh &ixgbe_flow_control, 0, "Default flow control used for all adapters");
337 1.52 msaitoh
338 1.179 msaitoh /* Whether TX/RX packet processing uses a workqueue (true) or a softint (false) */
339 1.128 knakahar static bool ixgbe_txrx_workqueue = false;
340 1.128 knakahar
341 1.1 dyoung /*
342 1.99 msaitoh  * Smart speed setting, default to on.
343 1.99 msaitoh  * This only works as a compile-time option
344 1.99 msaitoh  * right now because it is set during attach;
345 1.99 msaitoh  * set this to 'ixgbe_smart_speed_off' to
346 1.99 msaitoh  * disable.
347 1.99 msaitoh */
348 1.1 dyoung static int ixgbe_smart_speed = ixgbe_smart_speed_on;
349 1.1 dyoung
350 1.1 dyoung /*
351 1.99 msaitoh * MSI-X should be the default for best performance,
352 1.1 dyoung * but this allows it to be forced off for testing.
353 1.1 dyoung */
354 1.1 dyoung static int ixgbe_enable_msix = 1;
355 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
356 1.52 msaitoh "Enable MSI-X interrupts");
357 1.1 dyoung
358 1.1 dyoung /*
359 1.1 dyoung  * Number of queues; if set to 0, it
360 1.1 dyoung  * is autoconfigured based on the
361 1.1 dyoung  * number of CPUs, with a max of 8. This
362 1.220 pgoyette  * can be overridden manually here.
363 1.1 dyoung */
364 1.62 msaitoh static int ixgbe_num_queues = 0;
365 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
366 1.52 msaitoh "Number of queues to configure, 0 indicates autoconfigure");
367 1.1 dyoung
368 1.1 dyoung /*
369 1.99 msaitoh  * Number of TX descriptors per ring;
370 1.99 msaitoh  * set higher than RX as this seems
371 1.99 msaitoh  * the better-performing choice.
372 1.99 msaitoh */
373 1.335 msaitoh static int ixgbe_txd = DEFAULT_TXD;
374 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
375 1.52 msaitoh "Number of transmit descriptors per queue");
376 1.1 dyoung
377 1.1 dyoung /* Number of RX descriptors per ring */
378 1.335 msaitoh static int ixgbe_rxd = DEFAULT_RXD;
379 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
380 1.52 msaitoh "Number of receive descriptors per queue");
381 1.33 msaitoh
382 1.33 msaitoh /*
383 1.99 msaitoh  * Setting this allows the use
384 1.99 msaitoh  * of unsupported SFP+ modules; note that
385 1.99 msaitoh  * by doing so you are on your own :)
386 1.99 msaitoh */
387 1.35 msaitoh static int allow_unsupported_sfp = false;
388 1.52 msaitoh #define TUNABLE_INT(__x, __y)
389 1.52 msaitoh TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
390 1.1 dyoung
391 1.99 msaitoh /*
392 1.99 msaitoh * Not sure if Flow Director is fully baked,
393 1.99 msaitoh * so we'll default to turning it off.
394 1.99 msaitoh */
395 1.99 msaitoh static int ixgbe_enable_fdir = 0;
396 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
397 1.99 msaitoh "Enable Flow Director");
398 1.99 msaitoh
399 1.99 msaitoh /* Legacy Transmit (single queue) */
400 1.99 msaitoh static int ixgbe_enable_legacy_tx = 0;
401 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
402 1.99 msaitoh &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
403 1.99 msaitoh
404 1.99 msaitoh /* Receive-Side Scaling */
405 1.99 msaitoh static int ixgbe_enable_rss = 1;
406 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
407 1.99 msaitoh "Enable Receive-Side Scaling (RSS)");
408 1.99 msaitoh
409 1.99 msaitoh #if 0
410 1.99 msaitoh static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
411 1.99 msaitoh static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
412 1.1 dyoung #endif
413 1.1 dyoung
414 1.80 msaitoh #ifdef NET_MPSAFE
415 1.80 msaitoh #define IXGBE_MPSAFE 1
416 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
417 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
418 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
419 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
420 1.80 msaitoh #else
421 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS 0
422 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS 0
423 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
424 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS 0
425 1.80 msaitoh #endif
426 1.128 knakahar #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
427 1.80 msaitoh
428 1.312 msaitoh /* Interval between reports of errors */
429 1.312 msaitoh static const struct timeval ixgbe_errlog_intrvl = { 60, 0 }; /* 60s */
430 1.312 msaitoh
431 1.99 msaitoh /************************************************************************
432 1.99 msaitoh * ixgbe_initialize_rss_mapping
433 1.99 msaitoh ************************************************************************/
434 1.98 msaitoh static void
435 1.333 msaitoh ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
436 1.1 dyoung {
437 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
438 1.186 msaitoh u32 reta = 0, mrqc, rss_key[10];
439 1.186 msaitoh int queue_id, table_size, index_mult;
440 1.186 msaitoh int i, j;
441 1.186 msaitoh u32 rss_hash_config;
442 1.99 msaitoh
443 1.122 knakahar 	/* Force the use of the default RSS key. */
444 1.122 knakahar #ifdef __NetBSD__
445 1.122 knakahar rss_getkey((uint8_t *) &rss_key);
446 1.122 knakahar #else
447 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
448 1.99 msaitoh /* Fetch the configured RSS key */
449 1.99 msaitoh rss_getkey((uint8_t *) &rss_key);
450 1.99 msaitoh } else {
451 1.99 msaitoh /* set up random bits */
452 1.99 msaitoh cprng_fast(&rss_key, sizeof(rss_key));
453 1.99 msaitoh }
454 1.122 knakahar #endif
455 1.1 dyoung
456 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
457 1.98 msaitoh index_mult = 0x1;
458 1.98 msaitoh table_size = 128;
459 1.333 msaitoh switch (sc->hw.mac.type) {
460 1.98 msaitoh case ixgbe_mac_82598EB:
461 1.98 msaitoh index_mult = 0x11;
462 1.98 msaitoh break;
463 1.98 msaitoh case ixgbe_mac_X550:
464 1.98 msaitoh case ixgbe_mac_X550EM_x:
465 1.99 msaitoh case ixgbe_mac_X550EM_a:
466 1.98 msaitoh table_size = 512;
467 1.98 msaitoh break;
468 1.98 msaitoh default:
469 1.98 msaitoh break;
470 1.98 msaitoh }
471 1.1 dyoung
472 1.98 msaitoh /* Set up the redirection table */
473 1.99 msaitoh for (i = 0, j = 0; i < table_size; i++, j++) {
474 1.333 msaitoh if (j == sc->num_queues)
475 1.99 msaitoh j = 0;
476 1.99 msaitoh
477 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
478 1.99 msaitoh /*
479 1.99 msaitoh * Fetch the RSS bucket id for the given indirection
480 1.99 msaitoh * entry. Cap it at the number of configured buckets
481 1.99 msaitoh * (which is num_queues.)
482 1.99 msaitoh */
483 1.99 msaitoh queue_id = rss_get_indirection_to_bucket(i);
484 1.333 msaitoh queue_id = queue_id % sc->num_queues;
485 1.99 msaitoh } else
486 1.99 msaitoh queue_id = (j * index_mult);
487 1.99 msaitoh
488 1.98 msaitoh /*
489 1.98 msaitoh * The low 8 bits are for hash value (n+0);
490 1.98 msaitoh * The next 8 bits are for hash value (n+1), etc.
491 1.98 msaitoh */
492 1.98 msaitoh reta = reta >> 8;
493 1.98 msaitoh reta = reta | (((uint32_t) queue_id) << 24);
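		/*
		 * Each 32-bit RETA register holds four one-byte entries, so
		 * write the accumulated value out on every fourth entry.
		 * Entries 128 and above go to the extended ERETA registers,
		 * which exist only on MACs with a 512-entry table.
		 */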
494 1.98 msaitoh if ((i & 3) == 3) {
495 1.98 msaitoh if (i < 128)
496 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
497 1.98 msaitoh else
498 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
499 1.99 msaitoh reta);
500 1.98 msaitoh reta = 0;
501 1.98 msaitoh }
502 1.98 msaitoh }
503 1.1 dyoung
504 1.98 msaitoh /* Now fill our hash function seeds */
505 1.99 msaitoh for (i = 0; i < 10; i++)
506 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
507 1.1 dyoung
508 1.98 msaitoh /* Perform hash on these packet types */
509 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
510 1.99 msaitoh rss_hash_config = rss_gethashconfig();
511 1.99 msaitoh else {
512 1.99 msaitoh /*
513 1.99 msaitoh * Disable UDP - IP fragments aren't currently being handled
514 1.99 msaitoh * and so we end up with a mix of 2-tuple and 4-tuple
515 1.99 msaitoh * traffic.
516 1.99 msaitoh */
517 1.99 msaitoh rss_hash_config = RSS_HASHTYPE_RSS_IPV4
518 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV4
519 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6
520 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6
521 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6_EX
522 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
523 1.99 msaitoh }
524 1.99 msaitoh
525 1.98 msaitoh mrqc = IXGBE_MRQC_RSSEN;
526 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
527 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
528 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
529 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
530 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
531 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
532 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
533 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
534 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
535 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
536 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
537 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
538 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
539 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
540 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
541 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
542 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
543 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
544 1.333 msaitoh mrqc |= ixgbe_get_mrqc(sc->iov_mode);
545 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
546 1.99 msaitoh } /* ixgbe_initialize_rss_mapping */
547 1.1 dyoung
548 1.99 msaitoh /************************************************************************
549 1.99 msaitoh * ixgbe_initialize_receive_units - Setup receive registers and features.
550 1.99 msaitoh ************************************************************************/
551 1.98 msaitoh #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
552 1.185 msaitoh
553 1.1 dyoung static void
554 1.333 msaitoh ixgbe_initialize_receive_units(struct ixgbe_softc *sc)
555 1.1 dyoung {
556 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
557 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
558 1.333 msaitoh struct ifnet *ifp = sc->ifp;
559 1.186 msaitoh int i, j;
560 1.98 msaitoh u32 bufsz, fctrl, srrctl, rxcsum;
561 1.98 msaitoh u32 hlreg;
562 1.98 msaitoh
563 1.98 msaitoh /*
564 1.98 msaitoh * Make sure receives are disabled while
565 1.98 msaitoh * setting up the descriptor ring
566 1.98 msaitoh */
567 1.98 msaitoh ixgbe_disable_rx(hw);
568 1.1 dyoung
569 1.98 msaitoh /* Enable broadcasts */
570 1.98 msaitoh fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
571 1.98 msaitoh fctrl |= IXGBE_FCTRL_BAM;
572 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB) {
573 1.98 msaitoh fctrl |= IXGBE_FCTRL_DPF;
574 1.98 msaitoh fctrl |= IXGBE_FCTRL_PMCF;
575 1.98 msaitoh }
576 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
577 1.1 dyoung
578 1.98 msaitoh /* Set for Jumbo Frames? */
579 1.98 msaitoh hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
580 1.98 msaitoh if (ifp->if_mtu > ETHERMTU)
581 1.98 msaitoh hlreg |= IXGBE_HLREG0_JUMBOEN;
582 1.98 msaitoh else
583 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_JUMBOEN;
584 1.99 msaitoh
585 1.98 msaitoh #ifdef DEV_NETMAP
586 1.99 msaitoh /* CRC stripping is conditional in Netmap */
587 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
588 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP) &&
589 1.99 msaitoh !ix_crcstrip)
590 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
591 1.60 msaitoh else
592 1.99 msaitoh #endif /* DEV_NETMAP */
593 1.98 msaitoh hlreg |= IXGBE_HLREG0_RXCRCSTRP;
594 1.99 msaitoh
595 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
596 1.1 dyoung
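	/*
	 * SRRCTL.BSIZEPKT is expressed in units of
	 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes, so round the receive
	 * buffer size up to the next unit boundary before shifting.
	 */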
597 1.333 msaitoh bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
598 1.99 msaitoh IXGBE_SRRCTL_BSIZEPKT_SHIFT;
599 1.1 dyoung
600 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++) {
601 1.98 msaitoh u64 rdba = rxr->rxdma.dma_paddr;
602 1.152 msaitoh u32 reg;
603 1.98 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
604 1.98 msaitoh int regshift = i % 4; /* 4 bits per 1 queue */
605 1.99 msaitoh j = rxr->me;
606 1.1 dyoung
607 1.98 msaitoh /* Setup the Base and Length of the Rx Descriptor Ring */
608 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
609 1.98 msaitoh (rdba & 0x00000000ffffffffULL));
610 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
611 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
612 1.333 msaitoh sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
613 1.1 dyoung
614 1.98 msaitoh /* Set up the SRRCTL register */
615 1.98 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
616 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
617 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
618 1.98 msaitoh srrctl |= bufsz;
619 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
620 1.47 msaitoh
621 1.98 msaitoh /* Set RQSMR (Receive Queue Statistic Mapping) register */
622 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
623 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
624 1.98 msaitoh reg |= i << (regshift * 8);
625 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
626 1.98 msaitoh
627 1.98 msaitoh /*
628 1.98 msaitoh * Set DROP_EN iff we have no flow control and >1 queue.
629 1.98 msaitoh * Note that srrctl was cleared shortly before during reset,
630 1.98 msaitoh * so we do not need to clear the bit, but do it just in case
631 1.98 msaitoh * this code is moved elsewhere.
632 1.98 msaitoh */
633 1.333 msaitoh if ((sc->num_queues > 1) &&
634 1.333 msaitoh (sc->hw.fc.requested_mode == ixgbe_fc_none))
635 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
636 1.319 msaitoh else
637 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
638 1.98 msaitoh
639 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
640 1.98 msaitoh
641 1.98 msaitoh /* Setup the HW Rx Head and Tail Descriptor Pointers */
642 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
643 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
644 1.98 msaitoh
645 1.98 msaitoh /* Set the driver rx tail address */
646 1.98 msaitoh rxr->tail = IXGBE_RDT(rxr->me);
647 1.98 msaitoh }
648 1.98 msaitoh
649 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
650 1.99 msaitoh u32 psrtype = IXGBE_PSRTYPE_TCPHDR
651 1.186 msaitoh | IXGBE_PSRTYPE_UDPHDR
652 1.186 msaitoh | IXGBE_PSRTYPE_IPV4HDR
653 1.186 msaitoh | IXGBE_PSRTYPE_IPV6HDR;
654 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
655 1.98 msaitoh }
656 1.98 msaitoh
657 1.98 msaitoh rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
658 1.98 msaitoh
659 1.333 msaitoh ixgbe_initialize_rss_mapping(sc);
660 1.98 msaitoh
661 1.333 msaitoh if (sc->num_queues > 1) {
662 1.98 msaitoh /* RSS and RX IPP Checksum are mutually exclusive */
663 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
664 1.98 msaitoh }
665 1.98 msaitoh
666 1.98 msaitoh if (ifp->if_capenable & IFCAP_RXCSUM)
667 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
668 1.98 msaitoh
669 1.98 msaitoh /* This is useful for calculating UDP/IP fragment checksums */
670 1.98 msaitoh if (!(rxcsum & IXGBE_RXCSUM_PCSD))
671 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_IPPCSE;
672 1.98 msaitoh
673 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
674 1.98 msaitoh
675 1.99 msaitoh } /* ixgbe_initialize_receive_units */
676 1.98 msaitoh
677 1.99 msaitoh /************************************************************************
678 1.99 msaitoh * ixgbe_initialize_transmit_units - Enable transmit units.
679 1.99 msaitoh ************************************************************************/
680 1.98 msaitoh static void
681 1.333 msaitoh ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
682 1.98 msaitoh {
683 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
684 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
685 1.144 msaitoh int i;
686 1.98 msaitoh
687 1.225 msaitoh INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
688 1.225 msaitoh
689 1.98 msaitoh /* Setup the Base and Length of the Tx Descriptor Ring */
690 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, txr++) {
691 1.99 msaitoh u64 tdba = txr->txdma.dma_paddr;
692 1.99 msaitoh u32 txctrl = 0;
693 1.152 msaitoh u32 tqsmreg, reg;
694 1.152 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
695 1.152 msaitoh int regshift = i % 4; /* 4 bits per 1 queue */
696 1.99 msaitoh int j = txr->me;
697 1.98 msaitoh
698 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
699 1.98 msaitoh (tdba & 0x00000000ffffffffULL));
700 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
701 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
702 1.333 msaitoh sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
703 1.98 msaitoh
704 1.152 msaitoh /*
705 1.152 msaitoh * Set TQSMR (Transmit Queue Statistic Mapping) register.
706 1.152 msaitoh * Register location is different between 82598 and others.
707 1.152 msaitoh */
708 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
709 1.152 msaitoh tqsmreg = IXGBE_TQSMR(regnum);
710 1.152 msaitoh else
711 1.152 msaitoh tqsmreg = IXGBE_TQSM(regnum);
712 1.152 msaitoh reg = IXGBE_READ_REG(hw, tqsmreg);
713 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
714 1.152 msaitoh reg |= i << (regshift * 8);
715 1.152 msaitoh IXGBE_WRITE_REG(hw, tqsmreg, reg);
716 1.152 msaitoh
717 1.98 msaitoh /* Setup the HW Tx Head and Tail descriptor pointers */
718 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
719 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
720 1.98 msaitoh
721 1.98 msaitoh /* Cache the tail address */
722 1.98 msaitoh txr->tail = IXGBE_TDT(j);
723 1.98 msaitoh
724 1.155 msaitoh txr->txr_no_space = false;
725 1.155 msaitoh
726 1.98 msaitoh /* Disable Head Writeback */
727 1.98 msaitoh /*
728 1.98 msaitoh * Note: for X550 series devices, these registers are actually
729 1.295 andvar * prefixed with TPH_ instead of DCA_, but the addresses and
730 1.98 msaitoh * fields remain the same.
731 1.98 msaitoh */
732 1.98 msaitoh switch (hw->mac.type) {
733 1.98 msaitoh case ixgbe_mac_82598EB:
734 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
735 1.98 msaitoh break;
736 1.98 msaitoh default:
737 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
738 1.98 msaitoh break;
739 1.98 msaitoh }
740 1.98 msaitoh txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
741 1.98 msaitoh switch (hw->mac.type) {
742 1.98 msaitoh case ixgbe_mac_82598EB:
743 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
744 1.98 msaitoh break;
745 1.98 msaitoh default:
746 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
747 1.98 msaitoh break;
748 1.98 msaitoh }
749 1.98 msaitoh
750 1.98 msaitoh }
751 1.98 msaitoh
752 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
753 1.98 msaitoh u32 dmatxctl, rttdcs;
754 1.99 msaitoh
755 1.98 msaitoh dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
756 1.98 msaitoh dmatxctl |= IXGBE_DMATXCTL_TE;
757 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
758 1.98 msaitoh /* Disable arbiter to set MTQC */
759 1.98 msaitoh rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
760 1.98 msaitoh rttdcs |= IXGBE_RTTDCS_ARBDIS;
761 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC,
763 1.333 msaitoh ixgbe_get_mtqc(sc->iov_mode));
764 1.98 msaitoh rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
765 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
766 1.98 msaitoh }
767 1.99 msaitoh } /* ixgbe_initialize_transmit_units */
768 1.98 msaitoh
769 1.245 msaitoh static void
770 1.333 msaitoh ixgbe_quirks(struct ixgbe_softc *sc)
771 1.245 msaitoh {
772 1.333 msaitoh device_t dev = sc->dev;
773 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
774 1.245 msaitoh const char *vendor, *product;
775 1.245 msaitoh
776 1.248 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
777 1.248 msaitoh /*
778 1.248 msaitoh * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
779 1.248 msaitoh * MA10-ST0.
780 1.248 msaitoh */
781 1.248 msaitoh vendor = pmf_get_platform("system-vendor");
782 1.248 msaitoh product = pmf_get_platform("system-product");
783 1.245 msaitoh
784 1.248 msaitoh if ((vendor == NULL) || (product == NULL))
785 1.248 msaitoh return;
786 1.245 msaitoh
787 1.248 msaitoh if ((strcmp(vendor, "GIGABYTE") == 0) &&
788 1.248 msaitoh (strcmp(product, "MA10-ST0") == 0)) {
789 1.248 msaitoh aprint_verbose_dev(dev,
790 1.248 msaitoh "Enable SFP+ MOD_ABS inverse quirk\n");
791 1.333 msaitoh sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
792 1.248 msaitoh }
793 1.245 msaitoh }
794 1.245 msaitoh }
795 1.245 msaitoh
796 1.99 msaitoh /************************************************************************
797 1.99 msaitoh * ixgbe_attach - Device initialization routine
798 1.98 msaitoh *
799 1.99 msaitoh * Called when the driver is being loaded.
800 1.99 msaitoh * Identifies the type of hardware, allocates all resources
801 1.99 msaitoh * and initializes the hardware.
802 1.98 msaitoh *
803 1.99 msaitoh * return 0 on success, positive on failure
804 1.99 msaitoh ************************************************************************/
805 1.98 msaitoh static void
806 1.98 msaitoh ixgbe_attach(device_t parent, device_t dev, void *aux)
807 1.98 msaitoh {
808 1.333 msaitoh struct ixgbe_softc *sc;
809 1.98 msaitoh struct ixgbe_hw *hw;
810 1.186 msaitoh int error = -1;
811 1.98 msaitoh u32 ctrl_ext;
812 1.340 msaitoh u16 high, low, nvmreg, dev_caps;
813 1.99 msaitoh pcireg_t id, subid;
814 1.159 maxv const ixgbe_vendor_info_t *ent;
815 1.98 msaitoh struct pci_attach_args *pa = aux;
816 1.219 msaitoh bool unsupported_sfp = false;
817 1.98 msaitoh const char *str;
818 1.233 msaitoh char wqname[MAXCOMLEN];
819 1.99 msaitoh char buf[256];
820 1.98 msaitoh
821 1.98 msaitoh INIT_DEBUGOUT("ixgbe_attach: begin");
822 1.98 msaitoh
823 1.98 msaitoh /* Allocate, clear, and link in our adapter structure */
824 1.333 msaitoh sc = device_private(dev);
825 1.333 msaitoh sc->hw.back = sc;
826 1.333 msaitoh sc->dev = dev;
827 1.333 msaitoh hw = &sc->hw;
828 1.333 msaitoh sc->osdep.pc = pa->pa_pc;
829 1.333 msaitoh sc->osdep.tag = pa->pa_tag;
830 1.98 msaitoh if (pci_dma64_available(pa))
831 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat64;
832 1.98 msaitoh else
833 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat;
834 1.333 msaitoh sc->osdep.attached = false;
835 1.333 msaitoh sc->osdep.detaching = false;
836 1.98 msaitoh
837 1.98 msaitoh ent = ixgbe_lookup(pa);
838 1.98 msaitoh
839 1.98 msaitoh KASSERT(ent != NULL);
840 1.98 msaitoh
841 1.98 msaitoh aprint_normal(": %s, Version - %s\n",
842 1.98 msaitoh ixgbe_strings[ent->index], ixgbe_driver_version);
843 1.98 msaitoh
844 1.233 msaitoh /* Core Lock Init */
845 1.333 msaitoh IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
846 1.1 dyoung
847 1.233 msaitoh /* Set up the timer callout and workqueue */
848 1.333 msaitoh callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
849 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
850 1.333 msaitoh error = workqueue_create(&sc->timer_wq, wqname,
851 1.333 msaitoh ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
852 1.233 msaitoh IXGBE_TASKLET_WQ_FLAGS);
853 1.233 msaitoh if (error) {
854 1.233 msaitoh aprint_error_dev(dev,
855 1.233 msaitoh "could not create timer workqueue (%d)\n", error);
856 1.233 msaitoh goto err_out;
857 1.233 msaitoh }
858 1.1 dyoung
859 1.1 dyoung /* Determine hardware revision */
860 1.99 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
861 1.99 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
862 1.99 msaitoh
863 1.99 msaitoh hw->vendor_id = PCI_VENDOR(id);
864 1.99 msaitoh hw->device_id = PCI_PRODUCT(id);
865 1.99 msaitoh hw->revision_id =
866 1.99 msaitoh PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
867 1.99 msaitoh hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
868 1.99 msaitoh hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
869 1.99 msaitoh
870 1.248 msaitoh /* Set quirk flags */
871 1.333 msaitoh ixgbe_quirks(sc);
872 1.248 msaitoh
873 1.99 msaitoh /*
874 1.99 msaitoh * Make sure BUSMASTER is set
875 1.99 msaitoh */
876 1.99 msaitoh ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
877 1.99 msaitoh
878 1.99 msaitoh /* Do base PCI setup - map BAR0 */
879 1.333 msaitoh if (ixgbe_allocate_pci_resources(sc, pa)) {
880 1.99 msaitoh aprint_error_dev(dev, "Allocation of PCI resources failed\n");
881 1.99 msaitoh error = ENXIO;
882 1.99 msaitoh goto err_out;
883 1.99 msaitoh }
884 1.99 msaitoh
885 1.99 msaitoh /* let hardware know driver is loaded */
886 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
887 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
888 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
889 1.99 msaitoh
890 1.99 msaitoh /*
891 1.99 msaitoh * Initialize the shared code
892 1.99 msaitoh */
893 1.144 msaitoh if (ixgbe_init_shared_code(hw) != 0) {
894 1.319 msaitoh aprint_error_dev(dev,
895 1.319 msaitoh "Unable to initialize the shared code\n");
896 1.99 msaitoh error = ENXIO;
897 1.99 msaitoh goto err_out;
898 1.99 msaitoh }
899 1.1 dyoung
900 1.79 msaitoh switch (hw->mac.type) {
901 1.79 msaitoh case ixgbe_mac_82598EB:
902 1.79 msaitoh str = "82598EB";
903 1.79 msaitoh break;
904 1.79 msaitoh case ixgbe_mac_82599EB:
905 1.79 msaitoh str = "82599EB";
906 1.79 msaitoh break;
907 1.79 msaitoh case ixgbe_mac_X540:
908 1.79 msaitoh str = "X540";
909 1.79 msaitoh break;
910 1.79 msaitoh case ixgbe_mac_X550:
911 1.79 msaitoh str = "X550";
912 1.79 msaitoh break;
913 1.79 msaitoh case ixgbe_mac_X550EM_x:
914 1.246 msaitoh str = "X550EM X";
915 1.79 msaitoh break;
916 1.99 msaitoh case ixgbe_mac_X550EM_a:
917 1.99 msaitoh str = "X550EM A";
918 1.99 msaitoh break;
919 1.79 msaitoh default:
920 1.79 msaitoh str = "Unknown";
921 1.79 msaitoh break;
922 1.79 msaitoh }
923 1.79 msaitoh aprint_normal_dev(dev, "device %s\n", str);
924 1.79 msaitoh
925 1.99 msaitoh hw->allow_unsupported_sfp = allow_unsupported_sfp;
926 1.99 msaitoh
927 1.99 msaitoh /* Pick up the 82599 settings */
928 1.292 msaitoh if (hw->mac.type != ixgbe_mac_82598EB)
929 1.99 msaitoh hw->phy.smart_speed = ixgbe_smart_speed;
930 1.292 msaitoh
931 1.292 msaitoh /* Set the right number of segments */
932 1.292 msaitoh KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
933 1.333 msaitoh sc->num_segs = IXGBE_SCATTER_DEFAULT;
934 1.99 msaitoh
935 1.172 msaitoh /* Ensure SW/FW semaphore is free */
936 1.172 msaitoh ixgbe_init_swfw_semaphore(hw);
937 1.172 msaitoh
938 1.113 msaitoh hw->mac.ops.set_lan_id(hw);
939 1.333 msaitoh ixgbe_init_device_features(sc);
940 1.99 msaitoh
941 1.333 msaitoh if (ixgbe_configure_interrupts(sc)) {
942 1.1 dyoung error = ENXIO;
943 1.1 dyoung goto err_out;
944 1.1 dyoung }
945 1.1 dyoung
946 1.99 msaitoh /* Allocate multicast array memory. */
947 1.333 msaitoh sc->mta = malloc(sizeof(*sc->mta) *
948 1.215 chs MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949 1.99 msaitoh
950 1.99 msaitoh /* Enable WoL (if supported) */
951 1.333 msaitoh ixgbe_check_wol_support(sc);
952 1.99 msaitoh
953 1.193 msaitoh /* Register for VLAN events */
954 1.333 msaitoh ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb);
955 1.193 msaitoh
956 1.99 msaitoh /* Verify adapter fan is still functional (if applicable) */
957 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 1.99 msaitoh u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 1.333 msaitoh ixgbe_check_fan_failure(sc, esdp, FALSE);
960 1.99 msaitoh }
961 1.99 msaitoh
962 1.99 msaitoh /* Set an initial default flow control value */
963 1.99 msaitoh hw->fc.requested_mode = ixgbe_flow_control;
964 1.99 msaitoh
965 1.1 dyoung /* Do descriptor calc and sanity checks */
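	/*
	 * Each descriptor ring must be a multiple of DBA_ALIGN bytes and
	 * stay within the MIN/MAX descriptor limits; otherwise fall back
	 * to the default ring sizes.
	 */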
966 1.1 dyoung if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
967 1.1 dyoung ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
968 1.336 msaitoh aprint_error_dev(dev, "Invalid TX ring size (%d). "
969 1.336 msaitoh "It must be between %d and %d, "
970 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
971 1.336 msaitoh "Using default value of %d instead.\n",
972 1.336 msaitoh ixgbe_txd, MIN_TXD, MAX_TXD,
973 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
974 1.336 msaitoh DEFAULT_TXD);
975 1.333 msaitoh sc->num_tx_desc = DEFAULT_TXD;
976 1.1 dyoung } else
977 1.333 msaitoh sc->num_tx_desc = ixgbe_txd;
978 1.1 dyoung
979 1.1 dyoung if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
980 1.33 msaitoh ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
981 1.336 msaitoh aprint_error_dev(dev, "Invalid RX ring size (%d). "
982 1.336 msaitoh "It must be between %d and %d, "
983 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
984 1.336 msaitoh "Using default value of %d instead.\n",
985 1.336 msaitoh ixgbe_rxd, MIN_RXD, MAX_RXD,
986 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
987 1.336 msaitoh DEFAULT_RXD);
988 1.333 msaitoh sc->num_rx_desc = DEFAULT_RXD;
989 1.1 dyoung } else
990 1.333 msaitoh sc->num_rx_desc = ixgbe_rxd;
991 1.1 dyoung
992 1.313 msaitoh /* Sysctls for limiting the amount of work done in the taskqueues */
993 1.333 msaitoh sc->rx_process_limit
994 1.333 msaitoh = (ixgbe_rx_process_limit <= sc->num_rx_desc)
995 1.333 msaitoh ? ixgbe_rx_process_limit : sc->num_rx_desc;
996 1.333 msaitoh sc->tx_process_limit
997 1.333 msaitoh = (ixgbe_tx_process_limit <= sc->num_tx_desc)
998 1.333 msaitoh ? ixgbe_tx_process_limit : sc->num_tx_desc;
999 1.313 msaitoh
1000 1.286 msaitoh 	/* Set the default upper limit for copying mbufs in rxeof */
1001 1.333 msaitoh sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
1002 1.286 msaitoh
1003 1.1 dyoung /* Allocate our TX/RX Queues */
1004 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1005 1.1 dyoung error = ENOMEM;
1006 1.1 dyoung goto err_out;
1007 1.1 dyoung }
1008 1.1 dyoung
1009 1.99 msaitoh hw->phy.reset_if_overtemp = TRUE;
1010 1.99 msaitoh error = ixgbe_reset_hw(hw);
1011 1.99 msaitoh hw->phy.reset_if_overtemp = FALSE;
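	/*
	 * A missing SFP+ module is not fatal at this point; the module
	 * is detected and configured later when it is inserted.
	 */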
1012 1.237 msaitoh if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1013 1.99 msaitoh error = IXGBE_SUCCESS;
1014 1.237 msaitoh else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1015 1.324 msaitoh aprint_error_dev(dev,
1016 1.324 msaitoh "Unsupported SFP+ module type was detected.\n");
1017 1.219 msaitoh unsupported_sfp = true;
1018 1.219 msaitoh error = IXGBE_SUCCESS;
1019 1.1 dyoung } else if (error) {
1020 1.282 msaitoh aprint_error_dev(dev,
1021 1.282 msaitoh "Hardware initialization failed(error = %d)\n", error);
1022 1.1 dyoung error = EIO;
1023 1.1 dyoung goto err_late;
1024 1.1 dyoung }
1025 1.1 dyoung
1026 1.1 dyoung /* Make sure we have a good EEPROM before we read from it */
1027 1.333 msaitoh if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1028 1.48 msaitoh aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1029 1.1 dyoung error = EIO;
1030 1.1 dyoung goto err_late;
1031 1.1 dyoung }
1032 1.1 dyoung
1033 1.88 msaitoh aprint_normal("%s:", device_xname(dev));
1034 1.88 msaitoh /* NVM Image Version */
1035 1.169 msaitoh high = low = 0;
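	/*
	 * The NVM image version word is decoded per MAC type below: the
	 * major number sits in bits 15:12, while the width and position
	 * of the minor number differ between MAC generations.
	 */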
1036 1.88 msaitoh switch (hw->mac.type) {
1037 1.300 msaitoh case ixgbe_mac_82598EB:
1038 1.300 msaitoh /*
1039 1.300 msaitoh 		 * Print the version from the device starter version (0x29). The
1040 1.300 msaitoh 		 * location is the same as newer devices' IXGBE_NVM_MAP_VER.
1041 1.300 msaitoh */
1042 1.300 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1043 1.300 msaitoh if (nvmreg == 0xffff)
1044 1.300 msaitoh break;
1045 1.300 msaitoh high = (nvmreg >> 12) & 0x0f;
1046 1.300 msaitoh low = (nvmreg >> 4) & 0xff;
1047 1.300 msaitoh id = nvmreg & 0x0f;
1048 1.300 msaitoh /*
1049 1.300 msaitoh * The following output might not be correct. Some 82598 cards
1050 1.300 msaitoh 		 * have 0x1070 or 0x2090. The 82598 spec update mentions 2.9.0.
1051 1.300 msaitoh */
1052 1.300 msaitoh aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1053 1.300 msaitoh break;
1054 1.88 msaitoh case ixgbe_mac_X540:
1055 1.99 msaitoh case ixgbe_mac_X550EM_a:
1056 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1057 1.88 msaitoh if (nvmreg == 0xffff)
1058 1.88 msaitoh break;
1059 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1060 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1061 1.88 msaitoh id = nvmreg & 0x0f;
1062 1.107 msaitoh aprint_normal(" NVM Image Version %u.", high);
1063 1.107 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1064 1.107 msaitoh str = "%x";
1065 1.107 msaitoh else
1066 1.107 msaitoh str = "%02x";
1067 1.107 msaitoh aprint_normal(str, low);
1068 1.107 msaitoh aprint_normal(" ID 0x%x,", id);
1069 1.88 msaitoh break;
1070 1.88 msaitoh case ixgbe_mac_X550EM_x:
1071 1.88 msaitoh case ixgbe_mac_X550:
1072 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1073 1.88 msaitoh if (nvmreg == 0xffff)
1074 1.88 msaitoh break;
1075 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1076 1.88 msaitoh low = nvmreg & 0xff;
1077 1.107 msaitoh aprint_normal(" NVM Image Version %u.%02x,", high, low);
1078 1.88 msaitoh break;
1079 1.88 msaitoh default:
1080 1.88 msaitoh break;
1081 1.88 msaitoh }
1082 1.169 msaitoh hw->eeprom.nvm_image_ver_high = high;
1083 1.169 msaitoh hw->eeprom.nvm_image_ver_low = low;
1084 1.88 msaitoh
1085 1.88 msaitoh /* PHY firmware revision */
1086 1.88 msaitoh switch (hw->mac.type) {
1087 1.88 msaitoh case ixgbe_mac_X540:
1088 1.88 msaitoh case ixgbe_mac_X550:
1089 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1090 1.88 msaitoh if (nvmreg == 0xffff)
1091 1.88 msaitoh break;
1092 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1093 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1094 1.88 msaitoh id = nvmreg & 0x000f;
1095 1.114 msaitoh aprint_normal(" PHY FW Revision %u.", high);
1096 1.114 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1097 1.114 msaitoh str = "%x";
1098 1.114 msaitoh else
1099 1.114 msaitoh str = "%02x";
1100 1.114 msaitoh aprint_normal(str, low);
1101 1.114 msaitoh aprint_normal(" ID 0x%x,", id);
1102 1.88 msaitoh break;
1103 1.88 msaitoh default:
1104 1.88 msaitoh break;
1105 1.88 msaitoh }
1106 1.88 msaitoh
1107 1.88 msaitoh /* NVM Map version & OEM NVM Image version */
1108 1.88 msaitoh switch (hw->mac.type) {
1109 1.88 msaitoh case ixgbe_mac_X550:
1110 1.88 msaitoh case ixgbe_mac_X550EM_x:
1111 1.99 msaitoh case ixgbe_mac_X550EM_a:
1112 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1113 1.88 msaitoh if (nvmreg != 0xffff) {
1114 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1115 1.88 msaitoh low = nvmreg & 0x00ff;
1116 1.88 msaitoh aprint_normal(" NVM Map version %u.%02x,", high, low);
1117 1.88 msaitoh }
1118 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1119 1.107 msaitoh if (nvmreg != 0xffff) {
1120 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1121 1.88 msaitoh low = nvmreg & 0x00ff;
1122 1.88 msaitoh aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1123 1.88 msaitoh low);
1124 1.88 msaitoh }
1125 1.88 msaitoh break;
1126 1.88 msaitoh default:
1127 1.88 msaitoh break;
1128 1.88 msaitoh }
1129 1.88 msaitoh
1130 1.88 msaitoh /* Print the ETrackID */
1131 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1132 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1133 1.88 msaitoh aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1134 1.79 msaitoh
1135 1.307 msaitoh /* Printed Board Assembly number */
1136 1.307 msaitoh error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
1137 1.307 msaitoh aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);
1138 1.307 msaitoh
1139 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
1140 1.333 msaitoh error = ixgbe_allocate_msix(sc, pa);
1141 1.119 msaitoh if (error) {
1142 1.119 msaitoh /* Free allocated queue structures first */
1143 1.333 msaitoh ixgbe_free_queues(sc);
1144 1.119 msaitoh
1145 1.119 msaitoh /* Fallback to legacy interrupt */
1146 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_MSI)
1147 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
1148 1.333 msaitoh sc->num_queues = 1;
1149 1.119 msaitoh
1150 1.119 msaitoh /* Allocate our TX/RX Queues again */
1151 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1152 1.119 msaitoh error = ENOMEM;
1153 1.119 msaitoh goto err_out;
1154 1.119 msaitoh }
1155 1.119 msaitoh }
1156 1.119 msaitoh }
1157 1.307 msaitoh
1158 1.169 msaitoh 	/* Recovery mode: X550-class MACs with NVM image >= 2.00 only */
1159 1.333 msaitoh switch (sc->hw.mac.type) {
1160 1.169 msaitoh case ixgbe_mac_X550:
1161 1.169 msaitoh case ixgbe_mac_X550EM_x:
1162 1.169 msaitoh case ixgbe_mac_X550EM_a:
1163 1.169 msaitoh /* >= 2.00 */
1164 1.169 msaitoh if (hw->eeprom.nvm_image_ver_high >= 2) {
1165 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1166 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1167 1.169 msaitoh }
1168 1.169 msaitoh break;
1169 1.169 msaitoh default:
1170 1.169 msaitoh break;
1171 1.169 msaitoh }
1172 1.169 msaitoh
1173 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) == 0)
1174 1.333 msaitoh error = ixgbe_allocate_legacy(sc, pa);
1175 1.185 msaitoh if (error)
1176 1.99 msaitoh goto err_late;
1177 1.99 msaitoh
1178 1.119 msaitoh /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1179 1.333 msaitoh mutex_init(&(sc)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1180 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1181 1.333 msaitoh error = workqueue_create(&sc->admin_wq, wqname,
1182 1.333 msaitoh ixgbe_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
1183 1.223 thorpej IXGBE_TASKLET_WQ_FLAGS);
1184 1.223 thorpej if (error) {
1185 1.223 thorpej aprint_error_dev(dev,
1186 1.233 msaitoh "could not create admin workqueue (%d)\n", error);
1187 1.223 thorpej goto err_out;
1188 1.223 thorpej }
1189 1.119 msaitoh
1190 1.99 msaitoh error = ixgbe_start_hw(hw);
1191 1.25 msaitoh switch (error) {
1192 1.25 msaitoh case IXGBE_ERR_EEPROM_VERSION:
1193 1.319 msaitoh aprint_error_dev(dev,
1194 1.319 msaitoh "This device is a pre-production adapter/"
1195 1.1 dyoung "LOM. Please be aware there may be issues associated "
1196 1.48 msaitoh "with your hardware.\nIf you are experiencing problems "
1197 1.1 dyoung "please contact your Intel or hardware representative "
1198 1.1 dyoung "who provided you with this hardware.\n");
1199 1.25 msaitoh break;
1200 1.25 msaitoh default:
1201 1.25 msaitoh break;
1202 1.1 dyoung }
1203 1.1 dyoung
1204 1.116 msaitoh /* Setup OS specific network interface */
1205 1.333 msaitoh if (ixgbe_setup_interface(dev, sc) != 0)
1206 1.116 msaitoh goto err_late;
1207 1.116 msaitoh
1208 1.110 msaitoh /*
1209 1.110 msaitoh 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+) cage
1210 1.110 msaitoh 	 * and an inserted module, phy.id is an SFF-8024 ID, not an MII PHY ID.
1211 1.110 msaitoh */
1212 1.110 msaitoh if (hw->phy.media_type == ixgbe_media_type_copper) {
1213 1.95 msaitoh uint16_t id1, id2;
1214 1.95 msaitoh int oui, model, rev;
1215 1.285 pgoyette char descr[MII_MAX_DESCR_LEN];
1216 1.95 msaitoh
1217 1.95 msaitoh id1 = hw->phy.id >> 16;
1218 1.95 msaitoh id2 = hw->phy.id & 0xffff;
1219 1.95 msaitoh oui = MII_OUI(id1, id2);
1220 1.95 msaitoh model = MII_MODEL(id2);
1221 1.95 msaitoh rev = MII_REV(id2);
1222 1.285 pgoyette mii_get_descr(descr, sizeof(descr), oui, model);
1223 1.285 pgoyette if (descr[0])
1224 1.299 msaitoh aprint_normal_dev(dev, "PHY: %s, rev. %d\n",
1225 1.299 msaitoh descr, rev);
1226 1.95 msaitoh else
1227 1.95 msaitoh aprint_normal_dev(dev,
1228 1.95 msaitoh "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1229 1.95 msaitoh oui, model, rev);
1230 1.95 msaitoh }
1231 1.95 msaitoh
1232 1.173 msaitoh /* Enable EEE power saving */
1233 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
1234 1.173 msaitoh hw->mac.ops.setup_eee(hw,
1235 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
1236 1.173 msaitoh
1237 1.52 msaitoh /* Enable power to the phy. */
1238 1.219 msaitoh if (!unsupported_sfp) {
1239 1.219 msaitoh /* Enable the optics for 82599 SFP+ fiber */
1240 1.219 msaitoh ixgbe_enable_tx_laser(hw);
1241 1.219 msaitoh
1242 1.219 msaitoh /*
1243 1.219 msaitoh 	 * XXX Currently, ixgbe_set_phy_power() supports only copper
1244 1.219 msaitoh 	 * PHYs, so the !unsupported_sfp check is not strictly required here.
1245 1.219 msaitoh */
1246 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
1247 1.219 msaitoh }
1248 1.52 msaitoh
1249 1.1 dyoung /* Initialize statistics */
1250 1.333 msaitoh ixgbe_update_stats_counters(sc);
1251 1.1 dyoung
1252 1.98 msaitoh /* Check PCIE slot type/speed/width */
1253 1.333 msaitoh ixgbe_get_slot_info(sc);
1254 1.1 dyoung
1255 1.99 msaitoh /*
1256 1.99 msaitoh * Do time init and sysctl init here, but
1257 1.99 msaitoh * only on the first port of a bypass adapter.
1258 1.99 msaitoh */
1259 1.333 msaitoh ixgbe_bypass_init(sc);
1260 1.99 msaitoh
1261 1.99 msaitoh /* Set an initial dmac value */
1262 1.333 msaitoh sc->dmac = 0;
1263 1.99 msaitoh /* Set initial advertised speeds (if applicable) */
1264 1.333 msaitoh sc->advertise = ixgbe_get_default_advertise(sc);
1265 1.45 msaitoh
1266 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1267 1.99 msaitoh ixgbe_define_iov_schemas(dev, &error);
1268 1.44 msaitoh
1269 1.44 msaitoh /* Add sysctls */
1270 1.333 msaitoh ixgbe_add_device_sysctls(sc);
1271 1.333 msaitoh ixgbe_add_hw_stats(sc);
1272 1.44 msaitoh
1273 1.99 msaitoh /* For Netmap */
1274 1.333 msaitoh sc->init_locked = ixgbe_init_locked;
1275 1.333 msaitoh sc->stop_locked = ixgbe_stop_locked;
1276 1.99 msaitoh
1277 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
1278 1.333 msaitoh ixgbe_netmap_attach(sc);
1279 1.1 dyoung
1280 1.340 msaitoh 	/* Print feature flags and device capabilities */
1281 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
1282 1.99 msaitoh aprint_verbose_dev(dev, "feature cap %s\n", buf);
1283 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
1284 1.99 msaitoh aprint_verbose_dev(dev, "feature ena %s\n", buf);
1285 1.340 msaitoh if (ixgbe_get_device_caps(hw, &dev_caps) == 0) {
1286 1.340 msaitoh snprintb(buf, sizeof(buf), IXGBE_DEVICE_CAPS_FLAGS, dev_caps);
1287 1.340 msaitoh aprint_verbose_dev(dev, "device cap %s\n", buf);
1288 1.340 msaitoh }
1289 1.44 msaitoh
1290 1.44 msaitoh if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1291 1.333 msaitoh pmf_class_network_register(dev, sc->ifp);
1292 1.44 msaitoh else
1293 1.44 msaitoh aprint_error_dev(dev, "couldn't establish power handler\n");
1294 1.44 msaitoh
1295 1.169 msaitoh /* Init recovery mode timer and state variable */
1296 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1297 1.333 msaitoh sc->recovery_mode = 0;
1298 1.169 msaitoh
1299 1.169 msaitoh /* Set up the timer callout */
1300 1.333 msaitoh callout_init(&sc->recovery_mode_timer,
1301 1.169 msaitoh IXGBE_CALLOUT_FLAGS);
1302 1.235 msaitoh snprintf(wqname, sizeof(wqname), "%s-recovery",
1303 1.235 msaitoh device_xname(dev));
1304 1.333 msaitoh error = workqueue_create(&sc->recovery_mode_timer_wq,
1305 1.333 msaitoh wqname, ixgbe_handle_recovery_mode_timer, sc,
1306 1.233 msaitoh IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1307 1.233 msaitoh if (error) {
1308 1.233 msaitoh aprint_error_dev(dev, "could not create "
1309 1.233 msaitoh "recovery_mode_timer workqueue (%d)\n", error);
1310 1.233 msaitoh goto err_out;
1311 1.233 msaitoh }
1312 1.169 msaitoh
1313 1.169 msaitoh /* Start the task */
1314 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
1315 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
1316 1.169 msaitoh }
1317 1.169 msaitoh
1318 1.1 dyoung INIT_DEBUGOUT("ixgbe_attach: end");
1319 1.333 msaitoh sc->osdep.attached = true;
1320 1.98 msaitoh
1321 1.1 dyoung return;
1322 1.43 msaitoh
1323 1.1 dyoung err_late:
1324 1.333 msaitoh ixgbe_free_queues(sc);
1325 1.1 dyoung err_out:
1326 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1327 1.99 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1328 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1329 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
1330 1.333 msaitoh ixgbe_free_pci_resources(sc);
1331 1.333 msaitoh if (sc->mta != NULL)
1332 1.333 msaitoh free(sc->mta, M_DEVBUF);
1333 1.333 msaitoh mutex_destroy(&(sc)->admin_mtx); /* XXX appropriate order? */
1334 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
1335 1.99 msaitoh
1336 1.1 dyoung return;
1337 1.99 msaitoh } /* ixgbe_attach */
1338 1.1 dyoung
1339 1.99 msaitoh /************************************************************************
1340 1.99 msaitoh * ixgbe_check_wol_support
1341 1.99 msaitoh *
1342 1.99 msaitoh * Checks whether the adapter's ports are capable of
1343 1.99 msaitoh * Wake On LAN by reading the adapter's NVM.
1344 1.1 dyoung *
1345 1.99 msaitoh * Sets each port's hw->wol_enabled value depending
1346 1.99 msaitoh * on the value read here.
1347 1.99 msaitoh ************************************************************************/
1348 1.98 msaitoh static void
1349 1.333 msaitoh ixgbe_check_wol_support(struct ixgbe_softc *sc)
1350 1.98 msaitoh {
1351 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1352 1.186 msaitoh u16 dev_caps = 0;
1353 1.1 dyoung
1354 1.98 msaitoh /* Find out WoL support for port */
1355 1.333 msaitoh sc->wol_support = hw->wol_enabled = 0;
1356 1.98 msaitoh ixgbe_get_device_caps(hw, &dev_caps);
1357 1.98 msaitoh if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1358 1.98 msaitoh ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1359 1.99 msaitoh hw->bus.func == 0))
1360 1.333 msaitoh sc->wol_support = hw->wol_enabled = 1;
1361 1.98 msaitoh
1362 1.98 msaitoh /* Save initial wake up filter configuration */
1363 1.333 msaitoh sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1364 1.98 msaitoh
1365 1.98 msaitoh return;
1366 1.99 msaitoh } /* ixgbe_check_wol_support */
1367 1.98 msaitoh
1368 1.99 msaitoh /************************************************************************
1369 1.99 msaitoh * ixgbe_setup_interface
1370 1.98 msaitoh *
1371 1.99 msaitoh * Setup networking device structure and register an interface.
1372 1.99 msaitoh ************************************************************************/
1373 1.1 dyoung static int
1374 1.333 msaitoh ixgbe_setup_interface(device_t dev, struct ixgbe_softc *sc)
1375 1.1 dyoung {
1376 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
1377 1.98 msaitoh struct ifnet *ifp;
1378 1.1 dyoung
1379 1.98 msaitoh INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1380 1.1 dyoung
1381 1.333 msaitoh ifp = sc->ifp = &ec->ec_if;
1382 1.98 msaitoh strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1383 1.98 msaitoh ifp->if_baudrate = IF_Gbps(10);
1384 1.98 msaitoh ifp->if_init = ixgbe_init;
1385 1.98 msaitoh ifp->if_stop = ixgbe_ifstop;
1386 1.333 msaitoh ifp->if_softc = sc;
1387 1.98 msaitoh ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1388 1.98 msaitoh #ifdef IXGBE_MPSAFE
1389 1.112 ozaki ifp->if_extflags = IFEF_MPSAFE;
1390 1.98 msaitoh #endif
1391 1.98 msaitoh ifp->if_ioctl = ixgbe_ioctl;
1392 1.98 msaitoh #if __FreeBSD_version >= 1100045
1393 1.98 msaitoh /* TSO parameters */
1394 1.98 msaitoh ifp->if_hw_tsomax = 65518;
1395 1.98 msaitoh ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1396 1.98 msaitoh ifp->if_hw_tsomaxsegsize = 2048;
1397 1.98 msaitoh #endif
1398 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1399 1.99 msaitoh #if 0
1400 1.99 msaitoh ixgbe_start_locked = ixgbe_legacy_start_locked;
1401 1.99 msaitoh #endif
1402 1.99 msaitoh } else {
1403 1.99 msaitoh ifp->if_transmit = ixgbe_mq_start;
1404 1.99 msaitoh #if 0
1405 1.99 msaitoh ixgbe_start_locked = ixgbe_mq_start_locked;
1406 1.29 msaitoh #endif
1407 1.99 msaitoh }
1408 1.99 msaitoh ifp->if_start = ixgbe_legacy_start;
1409 1.333 msaitoh IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
1410 1.98 msaitoh IFQ_SET_READY(&ifp->if_snd);
1411 1.98 msaitoh
1412 1.284 riastrad if_initialize(ifp);
1413 1.333 msaitoh sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
1414 1.333 msaitoh ether_ifattach(ifp, sc->hw.mac.addr);
1415 1.216 msaitoh aprint_normal_dev(dev, "Ethernet address %s\n",
1416 1.333 msaitoh ether_sprintf(sc->hw.mac.addr));
1417 1.98 msaitoh /*
1418 1.98 msaitoh 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
1419 1.98 msaitoh * used.
1420 1.98 msaitoh */
1421 1.98 msaitoh ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1422 1.98 msaitoh
1423 1.333 msaitoh sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1424 1.98 msaitoh
1425 1.98 msaitoh /*
1426 1.98 msaitoh * Tell the upper layer(s) we support long frames.
1427 1.98 msaitoh */
1428 1.98 msaitoh ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1429 1.98 msaitoh
1430 1.98 msaitoh /* Set capability flags */
1431 1.98 msaitoh ifp->if_capabilities |= IFCAP_RXCSUM
1432 1.186 msaitoh | IFCAP_TXCSUM
1433 1.186 msaitoh | IFCAP_TSOv4
1434 1.186 msaitoh | IFCAP_TSOv6;
1435 1.98 msaitoh ifp->if_capenable = 0;
1436 1.98 msaitoh
1437 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1438 1.186 msaitoh | ETHERCAP_VLAN_HWCSUM
1439 1.186 msaitoh | ETHERCAP_JUMBO_MTU
1440 1.186 msaitoh | ETHERCAP_VLAN_MTU;
1441 1.98 msaitoh
1442 1.98 msaitoh /* Enable the above capabilities by default */
1443 1.98 msaitoh ec->ec_capenable = ec->ec_capabilities;
1444 1.98 msaitoh
1445 1.98 msaitoh /*
1446 1.99 msaitoh 	 * Don't turn this on by default. If VLANs are
1447 1.99 msaitoh 	 * created on another pseudo device (e.g. lagg),
1448 1.99 msaitoh 	 * VLAN events are not passed through and operation
1449 1.99 msaitoh 	 * breaks, but with HW FILTER off it works. If you
1450 1.99 msaitoh 	 * use VLANs directly on the ixgbe driver, you can
1451 1.99 msaitoh 	 * enable this and get full hardware tag filtering.
1452 1.99 msaitoh */
1453 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1454 1.1 dyoung
1455 1.98 msaitoh /*
1456 1.98 msaitoh * Specify the media types supported by this adapter and register
1457 1.98 msaitoh * callbacks to update media and link information
1458 1.98 msaitoh */
1459 1.333 msaitoh ec->ec_ifmedia = &sc->media;
1460 1.333 msaitoh ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixgbe_media_change,
1461 1.333 msaitoh ixgbe_media_status, &sc->core_mtx);
1462 1.45 msaitoh
1463 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1464 1.333 msaitoh ixgbe_add_media_types(sc);
1465 1.49 msaitoh
1466 1.98 msaitoh /* Set autoselect media by default */
1467 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1468 1.1 dyoung
1469 1.156 ozaki if_register(ifp);
1470 1.156 ozaki
1471 1.98 msaitoh return (0);
1472 1.99 msaitoh } /* ixgbe_setup_interface */
1473 1.1 dyoung
1474 1.99 msaitoh /************************************************************************
1475 1.99 msaitoh * ixgbe_add_media_types
1476 1.99 msaitoh ************************************************************************/
1477 1.98 msaitoh static void
1478 1.333 msaitoh ixgbe_add_media_types(struct ixgbe_softc *sc)
1479 1.98 msaitoh {
1480 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1481 1.186 msaitoh u64 layer;
1482 1.1 dyoung
1483 1.333 msaitoh layer = sc->phy_layer;
1484 1.1 dyoung
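	/*
	 * Local helper: add a full-duplex Ethernet media type (with
	 * optional media data word) to this adapter's ifmedia list.
	 */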
1485 1.98 msaitoh #define ADD(mm, dd) \
1486 1.333 msaitoh 	ifmedia_add(&sc->media, IFM_ETHER | (mm), (dd), NULL)
1487 1.1 dyoung
1488 1.140 msaitoh ADD(IFM_NONE, 0);
1489 1.140 msaitoh
1490 1.98 msaitoh /* Media types with matching NetBSD media defines */
1491 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1492 1.98 msaitoh ADD(IFM_10G_T | IFM_FDX, 0);
1493 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1494 1.98 msaitoh ADD(IFM_1000_T | IFM_FDX, 0);
1495 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1496 1.98 msaitoh ADD(IFM_100_TX | IFM_FDX, 0);
1497 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1498 1.99 msaitoh ADD(IFM_10_T | IFM_FDX, 0);
1499 1.26 msaitoh
1500 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1501 1.319 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1502 1.98 msaitoh ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1503 1.1 dyoung
1504 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1505 1.98 msaitoh ADD(IFM_10G_LR | IFM_FDX, 0);
1506 1.319 msaitoh if (hw->phy.multispeed_fiber)
1507 1.98 msaitoh ADD(IFM_1000_LX | IFM_FDX, 0);
1508 1.98 msaitoh }
1509 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1510 1.98 msaitoh ADD(IFM_10G_SR | IFM_FDX, 0);
1511 1.319 msaitoh if (hw->phy.multispeed_fiber)
1512 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1513 1.319 msaitoh } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1514 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1515 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1516 1.98 msaitoh ADD(IFM_10G_CX4 | IFM_FDX, 0);
1517 1.1 dyoung
1518 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1519 1.98 msaitoh ADD(IFM_10G_KR | IFM_FDX, 0);
1520 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1521 1.180 msaitoh ADD(IFM_10G_KX4 | IFM_FDX, 0);
1522 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1523 1.98 msaitoh ADD(IFM_1000_KX | IFM_FDX, 0);
1524 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1525 1.99 msaitoh ADD(IFM_2500_KX | IFM_FDX, 0);
1526 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1527 1.103 msaitoh ADD(IFM_2500_T | IFM_FDX, 0);
1528 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T)
1529 1.103 msaitoh ADD(IFM_5000_T | IFM_FDX, 0);
1530 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1531 1.208 msaitoh ADD(IFM_1000_BX10 | IFM_FDX, 0);
1532 1.98 msaitoh /* XXX no ifmedia_set? */
1533 1.185 msaitoh
1534 1.98 msaitoh ADD(IFM_AUTO, 0);
1535 1.98 msaitoh
1536 1.98 msaitoh #undef ADD
1537 1.99 msaitoh } /* ixgbe_add_media_types */
1538 1.1 dyoung
1539 1.99 msaitoh /************************************************************************
1540 1.99 msaitoh * ixgbe_is_sfp
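 *
 *   Return TRUE if the port's media is an SFP/QSFP module: for 82598
 *   this is indicated by the "nl" PHY type, for 82599/X550EM by a
 *   fiber or fiber_qsfp media type.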
1541 1.99 msaitoh ************************************************************************/
1542 1.99 msaitoh static inline bool
1543 1.99 msaitoh ixgbe_is_sfp(struct ixgbe_hw *hw)
1544 1.99 msaitoh {
1545 1.99 msaitoh switch (hw->mac.type) {
1546 1.99 msaitoh case ixgbe_mac_82598EB:
1547 1.99 msaitoh if (hw->phy.type == ixgbe_phy_nl)
1548 1.144 msaitoh return (TRUE);
1549 1.144 msaitoh return (FALSE);
1550 1.99 msaitoh case ixgbe_mac_82599EB:
1551 1.203 msaitoh case ixgbe_mac_X550EM_x:
1552 1.203 msaitoh case ixgbe_mac_X550EM_a:
1553 1.99 msaitoh switch (hw->mac.ops.get_media_type(hw)) {
1554 1.99 msaitoh case ixgbe_media_type_fiber:
1555 1.99 msaitoh case ixgbe_media_type_fiber_qsfp:
1556 1.144 msaitoh return (TRUE);
1557 1.99 msaitoh default:
1558 1.144 msaitoh return (FALSE);
1559 1.99 msaitoh }
1560 1.99 msaitoh default:
1561 1.144 msaitoh return (FALSE);
1562 1.99 msaitoh }
1563 1.99 msaitoh } /* ixgbe_is_sfp */
1564 1.99 msaitoh
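/************************************************************************
 * ixgbe_schedule_admin_tasklet
 *
 *   Enqueue the admin workqueue item unless it is already pending or
 *   the device is detaching. The caller must hold admin_mtx.
 ************************************************************************/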
1565 1.226 thorpej static void
1566 1.333 msaitoh ixgbe_schedule_admin_tasklet(struct ixgbe_softc *sc)
1567 1.226 thorpej {
1568 1.243 msaitoh
1569 1.333 msaitoh KASSERT(mutex_owned(&sc->admin_mtx));
1570 1.260 knakahar
1571 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
1572 1.333 msaitoh if (sc->admin_pending == 0)
1573 1.333 msaitoh workqueue_enqueue(sc->admin_wq,
1574 1.333 msaitoh &sc->admin_wc, NULL);
1575 1.333 msaitoh sc->admin_pending = 1;
1576 1.255 msaitoh }
1577 1.226 thorpej }
1578 1.226 thorpej
1579 1.99 msaitoh /************************************************************************
1580 1.99 msaitoh * ixgbe_config_link
1581 1.99 msaitoh ************************************************************************/
1582 1.98 msaitoh static void
1583 1.333 msaitoh ixgbe_config_link(struct ixgbe_softc *sc)
1584 1.98 msaitoh {
1585 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1586 1.186 msaitoh u32 autoneg, err = 0;
1587 1.233 msaitoh u32 task_requests = 0;
1588 1.186 msaitoh bool sfp, negotiate = false;
1589 1.1 dyoung
1590 1.98 msaitoh sfp = ixgbe_is_sfp(hw);
1591 1.1 dyoung
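	/*
	 * For SFP(+) media, module identification (and multispeed fiber
	 * setup, if applicable) is deferred to the admin workqueue;
	 * otherwise the link is configured directly below.
	 */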
1592 1.185 msaitoh if (sfp) {
1593 1.99 msaitoh if (hw->phy.multispeed_fiber) {
1594 1.99 msaitoh ixgbe_enable_tx_laser(hw);
1595 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1596 1.99 msaitoh }
1597 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1598 1.260 knakahar
1599 1.333 msaitoh mutex_enter(&sc->admin_mtx);
1600 1.333 msaitoh sc->task_requests |= task_requests;
1601 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
1602 1.333 msaitoh mutex_exit(&sc->admin_mtx);
1603 1.98 msaitoh } else {
1604 1.333 msaitoh struct ifmedia *ifm = &sc->media;
1605 1.143 msaitoh
1606 1.98 msaitoh if (hw->mac.ops.check_link)
1607 1.333 msaitoh err = ixgbe_check_link(hw, &sc->link_speed,
1608 1.333 msaitoh &sc->link_up, FALSE);
1609 1.98 msaitoh if (err)
1610 1.144 msaitoh return;
1611 1.143 msaitoh
1612 1.143 msaitoh /*
1613 1.143 msaitoh 	 * If this is the first call, get the value for
1614 1.143 msaitoh 	 * auto-negotiation from the hardware.
1615 1.143 msaitoh */
1616 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
1617 1.143 msaitoh if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1618 1.143 msaitoh && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1619 1.186 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1620 1.99 msaitoh &negotiate);
1621 1.98 msaitoh if (err)
1622 1.144 msaitoh return;
1623 1.98 msaitoh if (hw->mac.ops.setup_link)
1624 1.186 msaitoh err = hw->mac.ops.setup_link(hw, autoneg,
1625 1.333 msaitoh sc->link_up);
1626 1.98 msaitoh }
1627 1.99 msaitoh } /* ixgbe_config_link */
1628 1.98 msaitoh
1629 1.99 msaitoh /************************************************************************
1630 1.99 msaitoh * ixgbe_update_stats_counters - Update board statistics counters.
1631 1.99 msaitoh ************************************************************************/
1632 1.98 msaitoh static void
1633 1.333 msaitoh ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1634 1.1 dyoung {
1635 1.333 msaitoh struct ifnet *ifp = sc->ifp;
1636 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1637 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1638 1.305 msaitoh u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
1639 1.304 msaitoh u64 total, total_missed_rx = 0;
1640 1.303 msaitoh uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
1641 1.186 msaitoh unsigned int queue_counters;
1642 1.176 msaitoh int i;
1643 1.44 msaitoh
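	/*
	 * IXGBE_EVC_REGADD() reads a statistics register and accumulates
	 * it into the corresponding event counter; the REGADD2 variant
	 * additionally keeps the value just read in a local variable of
	 * the same name so it can be reused below.
	 */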
1644 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
1645 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);
1646 1.303 msaitoh
1647 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
1648 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
1649 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
1650 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);
1651 1.44 msaitoh
1652 1.176 msaitoh /* 16 registers exist */
1653 1.333 msaitoh queue_counters = uimin(__arraycount(stats->qprc), sc->num_queues);
1654 1.176 msaitoh for (i = 0; i < queue_counters; i++) {
1655 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
1656 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
1657 1.329 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1658 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbrc[i],
1659 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)) +
1660 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32));
1661 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbtc[i],
1662 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)) +
1663 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32));
1664 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
1665 1.329 msaitoh } else {
1666 1.329 msaitoh /* 82598 */
1667 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBRC(i), qbrc[i]);
1668 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBTC(i), qbtc[i]);
1669 1.329 msaitoh }
1670 1.98 msaitoh }
1671 1.151 msaitoh
1672 1.175 msaitoh /* 8 registers exist */
1673 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1674 1.98 msaitoh uint32_t mp;
1675 1.44 msaitoh
1676 1.151 msaitoh /* MPC */
1677 1.98 msaitoh mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1678 1.98 msaitoh /* global total per queue */
1679 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpc[i], mp);
1680 1.98 msaitoh /* running comprehensive total for stats display */
1681 1.98 msaitoh total_missed_rx += mp;
1682 1.44 msaitoh
1683 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1684 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);
1685 1.151 msaitoh
1686 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
1687 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
1688 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1689 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1690 1.319 msaitoh IXGBE_PXONRXCNT(i), pxonrxc[i]);
1691 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1692 1.319 msaitoh IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
1693 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1694 1.319 msaitoh IXGBE_PXON2OFFCNT(i), pxon2offc[i]);
1695 1.151 msaitoh } else {
1696 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1697 1.319 msaitoh IXGBE_PXONRXC(i), pxonrxc[i]);
1698 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1699 1.319 msaitoh IXGBE_PXOFFRXC(i), pxoffrxc[i]);
1700 1.151 msaitoh }
1701 1.98 msaitoh }
1702 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);
1703 1.44 msaitoh
1704 1.98 msaitoh 	/* The documentation says M[LR]FC are valid only at 10Gbps link-up */
1705 1.333 msaitoh if ((sc->link_active == LINK_STATE_UP)
1706 1.333 msaitoh && (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1707 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
1708 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
1709 1.98 msaitoh }
1710 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
1711 1.326 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LINK_DN_CNT, link_dn_cnt);
1712 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);
1713 1.44 msaitoh
1714 1.98 msaitoh /* Hardware workaround, gprc counts missed packets */
1715 1.305 msaitoh IXGBE_EVC_ADD(&stats->gprc,
1716 1.305 msaitoh IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);
1717 1.44 msaitoh
1718 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
1719 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
1720 1.305 msaitoh total = lxontxc + lxofftxc;
1721 1.44 msaitoh
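	/*
	 * Good Octets Transmitted is adjusted below by subtracting total
	 * (the XON + XOFF frame count) times the minimum frame length,
	 * so transmitted pause frames are not counted as good data octets.
	 */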
1722 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1723 1.305 msaitoh IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
1724 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
1725 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1726 1.280 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1727 1.305 msaitoh - total * ETHER_MIN_LEN);
1728 1.305 msaitoh IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
1729 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
1730 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
1731 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
1732 1.98 msaitoh } else {
1733 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
1734 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
1735 1.98 msaitoh /* 82598 only has a counter in the high register */
1736 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
1737 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
1738 1.305 msaitoh - total * ETHER_MIN_LEN);
1739 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
1740 1.98 msaitoh }
1741 1.44 msaitoh
1742 1.98 msaitoh /*
1743 1.98 msaitoh 	 * Workaround: the MPRC hardware counter incorrectly includes
1744 1.98 msaitoh 	 * broadcasts, so for now we subtract those (82598 only).
1745 1.98 msaitoh */
1746 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
1747 1.305 msaitoh IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
1748 1.305 msaitoh - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));
1749 1.305 msaitoh
1750 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
1751 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
1752 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
1753 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
1754 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
1755 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);
1756 1.305 msaitoh
1757 1.305 msaitoh IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
1758 1.305 msaitoh IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
1759 1.305 msaitoh IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);
1760 1.305 msaitoh
1761 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
1762 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
1763 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
1764 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);
1765 1.305 msaitoh
1766 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
1767 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
1768 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
1769 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
1770 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
1771 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
1772 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
1773 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
1774 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
1775 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
1776 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
1777 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
1778 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
1779 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
1780 1.98 msaitoh 	/* Only read the FCoE counters on 82599 and newer */
1781 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1782 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
1783 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
1784 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
1785 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
1786 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
1787 1.98 msaitoh }
1788 1.44 msaitoh
1789 1.44 msaitoh /*
1790 1.224 msaitoh * Fill out the OS statistics structure. Only RX errors are required
1791 1.224 msaitoh * here because all TX counters are incremented in the TX path and
1792 1.224 msaitoh * normal RX counters are prepared in ether_input().
1793 1.44 msaitoh */
1794 1.222 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1795 1.222 thorpej if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1796 1.298 msaitoh
1797 1.298 msaitoh /*
1798 1.298 msaitoh 	 * Aggregate the following error counts as RX errors:
1799 1.298 msaitoh * - CRC error count,
1800 1.298 msaitoh * - illegal byte error count,
1801 1.298 msaitoh * - length error count,
1802 1.298 msaitoh * - undersized packets count,
1803 1.298 msaitoh * - fragmented packets count,
1804 1.298 msaitoh * - oversized packets count,
1805 1.298 msaitoh * - jabber count.
1806 1.298 msaitoh */
1807 1.298 msaitoh if_statadd_ref(nsr, if_ierrors,
1808 1.303 msaitoh crcerrs + illerrc + rlec + ruc + rfc + roc + rjc);
1809 1.298 msaitoh
1810 1.222 thorpej IF_STAT_PUTREF(ifp);
1811 1.99 msaitoh } /* ixgbe_update_stats_counters */
1812 1.1 dyoung
1813 1.99 msaitoh /************************************************************************
1814 1.99 msaitoh * ixgbe_add_hw_stats
1815 1.99 msaitoh *
1816 1.99 msaitoh * Add sysctl variables, one per statistic, to the system.
1817 1.99 msaitoh ************************************************************************/
1818 1.98 msaitoh static void
1819 1.333 msaitoh ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1820 1.1 dyoung {
1821 1.333 msaitoh device_t dev = sc->dev;
1822 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
1823 1.333 msaitoh struct sysctllog **log = &sc->sysctllog;
1824 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
1825 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
1826 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1827 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1828 1.98 msaitoh const char *xname = device_xname(dev);
1829 1.144 msaitoh int i;
1830 1.1 dyoung
1831 1.98 msaitoh /* Driver Statistics */
1832 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1833 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EFBIG");
1834 1.333 msaitoh evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1835 1.98 msaitoh NULL, xname, "m_defrag() failed");
1836 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1837 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EFBIG");
1838 1.333 msaitoh evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1839 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EINVAL");
1840 1.333 msaitoh evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
1841 1.98 msaitoh NULL, xname, "Driver tx dma hard fail other");
1842 1.333 msaitoh evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1843 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EAGAIN");
1844 1.333 msaitoh evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1845 1.98 msaitoh NULL, xname, "Driver tx dma soft fail ENOMEM");
1846 1.333 msaitoh evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
1847 1.98 msaitoh NULL, xname, "Watchdog timeouts");
1848 1.333 msaitoh evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
1849 1.98 msaitoh NULL, xname, "TSO errors");
1850 1.333 msaitoh evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
1851 1.233 msaitoh NULL, xname, "Admin MSI-X IRQ Handled");
1852 1.333 msaitoh evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
1853 1.233 msaitoh NULL, xname, "Link event");
1854 1.333 msaitoh evcnt_attach_dynamic(&sc->mod_workev, EVCNT_TYPE_INTR,
1855 1.233 msaitoh NULL, xname, "SFP+ module event");
1856 1.333 msaitoh evcnt_attach_dynamic(&sc->msf_workev, EVCNT_TYPE_INTR,
1857 1.233 msaitoh NULL, xname, "Multispeed event");
1858 1.333 msaitoh evcnt_attach_dynamic(&sc->phy_workev, EVCNT_TYPE_INTR,
1859 1.233 msaitoh NULL, xname, "External PHY event");
1860 1.1 dyoung
1861 1.168 msaitoh 	/* The max number of traffic classes is 8 */
1862 1.168 msaitoh KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
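	/*
	 * Attach one set of event counters per traffic class (tc0..tc7);
	 * the __arraycount() checks keep each register-backed counter
	 * within the bounds of its stats array.
	 */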
1863 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1864 1.333 msaitoh snprintf(sc->tcs[i].evnamebuf,
1865 1.333 msaitoh sizeof(sc->tcs[i].evnamebuf), "%s tc%d", xname, i);
1866 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
1867 1.168 msaitoh evcnt_attach_dynamic(&stats->mpc[i],
1868 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1869 1.168 msaitoh "RX Missed Packet Count");
1870 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1871 1.168 msaitoh evcnt_attach_dynamic(&stats->rnbc[i],
1872 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1873 1.333 msaitoh sc->tcs[i].evnamebuf,
1874 1.168 msaitoh "Receive No Buffers");
1875 1.168 msaitoh }
1876 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
1877 1.168 msaitoh evcnt_attach_dynamic(&stats->pxontxc[i],
1878 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1879 1.331 msaitoh "Priority XON Transmitted");
1880 1.168 msaitoh evcnt_attach_dynamic(&stats->pxofftxc[i],
1881 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1882 1.331 msaitoh "Priority XOFF Transmitted");
1883 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
1884 1.168 msaitoh evcnt_attach_dynamic(&stats->pxon2offc[i],
1885 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1886 1.333 msaitoh sc->tcs[i].evnamebuf,
1887 1.331 msaitoh "Priority XON to XOFF");
1888 1.330 msaitoh evcnt_attach_dynamic(&stats->pxonrxc[i],
1889 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1890 1.331 msaitoh "Priority XON Received");
1891 1.330 msaitoh evcnt_attach_dynamic(&stats->pxoffrxc[i],
1892 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1893 1.331 msaitoh "Priority XOFF Received");
1894 1.168 msaitoh }
1895 1.168 msaitoh }
1896 1.168 msaitoh
1897 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
1898 1.135 msaitoh #ifdef LRO
1899 1.135 msaitoh struct lro_ctrl *lro = &rxr->lro;
1900 1.327 msaitoh #endif
1901 1.135 msaitoh
1902 1.333 msaitoh snprintf(sc->queues[i].evnamebuf,
1903 1.333 msaitoh sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
1904 1.333 msaitoh snprintf(sc->queues[i].namebuf,
1905 1.333 msaitoh sizeof(sc->queues[i].namebuf), "q%d", i);
1906 1.1 dyoung
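		/*
		 * Per-queue sysctl nodes are created under the device's
		 * sysctl tree (e.g. hw.ixgN.qM.interrupt_rate, assuming
		 * the usual hw.<devicename> root).
		 */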
1907 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
1908 1.319 msaitoh aprint_error_dev(dev,
1909 1.319 msaitoh "could not create sysctl root\n");
1910 1.98 msaitoh break;
1911 1.98 msaitoh }
1912 1.1 dyoung
1913 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &rnode,
1914 1.98 msaitoh 0, CTLTYPE_NODE,
1915 1.333 msaitoh sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1916 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1917 1.98 msaitoh break;
1918 1.23 msaitoh
1919 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1920 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
1921 1.98 msaitoh "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1922 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler, 0,
1923 1.333 msaitoh (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1924 1.98 msaitoh break;
1925 1.1 dyoung
1926 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1927 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1928 1.98 msaitoh "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1929 1.98 msaitoh ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1930 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1931 1.98 msaitoh break;
1932 1.1 dyoung
1933 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1934 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1935 1.98 msaitoh "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1936 1.98 msaitoh ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1937 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1938 1.98 msaitoh break;
1939 1.1 dyoung
1940 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1941 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1942 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor next to check"),
1943 1.280 msaitoh ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1944 1.154 msaitoh CTL_CREATE, CTL_EOL) != 0)
1945 1.154 msaitoh break;
1946 1.154 msaitoh
1947 1.154 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1948 1.287 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1949 1.287 msaitoh SYSCTL_DESCR("Receive Descriptor next to refresh"),
1950 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1951 1.287 msaitoh CTL_CREATE, CTL_EOL) != 0)
1952 1.287 msaitoh break;
1953 1.287 msaitoh
1954 1.287 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1955 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1956 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Head"),
1957 1.98 msaitoh ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1958 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1959 1.33 msaitoh break;
1960 1.98 msaitoh
1961 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1962 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1963 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Tail"),
1964 1.98 msaitoh ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1965 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1966 1.28 msaitoh break;
1967 1.98 msaitoh
1968 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
1969 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "IRQs on queue");
1970 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].handleq,
1971 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
1972 1.327 msaitoh "Handled queue in softint");
1973 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
1974 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Requeued in softint");
1975 1.327 msaitoh if (i < __arraycount(stats->qbtc))
1976 1.327 msaitoh evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1977 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1978 1.328 msaitoh "Queue Bytes Transmitted (reg)");
1979 1.327 msaitoh evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1980 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1981 1.328 msaitoh "Queue Packets Transmitted (soft)");
1982 1.327 msaitoh if (i < __arraycount(stats->qptc))
1983 1.280 msaitoh evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1984 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1985 1.328 msaitoh "Queue Packets Transmitted (reg)");
1986 1.327 msaitoh #ifndef IXGBE_LEGACY_TX
1987 1.327 msaitoh evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1988 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1989 1.327 msaitoh "Packets dropped in pcq");
1990 1.327 msaitoh #endif
1991 1.327 msaitoh evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1992 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1993 1.327 msaitoh "TX Queue No Descriptor Available");
1994 1.327 msaitoh evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1995 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "TSO");
1996 1.327 msaitoh
1997 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1998 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1999 1.328 msaitoh "Queue Bytes Received (soft)");
2000 1.327 msaitoh if (i < __arraycount(stats->qbrc))
2001 1.280 msaitoh evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
2002 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2003 1.328 msaitoh "Queue Bytes Received (reg)");
2004 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2005 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2006 1.328 msaitoh "Queue Packets Received (soft)");
2007 1.327 msaitoh if (i < __arraycount(stats->qprc))
2008 1.327 msaitoh evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
2009 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2010 1.328 msaitoh "Queue Packets Received (reg)");
2011 1.327 msaitoh if ((i < __arraycount(stats->qprdc)) &&
2012 1.327 msaitoh (hw->mac.type >= ixgbe_mac_82599EB))
2013 1.151 msaitoh evcnt_attach_dynamic(&stats->qprdc[i],
2014 1.151 msaitoh EVCNT_TYPE_MISC, NULL,
2015 1.333 msaitoh sc->queues[i].evnamebuf,
2016 1.328 msaitoh "Queue Packets Received Drop");
2017 1.33 msaitoh
2018 1.290 msaitoh evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
2019 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
2020 1.98 msaitoh evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2021 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx discarded");
2022 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2023 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
2024 1.98 msaitoh #ifdef LRO
2025 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2026 1.98 msaitoh CTLFLAG_RD, &lro->lro_queued, 0,
2027 1.98 msaitoh "LRO Queued");
2028 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2029 1.98 msaitoh CTLFLAG_RD, &lro->lro_flushed, 0,
2030 1.98 msaitoh "LRO Flushed");
2031 1.98 msaitoh #endif /* LRO */
2032 1.1 dyoung }
2033 1.28 msaitoh
2034 1.99 msaitoh /* MAC stats get their own sub node */
2035 1.98 msaitoh
2036 1.98 msaitoh snprintf(stats->namebuf,
2037 1.98 msaitoh sizeof(stats->namebuf), "%s MAC Statistics", xname);
2038 1.98 msaitoh
2039 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2040 1.98 msaitoh stats->namebuf, "rx csum offload - IP");
2041 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2042 1.98 msaitoh stats->namebuf, "rx csum offload - L4");
2043 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2044 1.98 msaitoh stats->namebuf, "rx csum offload - IP bad");
2045 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2046 1.98 msaitoh stats->namebuf, "rx csum offload - L4 bad");
2047 1.98 msaitoh evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
2048 1.98 msaitoh stats->namebuf, "Interrupt conditions zero");
2049 1.98 msaitoh evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
2050 1.98 msaitoh stats->namebuf, "Legacy interrupts");
2051 1.99 msaitoh
2052 1.98 msaitoh evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2053 1.98 msaitoh stats->namebuf, "CRC Errors");
2054 1.98 msaitoh evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2055 1.98 msaitoh stats->namebuf, "Illegal Byte Errors");
2056 1.98 msaitoh evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2057 1.98 msaitoh stats->namebuf, "Byte Errors");
2058 1.98 msaitoh evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2059 1.98 msaitoh stats->namebuf, "MAC Short Packets Discarded");
2060 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2061 1.98 msaitoh evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2062 1.98 msaitoh stats->namebuf, "Bad SFD");
2063 1.98 msaitoh evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2064 1.98 msaitoh stats->namebuf, "Total Packets Missed");
2065 1.98 msaitoh evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2066 1.98 msaitoh stats->namebuf, "MAC Local Faults");
2067 1.98 msaitoh evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2068 1.98 msaitoh stats->namebuf, "MAC Remote Faults");
2069 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2070 1.326 msaitoh evcnt_attach_dynamic(&stats->link_dn_cnt, EVCNT_TYPE_MISC,
2071 1.326 msaitoh NULL, stats->namebuf, "Link down event in the MAC");
2072 1.98 msaitoh evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2073 1.98 msaitoh stats->namebuf, "Receive Length Errors");
2074 1.98 msaitoh evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2075 1.98 msaitoh stats->namebuf, "Link XON Transmitted");
2076 1.330 msaitoh evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2077 1.330 msaitoh stats->namebuf, "Link XOFF Transmitted");
2078 1.98 msaitoh evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2079 1.98 msaitoh stats->namebuf, "Link XON Received");
2080 1.98 msaitoh evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2081 1.98 msaitoh stats->namebuf, "Link XOFF Received");
2082 1.98 msaitoh
2083 1.98 msaitoh /* Packet Reception Stats */
2084 1.98 msaitoh evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2085 1.98 msaitoh stats->namebuf, "Total Octets Received");
2086 1.98 msaitoh evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2087 1.98 msaitoh stats->namebuf, "Good Octets Received");
2088 1.98 msaitoh evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2089 1.98 msaitoh stats->namebuf, "Total Packets Received");
2090 1.98 msaitoh evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2091 1.98 msaitoh stats->namebuf, "Good Packets Received");
2092 1.98 msaitoh evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2093 1.98 msaitoh stats->namebuf, "Multicast Packets Received");
2094 1.98 msaitoh evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2095 1.98 msaitoh stats->namebuf, "Broadcast Packets Received");
2096 1.98 msaitoh evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2097 1.98 msaitoh 	    stats->namebuf, "64 byte frames received");
2098 1.98 msaitoh evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2099 1.98 msaitoh stats->namebuf, "65-127 byte frames received");
2100 1.98 msaitoh evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2101 1.98 msaitoh stats->namebuf, "128-255 byte frames received");
2102 1.98 msaitoh evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2103 1.98 msaitoh stats->namebuf, "256-511 byte frames received");
2104 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2105 1.98 msaitoh stats->namebuf, "512-1023 byte frames received");
2106 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2107 1.98 msaitoh 	    stats->namebuf, "1024-1522 byte frames received");
2108 1.98 msaitoh evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2109 1.98 msaitoh stats->namebuf, "Receive Undersized");
2110 1.98 msaitoh evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2111 1.98 msaitoh 	    stats->namebuf, "Fragmented Packets Received");
2112 1.98 msaitoh evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2113 1.98 msaitoh stats->namebuf, "Oversized Packets Received");
2114 1.98 msaitoh evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2115 1.98 msaitoh stats->namebuf, "Received Jabber");
2116 1.98 msaitoh evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2117 1.98 msaitoh stats->namebuf, "Management Packets Received");
2118 1.98 msaitoh evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2119 1.98 msaitoh stats->namebuf, "Management Packets Dropped");
2120 1.98 msaitoh evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2121 1.98 msaitoh stats->namebuf, "Checksum Errors");
2122 1.1 dyoung
2123 1.98 msaitoh /* Packet Transmission Stats */
2124 1.98 msaitoh evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2125 1.98 msaitoh stats->namebuf, "Good Octets Transmitted");
2126 1.98 msaitoh evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2127 1.98 msaitoh stats->namebuf, "Total Packets Transmitted");
2128 1.98 msaitoh evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2129 1.98 msaitoh stats->namebuf, "Good Packets Transmitted");
2130 1.98 msaitoh evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2131 1.98 msaitoh stats->namebuf, "Broadcast Packets Transmitted");
2132 1.98 msaitoh evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2133 1.98 msaitoh stats->namebuf, "Multicast Packets Transmitted");
2134 1.98 msaitoh evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2135 1.98 msaitoh stats->namebuf, "Management Packets Transmitted");
2136 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2137 1.98 msaitoh 	    stats->namebuf, "64 byte frames transmitted");
2138 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2139 1.98 msaitoh stats->namebuf, "65-127 byte frames transmitted");
2140 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2141 1.98 msaitoh stats->namebuf, "128-255 byte frames transmitted");
2142 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2143 1.98 msaitoh stats->namebuf, "256-511 byte frames transmitted");
2144 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2145 1.98 msaitoh stats->namebuf, "512-1023 byte frames transmitted");
2146 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2147 1.98 msaitoh stats->namebuf, "1024-1522 byte frames transmitted");
2148 1.99 msaitoh } /* ixgbe_add_hw_stats */
2149 1.48 msaitoh
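/************************************************************************
 * ixgbe_clear_evcnt
 *
 *   Reset the driver, MAC and per-queue event counters (and the
 *   per-ring soft error tallies) to zero.
 ************************************************************************/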
2150 1.1 dyoung static void
2151 1.333 msaitoh ixgbe_clear_evcnt(struct ixgbe_softc *sc)
2152 1.1 dyoung {
2153 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
2154 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
2155 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2156 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
2157 1.168 msaitoh int i;
2158 1.98 msaitoh
2159 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
2160 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
2161 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
2162 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
2163 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
2164 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
2165 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
2166 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, 0);
2167 1.333 msaitoh IXGBE_EVC_STORE(&sc->watchdog_events, 0);
2168 1.333 msaitoh IXGBE_EVC_STORE(&sc->admin_irqev, 0);
2169 1.333 msaitoh IXGBE_EVC_STORE(&sc->link_workev, 0);
2170 1.333 msaitoh IXGBE_EVC_STORE(&sc->mod_workev, 0);
2171 1.333 msaitoh IXGBE_EVC_STORE(&sc->msf_workev, 0);
2172 1.333 msaitoh IXGBE_EVC_STORE(&sc->phy_workev, 0);
2173 1.98 msaitoh
2174 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2175 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
2176 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpc[i], 0);
2177 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
2178 1.305 msaitoh IXGBE_EVC_STORE(&stats->rnbc[i], 0);
2179 1.168 msaitoh }
2180 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
2181 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
2182 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
2183 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
2184 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
2185 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2186 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
2187 1.168 msaitoh }
2188 1.168 msaitoh }
2189 1.168 msaitoh
2190 1.333 msaitoh txr = sc->tx_rings;
2191 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
2192 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
2193 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
2194 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].req, 0);
2195 1.305 msaitoh IXGBE_EVC_STORE(&txr->total_packets, 0);
2196 1.98 msaitoh #ifndef IXGBE_LEGACY_TX
2197 1.305 msaitoh IXGBE_EVC_STORE(&txr->pcq_drops, 0);
2198 1.45 msaitoh #endif
2199 1.327 msaitoh IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
2200 1.327 msaitoh IXGBE_EVC_STORE(&txr->tso_tx, 0);
2201 1.134 msaitoh txr->q_efbig_tx_dma_setup = 0;
2202 1.134 msaitoh txr->q_mbuf_defrag_failed = 0;
2203 1.134 msaitoh txr->q_efbig2_tx_dma_setup = 0;
2204 1.134 msaitoh txr->q_einval_tx_dma_setup = 0;
2205 1.134 msaitoh txr->q_other_tx_dma_setup = 0;
2206 1.134 msaitoh txr->q_eagain_tx_dma_setup = 0;
2207 1.134 msaitoh txr->q_enomem_tx_dma_setup = 0;
2208 1.134 msaitoh txr->q_tso_err = 0;
2209 1.1 dyoung
2210 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
2211 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprc[i], 0);
2212 1.305 msaitoh IXGBE_EVC_STORE(&stats->qptc[i], 0);
2213 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbrc[i], 0);
2214 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbtc[i], 0);
2215 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2216 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprdc[i], 0);
2217 1.98 msaitoh }
2218 1.98 msaitoh
2219 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_packets, 0);
2220 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
2221 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_copies, 0);
2222 1.305 msaitoh IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
2223 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
2224 1.305 msaitoh }
2225 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs, 0);
2226 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs, 0);
2227 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
2228 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
2229 1.305 msaitoh IXGBE_EVC_STORE(&stats->intzero, 0);
2230 1.305 msaitoh IXGBE_EVC_STORE(&stats->legint, 0);
2231 1.305 msaitoh IXGBE_EVC_STORE(&stats->crcerrs, 0);
2232 1.305 msaitoh IXGBE_EVC_STORE(&stats->illerrc, 0);
2233 1.305 msaitoh IXGBE_EVC_STORE(&stats->errbc, 0);
2234 1.305 msaitoh IXGBE_EVC_STORE(&stats->mspdc, 0);
2235 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2236 1.305 msaitoh IXGBE_EVC_STORE(&stats->mbsdc, 0);
2237 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpctotal, 0);
2238 1.305 msaitoh IXGBE_EVC_STORE(&stats->mlfc, 0);
2239 1.305 msaitoh IXGBE_EVC_STORE(&stats->mrfc, 0);
2240 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2241 1.326 msaitoh IXGBE_EVC_STORE(&stats->link_dn_cnt, 0);
2242 1.305 msaitoh IXGBE_EVC_STORE(&stats->rlec, 0);
2243 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxontxc, 0);
2244 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxonrxc, 0);
2245 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxofftxc, 0);
2246 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxoffrxc, 0);
2247 1.98 msaitoh
2248 1.98 msaitoh /* Packet Reception Stats */
2249 1.305 msaitoh IXGBE_EVC_STORE(&stats->tor, 0);
2250 1.305 msaitoh IXGBE_EVC_STORE(&stats->gorc, 0);
2251 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpr, 0);
2252 1.305 msaitoh IXGBE_EVC_STORE(&stats->gprc, 0);
2253 1.305 msaitoh IXGBE_EVC_STORE(&stats->mprc, 0);
2254 1.305 msaitoh IXGBE_EVC_STORE(&stats->bprc, 0);
2255 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc64, 0);
2256 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc127, 0);
2257 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc255, 0);
2258 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc511, 0);
2259 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1023, 0);
2260 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1522, 0);
2261 1.305 msaitoh IXGBE_EVC_STORE(&stats->ruc, 0);
2262 1.305 msaitoh IXGBE_EVC_STORE(&stats->rfc, 0);
2263 1.305 msaitoh IXGBE_EVC_STORE(&stats->roc, 0);
2264 1.305 msaitoh IXGBE_EVC_STORE(&stats->rjc, 0);
2265 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngprc, 0);
2266 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngpdc, 0);
2267 1.305 msaitoh IXGBE_EVC_STORE(&stats->xec, 0);
2268 1.98 msaitoh
2269 1.98 msaitoh /* Packet Transmission Stats */
2270 1.305 msaitoh IXGBE_EVC_STORE(&stats->gotc, 0);
2271 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpt, 0);
2272 1.305 msaitoh IXGBE_EVC_STORE(&stats->gptc, 0);
2273 1.305 msaitoh IXGBE_EVC_STORE(&stats->bptc, 0);
2274 1.305 msaitoh IXGBE_EVC_STORE(&stats->mptc, 0);
2275 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngptc, 0);
2276 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc64, 0);
2277 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc127, 0);
2278 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc255, 0);
2279 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc511, 0);
2280 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1023, 0);
2281 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1522, 0);
2282 1.98 msaitoh }
2283 1.98 msaitoh
2284 1.99 msaitoh /************************************************************************
2285 1.99 msaitoh * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2286 1.99 msaitoh *
2287 1.99 msaitoh * Retrieves the TDH value from the hardware
2288 1.99 msaitoh ************************************************************************/
2289 1.185 msaitoh static int
2290 1.98 msaitoh ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2291 1.98 msaitoh {
2292 1.98 msaitoh struct sysctlnode node = *rnode;
2293 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2294 1.333 msaitoh struct ixgbe_softc *sc;
2295 1.98 msaitoh uint32_t val;
2296 1.98 msaitoh
2297 1.99 msaitoh if (!txr)
2298 1.99 msaitoh return (0);
2299 1.99 msaitoh
2300 1.333 msaitoh sc = txr->sc;
2301 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2302 1.169 msaitoh return (EPERM);
2303 1.169 msaitoh
2304 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->me));
2305 1.98 msaitoh node.sysctl_data = &val;
2306 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2307 1.99 msaitoh } /* ixgbe_sysctl_tdh_handler */
2308 1.98 msaitoh
2309 1.99 msaitoh /************************************************************************
2310 1.99 msaitoh * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2311 1.99 msaitoh *
2312 1.99 msaitoh * Retrieves the TDT value from the hardware
2313 1.99 msaitoh ************************************************************************/
2314 1.185 msaitoh static int
2315 1.98 msaitoh ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2316 1.98 msaitoh {
2317 1.98 msaitoh struct sysctlnode node = *rnode;
2318 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2319 1.333 msaitoh struct ixgbe_softc *sc;
2320 1.98 msaitoh uint32_t val;
2321 1.1 dyoung
2322 1.99 msaitoh if (!txr)
2323 1.99 msaitoh return (0);
2324 1.99 msaitoh
2325 1.333 msaitoh sc = txr->sc;
2326 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2327 1.169 msaitoh return (EPERM);
2328 1.169 msaitoh
2329 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->me));
2330 1.98 msaitoh node.sysctl_data = &val;
2331 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2332 1.99 msaitoh } /* ixgbe_sysctl_tdt_handler */
2333 1.45 msaitoh
2334 1.99 msaitoh /************************************************************************
2335 1.154 msaitoh * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2336 1.154 msaitoh * handler function
2337 1.154 msaitoh *
2338 1.154 msaitoh * Retrieves the next_to_check value
2339 1.154 msaitoh ************************************************************************/
2340 1.185 msaitoh static int
2341 1.154 msaitoh ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2342 1.154 msaitoh {
2343 1.154 msaitoh struct sysctlnode node = *rnode;
2344 1.154 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2345 1.333 msaitoh struct ixgbe_softc *sc;
2346 1.154 msaitoh uint32_t val;
2347 1.154 msaitoh
2348 1.154 msaitoh if (!rxr)
2349 1.154 msaitoh return (0);
2350 1.154 msaitoh
2351 1.333 msaitoh sc = rxr->sc;
2352 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2353 1.169 msaitoh return (EPERM);
2354 1.169 msaitoh
2355 1.154 msaitoh val = rxr->next_to_check;
2356 1.154 msaitoh node.sysctl_data = &val;
2357 1.154 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2358 1.154 msaitoh } /* ixgbe_sysctl_next_to_check_handler */
2359 1.154 msaitoh
2360 1.154 msaitoh /************************************************************************
2361 1.287 msaitoh  * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
2362 1.287 msaitoh * handler function
2363 1.287 msaitoh *
2364 1.287 msaitoh * Retrieves the next_to_refresh value
2365 1.287 msaitoh ************************************************************************/
2366 1.287 msaitoh static int
2367 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2368 1.287 msaitoh {
2369 1.287 msaitoh struct sysctlnode node = *rnode;
2370 1.287 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2371 1.333 msaitoh struct ixgbe_softc *sc;
2372 1.287 msaitoh uint32_t val;
2373 1.287 msaitoh
2374 1.287 msaitoh if (!rxr)
2375 1.287 msaitoh return (0);
2376 1.287 msaitoh
2377 1.333 msaitoh sc = rxr->sc;
2378 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2379 1.287 msaitoh return (EPERM);
2380 1.287 msaitoh
2381 1.287 msaitoh val = rxr->next_to_refresh;
2382 1.287 msaitoh node.sysctl_data = &val;
2383 1.287 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2384 1.287 msaitoh } /* ixgbe_sysctl_next_to_refresh_handler */
2385 1.287 msaitoh
2386 1.287 msaitoh /************************************************************************
2387 1.99 msaitoh * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2388 1.99 msaitoh *
2389 1.99 msaitoh * Retrieves the RDH value from the hardware
2390 1.99 msaitoh ************************************************************************/
2391 1.185 msaitoh static int
2392 1.98 msaitoh ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2393 1.98 msaitoh {
2394 1.98 msaitoh struct sysctlnode node = *rnode;
2395 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2396 1.333 msaitoh struct ixgbe_softc *sc;
2397 1.98 msaitoh uint32_t val;
2398 1.1 dyoung
2399 1.99 msaitoh if (!rxr)
2400 1.99 msaitoh return (0);
2401 1.99 msaitoh
2402 1.333 msaitoh sc = rxr->sc;
2403 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2404 1.169 msaitoh return (EPERM);
2405 1.169 msaitoh
2406 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDH(rxr->me));
2407 1.98 msaitoh node.sysctl_data = &val;
2408 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2409 1.99 msaitoh } /* ixgbe_sysctl_rdh_handler */
2410 1.1 dyoung
2411 1.99 msaitoh /************************************************************************
2412 1.99 msaitoh * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2413 1.99 msaitoh *
2414 1.99 msaitoh * Retrieves the RDT value from the hardware
2415 1.99 msaitoh ************************************************************************/
2416 1.185 msaitoh static int
2417 1.98 msaitoh ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2418 1.98 msaitoh {
2419 1.98 msaitoh struct sysctlnode node = *rnode;
2420 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2421 1.333 msaitoh struct ixgbe_softc *sc;
2422 1.98 msaitoh uint32_t val;
2423 1.1 dyoung
2424 1.99 msaitoh if (!rxr)
2425 1.99 msaitoh return (0);
2426 1.99 msaitoh
2427 1.333 msaitoh sc = rxr->sc;
2428 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2429 1.169 msaitoh return (EPERM);
2430 1.169 msaitoh
2431 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDT(rxr->me));
2432 1.98 msaitoh node.sysctl_data = &val;
2433 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2434 1.99 msaitoh } /* ixgbe_sysctl_rdt_handler */
2435 1.1 dyoung
2436 1.193 msaitoh static int
2437 1.193 msaitoh ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2438 1.193 msaitoh {
2439 1.193 msaitoh struct ifnet *ifp = &ec->ec_if;
2440 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2441 1.193 msaitoh int rv;
2442 1.193 msaitoh
2443 1.193 msaitoh if (set)
2444 1.333 msaitoh rv = ixgbe_register_vlan(sc, vid);
2445 1.193 msaitoh else
2446 1.333 msaitoh rv = ixgbe_unregister_vlan(sc, vid);
2447 1.193 msaitoh
2448 1.200 msaitoh if (rv != 0)
2449 1.200 msaitoh return rv;
2450 1.200 msaitoh
2451 1.200 msaitoh /*
2452 1.200 msaitoh * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2453 1.200 msaitoh * or 0 to 1.
2454 1.200 msaitoh */
2455 1.200 msaitoh if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2456 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2457 1.200 msaitoh
2458 1.193 msaitoh return rv;
2459 1.193 msaitoh }
2460 1.193 msaitoh
2461 1.99 msaitoh /************************************************************************
2462 1.99 msaitoh * ixgbe_register_vlan
2463 1.99 msaitoh *
2464 1.99 msaitoh * Run via vlan config EVENT, it enables us to use the
2465 1.99 msaitoh * HW Filter table since we can get the vlan id. This
2466 1.99 msaitoh * just creates the entry in the soft version of the
2467 1.99 msaitoh  * VFTA; init will repopulate the real table.
2468 1.99 msaitoh ************************************************************************/
2469 1.193 msaitoh static int
2470 1.333 msaitoh ixgbe_register_vlan(struct ixgbe_softc *sc, u16 vtag)
2471 1.98 msaitoh {
2472 1.98 msaitoh u16 index, bit;
2473 1.193 msaitoh int error;
2474 1.48 msaitoh
2475 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2476 1.193 msaitoh return EINVAL;
2477 1.1 dyoung
2478 1.333 msaitoh IXGBE_CORE_LOCK(sc);
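	/*
	 * The shadow VFTA mirrors the hardware table: 4096 VLAN IDs kept as
	 * a 128-word bitmap, so bits [11:5] of the ID select the 32-bit word
	 * and bits [4:0] select the bit within it.
	 */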
2479 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2480 1.98 msaitoh bit = vtag & 0x1F;
2481 1.333 msaitoh sc->shadow_vfta[index] |= ((u32)1 << bit);
2482 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, true,
2483 1.193 msaitoh true);
2484 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2485 1.193 msaitoh if (error != 0)
2486 1.193 msaitoh error = EACCES;
2487 1.193 msaitoh
2488 1.193 msaitoh return error;
2489 1.99 msaitoh } /* ixgbe_register_vlan */
2490 1.1 dyoung
2491 1.99 msaitoh /************************************************************************
2492 1.99 msaitoh * ixgbe_unregister_vlan
2493 1.99 msaitoh *
2494 1.99 msaitoh * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2495 1.99 msaitoh ************************************************************************/
2496 1.193 msaitoh static int
2497 1.333 msaitoh ixgbe_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
2498 1.98 msaitoh {
2499 1.98 msaitoh u16 index, bit;
2500 1.193 msaitoh int error;
2501 1.1 dyoung
2502 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2503 1.193 msaitoh return EINVAL;
2504 1.1 dyoung
2505 1.333 msaitoh IXGBE_CORE_LOCK(sc);
2506 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2507 1.98 msaitoh bit = vtag & 0x1F;
2508 1.333 msaitoh sc->shadow_vfta[index] &= ~((u32)1 << bit);
2509 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, false,
2510 1.193 msaitoh true);
2511 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2512 1.193 msaitoh if (error != 0)
2513 1.193 msaitoh error = EACCES;
2514 1.193 msaitoh
2515 1.193 msaitoh return error;
2516 1.99 msaitoh } /* ixgbe_unregister_vlan */
2517 1.98 msaitoh
2518 1.98 msaitoh static void
2519 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *sc)
2520 1.98 msaitoh {
2521 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2522 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2523 1.98 msaitoh struct rx_ring *rxr;
2524 1.200 msaitoh u32 ctrl;
2525 1.186 msaitoh int i;
2526 1.177 msaitoh bool hwtagging;
2527 1.98 msaitoh
2528 1.178 msaitoh /* Enable HW tagging only if any vlan is attached */
2529 1.177 msaitoh hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2530 1.178 msaitoh && VLAN_ATTACHED(ec);
2531 1.1 dyoung
2532 1.98 msaitoh /* Setup the queues for vlans */
2533 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
2534 1.333 msaitoh rxr = &sc->rx_rings[i];
2535 1.178 msaitoh /*
2536 1.178 msaitoh 		 * On 82599 and later, the VLAN enable is per-queue in RXDCTL.
2537 1.178 msaitoh */
2538 1.177 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
2539 1.177 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2540 1.177 msaitoh if (hwtagging)
2541 1.115 msaitoh ctrl |= IXGBE_RXDCTL_VME;
2542 1.177 msaitoh else
2543 1.177 msaitoh ctrl &= ~IXGBE_RXDCTL_VME;
2544 1.177 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2545 1.98 msaitoh }
2546 1.177 msaitoh rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2547 1.1 dyoung }
2548 1.1 dyoung
2549 1.200 msaitoh /* VLAN hw tagging for 82598 */
2550 1.200 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
2551 1.200 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2552 1.200 msaitoh if (hwtagging)
2553 1.200 msaitoh ctrl |= IXGBE_VLNCTRL_VME;
2554 1.200 msaitoh else
2555 1.200 msaitoh ctrl &= ~IXGBE_VLNCTRL_VME;
2556 1.200 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2557 1.200 msaitoh }
2558 1.200 msaitoh } /* ixgbe_setup_vlan_hw_tagging */
2559 1.200 msaitoh
2560 1.200 msaitoh static void
2561 1.333 msaitoh ixgbe_setup_vlan_hw_support(struct ixgbe_softc *sc)
2562 1.200 msaitoh {
2563 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2564 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2565 1.200 msaitoh int i;
2566 1.200 msaitoh u32 ctrl;
2567 1.200 msaitoh struct vlanid_list *vlanidp;
2568 1.200 msaitoh
2569 1.200 msaitoh /*
2570 1.294 skrll * This function is called from both if_init and ifflags_cb()
2571 1.200 msaitoh * on NetBSD.
2572 1.200 msaitoh */
2573 1.200 msaitoh
2574 1.200 msaitoh /*
2575 1.200 msaitoh * Part 1:
2576 1.200 msaitoh * Setup VLAN HW tagging
2577 1.200 msaitoh */
2578 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2579 1.200 msaitoh
2580 1.200 msaitoh /*
2581 1.200 msaitoh * Part 2:
2582 1.200 msaitoh * Setup VLAN HW filter
2583 1.200 msaitoh */
2584 1.193 msaitoh /* Cleanup shadow_vfta */
2585 1.193 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2586 1.333 msaitoh sc->shadow_vfta[i] = 0;
2587 1.193 msaitoh /* Generate shadow_vfta from ec_vids */
2588 1.201 msaitoh ETHER_LOCK(ec);
2589 1.193 msaitoh SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2590 1.193 msaitoh uint32_t idx;
2591 1.193 msaitoh
2592 1.193 msaitoh idx = vlanidp->vid / 32;
2593 1.193 msaitoh KASSERT(idx < IXGBE_VFTA_SIZE);
2594 1.333 msaitoh sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2595 1.193 msaitoh }
2596 1.201 msaitoh ETHER_UNLOCK(ec);
2597 1.99 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2598 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), sc->shadow_vfta[i]);
2599 1.22 msaitoh
2600 1.98 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2601 1.98 msaitoh /* Enable the Filter Table if enabled */
2602 1.177 msaitoh if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2603 1.98 msaitoh ctrl |= IXGBE_VLNCTRL_VFE;
2604 1.177 msaitoh else
2605 1.177 msaitoh ctrl &= ~IXGBE_VLNCTRL_VFE;
2606 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2607 1.99 msaitoh } /* ixgbe_setup_vlan_hw_support */
2608 1.1 dyoung
2609 1.99 msaitoh /************************************************************************
2610 1.99 msaitoh * ixgbe_get_slot_info
2611 1.99 msaitoh *
2612 1.99 msaitoh * Get the width and transaction speed of
2613 1.99 msaitoh * the slot this adapter is plugged into.
2614 1.99 msaitoh ************************************************************************/
2615 1.98 msaitoh static void
2616 1.333 msaitoh ixgbe_get_slot_info(struct ixgbe_softc *sc)
2617 1.98 msaitoh {
2618 1.333 msaitoh device_t dev = sc->dev;
2619 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2620 1.186 msaitoh u32 offset;
2621 1.98 msaitoh u16 link;
2622 1.186 msaitoh int bus_info_valid = TRUE;
2623 1.99 msaitoh
2624 1.99 msaitoh /* Some devices are behind an internal bridge */
2625 1.99 msaitoh switch (hw->device_id) {
2626 1.99 msaitoh case IXGBE_DEV_ID_82599_SFP_SF_QP:
2627 1.99 msaitoh case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2628 1.99 msaitoh goto get_parent_info;
2629 1.99 msaitoh default:
2630 1.99 msaitoh break;
2631 1.99 msaitoh }
2632 1.1 dyoung
2633 1.99 msaitoh ixgbe_get_bus_info(hw);
2634 1.99 msaitoh
2635 1.99 msaitoh /*
2636 1.99 msaitoh * Some devices don't use PCI-E, but there is no need
2637 1.99 msaitoh * to display "Unknown" for bus speed and width.
2638 1.99 msaitoh */
2639 1.99 msaitoh switch (hw->mac.type) {
2640 1.99 msaitoh case ixgbe_mac_X550EM_x:
2641 1.99 msaitoh case ixgbe_mac_X550EM_a:
2642 1.99 msaitoh return;
2643 1.99 msaitoh default:
2644 1.99 msaitoh goto display;
2645 1.1 dyoung }
2646 1.1 dyoung
2647 1.99 msaitoh get_parent_info:
2648 1.98 msaitoh /*
2649 1.99 msaitoh * For the Quad port adapter we need to parse back
2650 1.99 msaitoh * up the PCI tree to find the speed of the expansion
2651 1.99 msaitoh * slot into which this adapter is plugged. A bit more work.
2652 1.99 msaitoh */
2653 1.98 msaitoh dev = device_parent(device_parent(dev));
2654 1.99 msaitoh #if 0
2655 1.98 msaitoh #ifdef IXGBE_DEBUG
2656 1.99 msaitoh device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2657 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2658 1.98 msaitoh #endif
2659 1.98 msaitoh dev = device_parent(device_parent(dev));
2660 1.98 msaitoh #ifdef IXGBE_DEBUG
2661 1.99 msaitoh device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2662 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2663 1.99 msaitoh #endif
2664 1.1 dyoung #endif
2665 1.98 msaitoh /* Now get the PCI Express Capabilities offset */
2666 1.333 msaitoh if (pci_get_capability(sc->osdep.pc, sc->osdep.tag,
2667 1.99 msaitoh PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2668 1.99 msaitoh /*
2669 1.99 msaitoh * Hmm...can't get PCI-Express capabilities.
2670 1.99 msaitoh * Falling back to default method.
2671 1.99 msaitoh */
2672 1.99 msaitoh bus_info_valid = FALSE;
2673 1.99 msaitoh ixgbe_get_bus_info(hw);
2674 1.99 msaitoh goto display;
2675 1.99 msaitoh }
2676 1.98 msaitoh /* ...and read the Link Status Register */
2677 1.333 msaitoh link = pci_conf_read(sc->osdep.pc, sc->osdep.tag,
2678 1.120 msaitoh offset + PCIE_LCSR) >> 16;
2679 1.120 msaitoh ixgbe_set_pci_config_data_generic(hw, link);
2680 1.52 msaitoh
2681 1.98 msaitoh display:
2682 1.99 msaitoh device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2683 1.186 msaitoh ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2684 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2685 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2686 1.99 msaitoh "Unknown"),
2687 1.99 msaitoh ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2688 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2689 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2690 1.99 msaitoh "Unknown"));
2691 1.99 msaitoh
2692 1.99 msaitoh if (bus_info_valid) {
2693 1.99 msaitoh if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2694 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2695 1.99 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500))) {
2696 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2697 1.99 msaitoh " for this card\n is not sufficient for"
2698 1.99 msaitoh " optimal performance.\n");
2699 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2700 1.99 msaitoh "PCIE, or x4 PCIE Gen2 slot is required.\n");
2701 1.99 msaitoh }
2702 1.99 msaitoh if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2703 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2704 1.99 msaitoh (hw->bus.speed < ixgbe_bus_speed_8000))) {
2705 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2706 1.99 msaitoh " for this card\n is not sufficient for"
2707 1.99 msaitoh " optimal performance.\n");
2708 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2709 1.99 msaitoh "PCIE Gen3 slot is required.\n");
2710 1.99 msaitoh }
2711 1.99 msaitoh } else
2712 1.319 msaitoh device_printf(dev,
2713 1.319 msaitoh "Unable to determine slot speed/width. The speed/width "
2714 1.319 msaitoh "reported are that of the internal switch.\n");
2715 1.45 msaitoh
2716 1.45 msaitoh return;
2717 1.99 msaitoh } /* ixgbe_get_slot_info */
2718 1.1 dyoung
2719 1.99 msaitoh /************************************************************************
2720 1.321 msaitoh * ixgbe_enable_queue - Queue Interrupt Enabler
2721 1.99 msaitoh ************************************************************************/
2722 1.1 dyoung static inline void
2723 1.333 msaitoh ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
2724 1.1 dyoung {
2725 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2726 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2727 1.197 msaitoh u64 queue = 1ULL << vector;
2728 1.186 msaitoh u32 mask;
2729 1.1 dyoung
2730 1.139 knakahar mutex_enter(&que->dc_mtx);
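	/*
	 * Queue interrupt disabling nests (see ixgbe_disable_queue_internal);
	 * only re-enable the vector in hardware once the last outstanding
	 * disable has been released.
	 */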
2731 1.139 knakahar if (que->disabled_count > 0 && --que->disabled_count > 0)
2732 1.127 knakahar goto out;
2733 1.127 knakahar
2734 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2735 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2736 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2737 1.1 dyoung } else {
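		/*
		 * On 82599 and newer the 64 queue interrupt bits are split
		 * across EIMS_EX(0) and EIMS_EX(1), 32 bits each.
		 */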
2738 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2739 1.98 msaitoh if (mask)
2740 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2741 1.98 msaitoh mask = (queue >> 32);
2742 1.98 msaitoh if (mask)
2743 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2744 1.1 dyoung }
2745 1.127 knakahar out:
2746 1.139 knakahar mutex_exit(&que->dc_mtx);
2747 1.99 msaitoh } /* ixgbe_enable_queue */
2748 1.1 dyoung
2749 1.99 msaitoh /************************************************************************
2750 1.139 knakahar * ixgbe_disable_queue_internal
2751 1.99 msaitoh ************************************************************************/
2752 1.82 msaitoh static inline void
2753 1.333 msaitoh ixgbe_disable_queue_internal(struct ixgbe_softc *sc, u32 vector, bool nestok)
2754 1.1 dyoung {
2755 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2756 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2757 1.197 msaitoh u64 queue = 1ULL << vector;
2758 1.186 msaitoh u32 mask;
2759 1.1 dyoung
2760 1.139 knakahar mutex_enter(&que->dc_mtx);
2761 1.139 knakahar
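	/*
	 * Only the first disable masks the vector in hardware; nested calls
	 * (when nestok is true) just bump the count that ixgbe_enable_queue
	 * unwinds.
	 */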
2762 1.139 knakahar if (que->disabled_count > 0) {
2763 1.139 knakahar if (nestok)
2764 1.139 knakahar que->disabled_count++;
2765 1.139 knakahar goto out;
2766 1.139 knakahar }
2767 1.139 knakahar que->disabled_count++;
2768 1.127 knakahar
2769 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2770 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2771 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2772 1.1 dyoung } else {
2773 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2774 1.98 msaitoh if (mask)
2775 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2776 1.98 msaitoh mask = (queue >> 32);
2777 1.98 msaitoh if (mask)
2778 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2779 1.1 dyoung }
2780 1.127 knakahar out:
2781 1.139 knakahar mutex_exit(&que->dc_mtx);
2782 1.139 knakahar } /* ixgbe_disable_queue_internal */
2783 1.139 knakahar
2784 1.139 knakahar /************************************************************************
2785 1.139 knakahar * ixgbe_disable_queue
2786 1.139 knakahar ************************************************************************/
2787 1.139 knakahar static inline void
2788 1.333 msaitoh ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
2789 1.139 knakahar {
2790 1.139 knakahar
2791 1.333 msaitoh ixgbe_disable_queue_internal(sc, vector, true);
2792 1.99 msaitoh } /* ixgbe_disable_queue */
2793 1.1 dyoung
2794 1.99 msaitoh /************************************************************************
2795 1.133 knakahar * ixgbe_sched_handle_que - schedule deferred packet processing
2796 1.133 knakahar ************************************************************************/
2797 1.133 knakahar static inline void
2798 1.333 msaitoh ixgbe_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que)
2799 1.133 knakahar {
2800 1.133 knakahar
2801 1.185 msaitoh if (que->txrx_use_workqueue) {
2802 1.133 knakahar /*
2803 1.333 msaitoh 		 * sc->que_wq is bound to each CPU instead of each NIC
2804 1.133 knakahar 		 * queue to reduce the number of workqueue kthreads. As
2805 1.133 knakahar 		 * we have to consider interrupt affinity in this
2806 1.133 knakahar 		 * function, the workqueue kthread must be WQ_PERCPU.
2807 1.133 knakahar 		 * If we created a WQ_PERCPU workqueue kthread for each
2808 1.133 knakahar 		 * NIC queue, the number of created kthreads would most
2809 1.133 knakahar 		 * often be (number of used NIC queues) * (number of
2810 1.133 knakahar 		 * CPUs) = (number of CPUs) ^ 2.
2811 1.133 knakahar 		 *
2812 1.133 knakahar 		 * Repeated interrupts from the same NIC queue are
2813 1.133 knakahar 		 * avoided by masking that queue's interrupt, and
2814 1.133 knakahar 		 * different NIC queues' interrupts use different
2815 1.133 knakahar 		 * struct work (que->wq_cookie). So an "enqueued" flag
2816 1.133 knakahar 		 * to avoid calling workqueue_enqueue() twice is not required.
2817 1.133 knakahar */
2818 1.333 msaitoh workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu());
2819 1.319 msaitoh } else
2820 1.133 knakahar softint_schedule(que->que_si);
2821 1.133 knakahar }
2822 1.133 knakahar
2823 1.133 knakahar /************************************************************************
2824 1.99 msaitoh * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2825 1.99 msaitoh ************************************************************************/
2826 1.34 msaitoh static int
2827 1.1 dyoung ixgbe_msix_que(void *arg)
2828 1.1 dyoung {
2829 1.1 dyoung struct ix_queue *que = arg;
2830 1.339 msaitoh struct ixgbe_softc *sc = que->sc;
2831 1.333 msaitoh struct ifnet *ifp = sc->ifp;
2832 1.1 dyoung struct tx_ring *txr = que->txr;
2833 1.1 dyoung struct rx_ring *rxr = que->rxr;
2834 1.1 dyoung u32 newitr = 0;
2835 1.1 dyoung
2836 1.33 msaitoh /* Protect against spurious interrupts */
2837 1.33 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
2838 1.34 msaitoh return 0;
2839 1.33 msaitoh
2840 1.333 msaitoh ixgbe_disable_queue(sc, que->msix);
2841 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
2842 1.1 dyoung
2843 1.147 knakahar /*
2844 1.147 knakahar * Don't change "que->txrx_use_workqueue" from this point to avoid
2845 1.147 knakahar * flip-flopping softint/workqueue mode in one deferred processing.
2846 1.147 knakahar */
2847 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
2848 1.147 knakahar
2849 1.1 dyoung IXGBE_TX_LOCK(txr);
2850 1.33 msaitoh ixgbe_txeof(txr);
2851 1.1 dyoung IXGBE_TX_UNLOCK(txr);
2852 1.1 dyoung
2853 1.1 dyoung /* Do AIM now? */
2854 1.1 dyoung
2855 1.333 msaitoh if (sc->enable_aim == false)
2856 1.1 dyoung goto no_calc;
2857 1.1 dyoung /*
2858 1.99 msaitoh * Do Adaptive Interrupt Moderation:
2859 1.99 msaitoh * - Write out last calculated setting
2860 1.99 msaitoh * - Calculate based on average size over
2861 1.99 msaitoh * the last interval.
2862 1.99 msaitoh */
2863 1.99 msaitoh if (que->eitr_setting)
2864 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, que->eitr_setting);
2865 1.99 msaitoh
2866 1.98 msaitoh que->eitr_setting = 0;
2867 1.1 dyoung
2868 1.98 msaitoh /* Idle, do nothing */
2869 1.186 msaitoh if ((txr->bytes == 0) && (rxr->bytes == 0))
2870 1.186 msaitoh goto no_calc;
2871 1.185 msaitoh
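	/*
	 * Use the average packet size (bytes/packets) seen since the last
	 * interrupt as the basis for the next EITR interval: larger packets
	 * mean fewer packets per second, so a longer interval is acceptable.
	 */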
2872 1.1 dyoung if ((txr->bytes) && (txr->packets))
2873 1.98 msaitoh newitr = txr->bytes/txr->packets;
2874 1.1 dyoung if ((rxr->bytes) && (rxr->packets))
2875 1.165 riastrad newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2876 1.1 dyoung newitr += 24; /* account for hardware frame, crc */
2877 1.1 dyoung
2878 1.1 dyoung /* set an upper boundary */
2879 1.165 riastrad newitr = uimin(newitr, 3000);
2880 1.1 dyoung
2881 1.1 dyoung /* Be nice to the mid range */
2882 1.1 dyoung if ((newitr > 300) && (newitr < 1200))
2883 1.1 dyoung newitr = (newitr / 3);
2884 1.1 dyoung else
2885 1.1 dyoung newitr = (newitr / 2);
2886 1.1 dyoung
2887 1.124 msaitoh /*
2888 1.124 msaitoh 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
2889 1.124 msaitoh 	 * Currently we use 2us for RSC_DELAY. The minimum interval is always
2890 1.124 msaitoh 	 * greater than 2us on 100M (and presumably 10M, though not documented),
2891 1.124 msaitoh 	 * but it is not on 1G and higher.
2892 1.124 msaitoh */
2893 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
2894 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL))
2895 1.124 msaitoh if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2896 1.124 msaitoh newitr = IXGBE_MIN_RSC_EITR_10G1G;
2897 1.124 msaitoh
2898 1.186 msaitoh /* save for next interrupt */
2899 1.186 msaitoh que->eitr_setting = newitr;
2900 1.1 dyoung
2901 1.98 msaitoh /* Reset state */
2902 1.98 msaitoh txr->bytes = 0;
2903 1.98 msaitoh txr->packets = 0;
2904 1.98 msaitoh rxr->bytes = 0;
2905 1.98 msaitoh rxr->packets = 0;
2906 1.1 dyoung
2907 1.1 dyoung no_calc:
2908 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
2909 1.99 msaitoh
2910 1.34 msaitoh return 1;
2911 1.99 msaitoh } /* ixgbe_msix_que */
2912 1.1 dyoung
2913 1.99 msaitoh /************************************************************************
2914 1.99 msaitoh * ixgbe_media_status - Media Ioctl callback
2915 1.98 msaitoh *
2916 1.99 msaitoh * Called whenever the user queries the status of
2917 1.99 msaitoh * the interface using ifconfig.
2918 1.99 msaitoh ************************************************************************/
2919 1.98 msaitoh static void
2920 1.98 msaitoh ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2921 1.1 dyoung {
2922 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2923 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2924 1.98 msaitoh int layer;
2925 1.1 dyoung
2926 1.98 msaitoh INIT_DEBUGOUT("ixgbe_media_status: begin");
2927 1.333 msaitoh ixgbe_update_link_status(sc);
2928 1.1 dyoung
2929 1.1 dyoung ifmr->ifm_status = IFM_AVALID;
2930 1.1 dyoung ifmr->ifm_active = IFM_ETHER;
2931 1.1 dyoung
2932 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
2933 1.68 msaitoh ifmr->ifm_active |= IFM_NONE;
2934 1.1 dyoung return;
2935 1.1 dyoung }
2936 1.1 dyoung
2937 1.1 dyoung ifmr->ifm_status |= IFM_ACTIVE;
2938 1.333 msaitoh layer = sc->phy_layer;
2939 1.1 dyoung
2940 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2941 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2942 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2943 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2944 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2945 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2946 1.333 msaitoh switch (sc->link_speed) {
2947 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2948 1.43 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2949 1.43 msaitoh break;
2950 1.103 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
2951 1.103 msaitoh ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2952 1.103 msaitoh break;
2953 1.103 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
2954 1.103 msaitoh ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2955 1.103 msaitoh break;
2956 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2957 1.33 msaitoh ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2958 1.43 msaitoh break;
2959 1.43 msaitoh case IXGBE_LINK_SPEED_100_FULL:
2960 1.24 msaitoh ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2961 1.43 msaitoh break;
2962 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
2963 1.99 msaitoh ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2964 1.99 msaitoh break;
2965 1.43 msaitoh }
2966 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2967 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2968 1.333 msaitoh switch (sc->link_speed) {
2969 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2970 1.43 msaitoh ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2971 1.43 msaitoh break;
2972 1.43 msaitoh }
2973 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2974 1.333 msaitoh switch (sc->link_speed) {
2975 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2976 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2977 1.43 msaitoh break;
2978 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2979 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2980 1.43 msaitoh break;
2981 1.43 msaitoh }
2982 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2983 1.333 msaitoh switch (sc->link_speed) {
2984 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2985 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2986 1.43 msaitoh break;
2987 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2988 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2989 1.43 msaitoh break;
2990 1.43 msaitoh }
2991 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2992 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2993 1.333 msaitoh switch (sc->link_speed) {
2994 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2995 1.43 msaitoh ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2996 1.43 msaitoh break;
2997 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2998 1.28 msaitoh ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2999 1.43 msaitoh break;
3000 1.43 msaitoh }
3001 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
3002 1.333 msaitoh switch (sc->link_speed) {
3003 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3004 1.43 msaitoh ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
3005 1.43 msaitoh break;
3006 1.43 msaitoh }
3007 1.43 msaitoh /*
3008 1.99 msaitoh * XXX: These need to use the proper media types once
3009 1.99 msaitoh * they're added.
3010 1.99 msaitoh */
3011 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
3012 1.333 msaitoh switch (sc->link_speed) {
3013 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3014 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
3015 1.48 msaitoh break;
3016 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3017 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3018 1.48 msaitoh break;
3019 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3020 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3021 1.48 msaitoh break;
3022 1.48 msaitoh }
3023 1.99 msaitoh else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
3024 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
3025 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
3026 1.333 msaitoh switch (sc->link_speed) {
3027 1.48 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3028 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
3029 1.48 msaitoh break;
3030 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3031 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3032 1.48 msaitoh break;
3033 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3034 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3035 1.48 msaitoh break;
3036 1.48 msaitoh }
3037 1.98 msaitoh
3038 1.43 msaitoh /* If nothing is recognized... */
3039 1.43 msaitoh #if 0
3040 1.43 msaitoh if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
3041 1.43 msaitoh ifmr->ifm_active |= IFM_UNKNOWN;
3042 1.43 msaitoh #endif
3043 1.98 msaitoh
3044 1.104 msaitoh ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
3045 1.104 msaitoh
3046 1.44 msaitoh /* Display current flow control setting used on link */
3047 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
3048 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3049 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3050 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
3051 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3052 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3053 1.1 dyoung
3054 1.1 dyoung return;
3055 1.99 msaitoh } /* ixgbe_media_status */
3056 1.1 dyoung
3057 1.99 msaitoh /************************************************************************
3058 1.99 msaitoh * ixgbe_media_change - Media Ioctl callback
3059 1.1 dyoung *
3060 1.99 msaitoh * Called when the user changes speed/duplex using
3061 1.99 msaitoh  * the media/mediaopt options with ifconfig.
3062 1.99 msaitoh ************************************************************************/
3063 1.1 dyoung static int
3064 1.98 msaitoh ixgbe_media_change(struct ifnet *ifp)
3065 1.1 dyoung {
3066 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3067 1.333 msaitoh struct ifmedia *ifm = &sc->media;
3068 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3069 1.43 msaitoh ixgbe_link_speed speed = 0;
3070 1.94 msaitoh ixgbe_link_speed link_caps = 0;
3071 1.94 msaitoh bool negotiate = false;
3072 1.94 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
3073 1.1 dyoung
3074 1.1 dyoung INIT_DEBUGOUT("ixgbe_media_change: begin");
3075 1.1 dyoung
3076 1.1 dyoung if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3077 1.1 dyoung return (EINVAL);
3078 1.1 dyoung
3079 1.44 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
3080 1.144 msaitoh return (EPERM);
3081 1.44 msaitoh
3082 1.43 msaitoh /*
3083 1.99 msaitoh * We don't actually need to check against the supported
3084 1.99 msaitoh * media types of the adapter; ifmedia will take care of
3085 1.99 msaitoh * that for us.
3086 1.99 msaitoh */
3087 1.43 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) {
3088 1.98 msaitoh case IFM_AUTO:
3089 1.98 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3090 1.98 msaitoh &negotiate);
3091 1.98 msaitoh if (err != IXGBE_SUCCESS) {
3092 1.333 msaitoh device_printf(sc->dev, "Unable to determine "
3093 1.98 msaitoh "supported advertise speeds\n");
3094 1.98 msaitoh return (ENODEV);
3095 1.98 msaitoh }
3096 1.98 msaitoh speed |= link_caps;
3097 1.98 msaitoh break;
3098 1.98 msaitoh case IFM_10G_T:
3099 1.98 msaitoh case IFM_10G_LRM:
3100 1.98 msaitoh case IFM_10G_LR:
3101 1.98 msaitoh case IFM_10G_TWINAX:
3102 1.181 msaitoh case IFM_10G_SR:
3103 1.181 msaitoh case IFM_10G_CX4:
3104 1.98 msaitoh case IFM_10G_KR:
3105 1.98 msaitoh case IFM_10G_KX4:
3106 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
3107 1.98 msaitoh break;
3108 1.103 msaitoh case IFM_5000_T:
3109 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
3110 1.103 msaitoh break;
3111 1.103 msaitoh case IFM_2500_T:
3112 1.99 msaitoh case IFM_2500_KX:
3113 1.99 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3114 1.99 msaitoh break;
3115 1.98 msaitoh case IFM_1000_T:
3116 1.98 msaitoh case IFM_1000_LX:
3117 1.98 msaitoh case IFM_1000_SX:
3118 1.98 msaitoh case IFM_1000_KX:
3119 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
3120 1.98 msaitoh break;
3121 1.98 msaitoh case IFM_100_TX:
3122 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
3123 1.98 msaitoh break;
3124 1.99 msaitoh case IFM_10_T:
3125 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
3126 1.99 msaitoh break;
3127 1.140 msaitoh case IFM_NONE:
3128 1.140 msaitoh break;
3129 1.98 msaitoh default:
3130 1.98 msaitoh goto invalid;
3131 1.48 msaitoh }
3132 1.43 msaitoh
3133 1.43 msaitoh hw->mac.autotry_restart = TRUE;
3134 1.43 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
3135 1.333 msaitoh sc->advertise = 0;
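	/*
	 * Mirror the selected speed into sc->advertise, using the bit layout
	 * of the advertise_speed sysctl (see IXGBE_SYSCTL_DESC_ADV_SPEED):
	 * 0x01=100M, 0x02=1G, 0x04=10G, 0x08=10M, 0x10=2.5G, 0x20=5G.
	 */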
3136 1.109 msaitoh if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3137 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3138 1.333 msaitoh sc->advertise |= 1 << 2;
3139 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3140 1.333 msaitoh sc->advertise |= 1 << 1;
3141 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3142 1.333 msaitoh sc->advertise |= 1 << 0;
3143 1.99 msaitoh if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3144 1.333 msaitoh sc->advertise |= 1 << 3;
3145 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3146 1.333 msaitoh sc->advertise |= 1 << 4;
3147 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3148 1.333 msaitoh sc->advertise |= 1 << 5;
3149 1.51 msaitoh }
3150 1.1 dyoung
3151 1.1 dyoung return (0);
3152 1.43 msaitoh
3153 1.43 msaitoh invalid:
3154 1.333 msaitoh device_printf(sc->dev, "Invalid media type!\n");
3155 1.98 msaitoh
3156 1.43 msaitoh return (EINVAL);
3157 1.99 msaitoh } /* ixgbe_media_change */
3158 1.1 dyoung
3159 1.99 msaitoh /************************************************************************
3160 1.320 msaitoh  * ixgbe_msix_admin - Admin (link, SFP, ECC, etc.) interrupt ISR (MSI-X)
3161 1.99 msaitoh ************************************************************************/
3162 1.98 msaitoh static int
3163 1.233 msaitoh ixgbe_msix_admin(void *arg)
3164 1.98 msaitoh {
3165 1.333 msaitoh struct ixgbe_softc *sc = arg;
3166 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3167 1.277 msaitoh u32 eicr;
3168 1.273 msaitoh u32 eims_orig;
3169 1.273 msaitoh u32 eims_disable = 0;
3170 1.98 msaitoh
3171 1.333 msaitoh IXGBE_EVC_ADD(&sc->admin_irqev, 1);
3172 1.98 msaitoh
3173 1.273 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3174 1.273 msaitoh /* Pause other interrupts */
3175 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3176 1.273 msaitoh
3177 1.125 knakahar /*
3178 1.273 msaitoh * First get the cause.
3179 1.273 msaitoh *
3180 1.125 knakahar 	 * The specifications of 82598, 82599, X540 and X550 say the EICS
3181 1.125 knakahar 	 * register is write only. However, Linux reads EICS instead of EICR
3182 1.273 msaitoh 	 * as a workaround for silicon errata to get the interrupt cause.
3183 1.273 msaitoh 	 * At least, reading EICR clears the lower 16 bits of EIMS on 82598.
3184 1.125 knakahar */
3185 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3186 1.98 msaitoh /* Be sure the queue bits are not cleared */
3187 1.99 msaitoh eicr &= ~IXGBE_EICR_RTX_QUEUE;
3188 1.265 msaitoh /* Clear all OTHER interrupts with write */
3189 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3190 1.1 dyoung
3191 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
3192 1.277 msaitoh
3193 1.277 msaitoh /* Re-enable some OTHER interrupts */
3194 1.277 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3195 1.277 msaitoh
3196 1.277 msaitoh return 1;
3197 1.277 msaitoh } /* ixgbe_msix_admin */
3198 1.277 msaitoh
3199 1.277 msaitoh static void
3200 1.333 msaitoh ixgbe_intr_admin_common(struct ixgbe_softc *sc, u32 eicr, u32 *eims_disable)
3201 1.277 msaitoh {
3202 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3203 1.277 msaitoh u32 task_requests = 0;
3204 1.277 msaitoh s32 retval;
3205 1.277 msaitoh
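	/*
	 * For each cause handled below we queue a deferred admin task and
	 * accumulate the corresponding EIMS bit in *eims_disable so that the
	 * caller keeps it masked while the deferred work runs.
	 */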
3206 1.266 msaitoh /* Link status change */
3207 1.266 msaitoh if (eicr & IXGBE_EICR_LSC) {
3208 1.266 msaitoh task_requests |= IXGBE_REQUEST_TASK_LSC;
3209 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3210 1.266 msaitoh }
3211 1.266 msaitoh
3212 1.204 msaitoh if (ixgbe_is_sfp(hw)) {
3213 1.310 msaitoh u32 eicr_mask;
3214 1.310 msaitoh
3215 1.204 msaitoh /* Pluggable optics-related interrupt */
3216 1.204 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
3217 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3218 1.204 msaitoh else
3219 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3220 1.204 msaitoh
3221 1.204 msaitoh /*
3222 1.204 msaitoh * An interrupt might not arrive when a module is inserted.
3223 1.204 msaitoh 		 * When a link status change interrupt occurs and the driver
3224 1.204 msaitoh 		 * still regards the SFP as unplugged, issue the module softint
3225 1.204 msaitoh 		 * and then the LSC interrupt.
3226 1.204 msaitoh */
3227 1.204 msaitoh if ((eicr & eicr_mask)
3228 1.204 msaitoh || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3229 1.204 msaitoh && (eicr & IXGBE_EICR_LSC))) {
3230 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD;
3231 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3232 1.204 msaitoh }
3233 1.204 msaitoh
3234 1.204 msaitoh if ((hw->mac.type == ixgbe_mac_82599EB) &&
3235 1.204 msaitoh (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3236 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF;
3237 1.277 msaitoh *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3238 1.204 msaitoh }
3239 1.204 msaitoh }
3240 1.204 msaitoh
3241 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3242 1.311 msaitoh #ifdef IXGBE_FDIR
3243 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
3244 1.99 msaitoh (eicr & IXGBE_EICR_FLOW_DIR)) {
3245 1.333 msaitoh if (!atomic_cas_uint(&sc->fdir_reinit, 0, 1)) {
3246 1.275 msaitoh task_requests |= IXGBE_REQUEST_TASK_FDIR;
3247 1.275 msaitoh /* Disable the interrupt */
3248 1.277 msaitoh *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3249 1.275 msaitoh }
3250 1.99 msaitoh }
3251 1.311 msaitoh #endif
3252 1.99 msaitoh
3253 1.99 msaitoh if (eicr & IXGBE_EICR_ECC) {
3254 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3255 1.312 msaitoh &ixgbe_errlog_intrvl))
3256 1.333 msaitoh device_printf(sc->dev,
3257 1.312 msaitoh "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3258 1.98 msaitoh }
3259 1.1 dyoung
3260 1.98 msaitoh /* Check for over temp condition */
3261 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3262 1.333 msaitoh switch (sc->hw.mac.type) {
3263 1.99 msaitoh case ixgbe_mac_X550EM_a:
3264 1.99 msaitoh if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3265 1.99 msaitoh break;
3266 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3267 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3268 1.99 msaitoh break;
3269 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3270 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3271 1.333 msaitoh device_printf(sc->dev,
3272 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3273 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3274 1.333 msaitoh device_printf(sc->dev,
3275 1.312 msaitoh "System shutdown required!\n");
3276 1.312 msaitoh }
3277 1.99 msaitoh break;
3278 1.99 msaitoh default:
3279 1.99 msaitoh if (!(eicr & IXGBE_EICR_TS))
3280 1.99 msaitoh break;
3281 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3282 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3283 1.99 msaitoh break;
3284 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3285 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3286 1.333 msaitoh device_printf(sc->dev,
3287 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3288 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3289 1.333 msaitoh device_printf(sc->dev,
3290 1.312 msaitoh "System shutdown required!\n");
3291 1.312 msaitoh }
3292 1.99 msaitoh break;
3293 1.99 msaitoh }
3294 1.1 dyoung }
3295 1.99 msaitoh
3296 1.99 msaitoh /* Check for VF message */
3297 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
3298 1.233 msaitoh (eicr & IXGBE_EICR_MAILBOX)) {
3299 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MBX;
3300 1.277 msaitoh *eims_disable |= IXGBE_EIMS_MAILBOX;
3301 1.233 msaitoh }
3302 1.1 dyoung }
3303 1.1 dyoung
3304 1.98 msaitoh /* Check for fan failure */
3305 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3306 1.333 msaitoh ixgbe_check_fan_failure(sc, eicr, true);
3307 1.1 dyoung
3308 1.98 msaitoh /* External PHY interrupt */
3309 1.99 msaitoh if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3310 1.99 msaitoh (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3311 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_PHY;
3312 1.277 msaitoh *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3313 1.233 msaitoh }
3314 1.233 msaitoh
3315 1.233 msaitoh if (task_requests != 0) {
3316 1.333 msaitoh mutex_enter(&sc->admin_mtx);
3317 1.333 msaitoh sc->task_requests |= task_requests;
3318 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
3319 1.333 msaitoh mutex_exit(&sc->admin_mtx);
3320 1.186 msaitoh }
3321 1.277 msaitoh }
3322 1.1 dyoung
3323 1.124 msaitoh static void
3324 1.333 msaitoh ixgbe_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
3325 1.124 msaitoh {
3326 1.185 msaitoh
3327 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
3328 1.186 msaitoh itr |= itr << 16;
3329 1.186 msaitoh else
3330 1.186 msaitoh itr |= IXGBE_EITR_CNT_WDIS;
3331 1.124 msaitoh
3332 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(index), itr);
3333 1.124 msaitoh }
3334 1.124 msaitoh
3335 1.124 msaitoh
3336 1.99 msaitoh /************************************************************************
3337 1.99 msaitoh * ixgbe_sysctl_interrupt_rate_handler
3338 1.99 msaitoh ************************************************************************/
3339 1.98 msaitoh static int
3340 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3341 1.1 dyoung {
3342 1.98 msaitoh struct sysctlnode node = *rnode;
3343 1.99 msaitoh struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3344 1.339 msaitoh struct ixgbe_softc *sc;
3345 1.98 msaitoh uint32_t reg, usec, rate;
3346 1.98 msaitoh int error;
3347 1.45 msaitoh
3348 1.98 msaitoh if (que == NULL)
3349 1.98 msaitoh return 0;
3350 1.169 msaitoh
3351 1.333 msaitoh sc = que->sc;
3352 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
3353 1.169 msaitoh return (EPERM);
3354 1.169 msaitoh
3355 1.333 msaitoh reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix));
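	/*
	 * The EITR interval field lives in bits [11:3] and counts in units
	 * of (nominally) 2us, so a field value of 'usec' corresponds to
	 * roughly 500000/usec interrupts per second; the reverse conversion
	 * below uses 4000000/rate because the field is written already
	 * shifted left by 3.
	 */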
3356 1.98 msaitoh usec = ((reg & 0x0FF8) >> 3);
3357 1.98 msaitoh if (usec > 0)
3358 1.98 msaitoh rate = 500000 / usec;
3359 1.98 msaitoh else
3360 1.98 msaitoh rate = 0;
3361 1.98 msaitoh node.sysctl_data = &rate;
3362 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
3363 1.98 msaitoh if (error || newp == NULL)
3364 1.98 msaitoh return error;
3365 1.98 msaitoh reg &= ~0xfff; /* default, no limitation */
3366 1.98 msaitoh if (rate > 0 && rate < 500000) {
3367 1.98 msaitoh if (rate < 1000)
3368 1.98 msaitoh rate = 1000;
3369 1.228 msaitoh reg |= ((4000000 / rate) & 0xff8);
3370 1.124 msaitoh /*
3371 1.124 msaitoh 		 * When RSC is used, the ITR interval must be larger than
3372 1.124 msaitoh 		 * RSC_DELAY. Currently we use 2us for RSC_DELAY.
3373 1.124 msaitoh 		 * The minimum interval is always greater than 2us on 100M
3374 1.124 msaitoh 		 * (and presumably 10M, though not documented), but not on 1G and higher.
3375 1.124 msaitoh */
3376 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
3377 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3378 1.333 msaitoh if ((sc->num_queues > 1)
3379 1.124 msaitoh && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3380 1.124 msaitoh return EINVAL;
3381 1.124 msaitoh }
3382 1.98 msaitoh ixgbe_max_interrupt_rate = rate;
3383 1.124 msaitoh } else
3384 1.124 msaitoh ixgbe_max_interrupt_rate = 0;
3385 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, reg);
3386 1.99 msaitoh
3387 1.99 msaitoh return (0);
3388 1.99 msaitoh } /* ixgbe_sysctl_interrupt_rate_handler */
3389 1.45 msaitoh
3390 1.98 msaitoh const struct sysctlnode *
3391 1.333 msaitoh ixgbe_sysctl_instance(struct ixgbe_softc *sc)
3392 1.98 msaitoh {
3393 1.98 msaitoh const char *dvname;
3394 1.98 msaitoh struct sysctllog **log;
3395 1.98 msaitoh int rc;
3396 1.98 msaitoh const struct sysctlnode *rnode;
3397 1.1 dyoung
3398 1.333 msaitoh if (sc->sysctltop != NULL)
3399 1.333 msaitoh return sc->sysctltop;
3400 1.1 dyoung
3401 1.333 msaitoh log = &sc->sysctllog;
3402 1.333 msaitoh dvname = device_xname(sc->dev);
3403 1.1 dyoung
3404 1.98 msaitoh if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3405 1.98 msaitoh 0, CTLTYPE_NODE, dvname,
3406 1.98 msaitoh SYSCTL_DESCR("ixgbe information and settings"),
3407 1.98 msaitoh NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3408 1.98 msaitoh goto err;
3409 1.63 msaitoh
3410 1.98 msaitoh return rnode;
3411 1.98 msaitoh err:
3412 1.333 msaitoh device_printf(sc->dev,
3413 1.207 msaitoh "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3414 1.98 msaitoh return NULL;
3415 1.63 msaitoh }
3416 1.63 msaitoh
3417 1.99 msaitoh /************************************************************************
3418 1.99 msaitoh * ixgbe_add_device_sysctls
3419 1.99 msaitoh ************************************************************************/
3420 1.63 msaitoh static void
3421 1.333 msaitoh ixgbe_add_device_sysctls(struct ixgbe_softc *sc)
3422 1.1 dyoung {
3423 1.333 msaitoh device_t dev = sc->dev;
3424 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3425 1.98 msaitoh struct sysctllog **log;
3426 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
3427 1.1 dyoung
3428 1.333 msaitoh log = &sc->sysctllog;
3429 1.1 dyoung
3430 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
3431 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl root\n");
3432 1.98 msaitoh return;
3433 1.98 msaitoh }
3434 1.1 dyoung
3435 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3436 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3437 1.158 msaitoh "debug", SYSCTL_DESCR("Debug Info"),
3438 1.333 msaitoh ixgbe_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL)
3439 1.280 msaitoh != 0)
3440 1.158 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3441 1.158 msaitoh
3442 1.158 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3443 1.286 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3444 1.286 msaitoh "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
3445 1.286 msaitoh ixgbe_sysctl_rx_copy_len, 0,
3446 1.333 msaitoh (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
3447 1.286 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3448 1.286 msaitoh
3449 1.286 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3450 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3451 1.314 msaitoh "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
3452 1.333 msaitoh NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3453 1.314 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3454 1.314 msaitoh
3455 1.314 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3456 1.314 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3457 1.314 msaitoh "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
3458 1.333 msaitoh NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3459 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3460 1.1 dyoung
3461 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3462 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
3463 1.313 msaitoh SYSCTL_DESCR("max number of RX packets to process"),
3464 1.333 msaitoh ixgbe_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3465 1.313 msaitoh CTL_EOL) != 0)
3466 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3467 1.313 msaitoh
3468 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3469 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
3470 1.313 msaitoh SYSCTL_DESCR("max number of TX packets to process"),
3471 1.333 msaitoh ixgbe_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3472 1.313 msaitoh CTL_EOL) != 0)
3473 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3474 1.313 msaitoh
3475 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3476 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3477 1.98 msaitoh "num_queues", SYSCTL_DESCR("Number of queues"),
3478 1.333 msaitoh NULL, 0, &sc->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3479 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3480 1.43 msaitoh
3481 1.98 msaitoh /* Sysctls for all devices */
3482 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3483 1.99 msaitoh CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3484 1.333 msaitoh ixgbe_sysctl_flowcntl, 0, (void *)sc, 0, CTL_CREATE,
3485 1.99 msaitoh CTL_EOL) != 0)
3486 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3487 1.63 msaitoh
3488 1.333 msaitoh sc->enable_aim = ixgbe_enable_aim;
3489 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3490 1.99 msaitoh CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3491 1.333 msaitoh NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3492 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3493 1.1 dyoung
3494 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3495 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3496 1.98 msaitoh "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3497 1.333 msaitoh ixgbe_sysctl_advertise, 0, (void *)sc, 0, CTL_CREATE,
3498 1.99 msaitoh CTL_EOL) != 0)
3499 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3500 1.1 dyoung
3501 1.147 knakahar /*
3502 1.147 knakahar * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3503 1.147 knakahar 	 * it causes flip-flopping softint/workqueue mode in one deferred
3504 1.147 knakahar * processing. Therefore, preempt_disable()/preempt_enable() are
3505 1.147 knakahar * required in ixgbe_sched_handle_que() to avoid
3506 1.147 knakahar * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3507 1.147 knakahar 	 * I think changing "que->txrx_use_workqueue" in the interrupt handler
3508 1.147 knakahar * is lighter than doing preempt_disable()/preempt_enable() in every
3509 1.147 knakahar * ixgbe_sched_handle_que().
3510 1.147 knakahar */
3511 1.333 msaitoh sc->txrx_use_workqueue = ixgbe_txrx_workqueue;
3512 1.128 knakahar if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3513 1.280 msaitoh CTLTYPE_BOOL, "txrx_workqueue",
3514 1.280 msaitoh SYSCTL_DESCR("Use workqueue for packet processing"),
3515 1.333 msaitoh NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE,
3516 1.280 msaitoh CTL_EOL) != 0)
3517 1.128 knakahar aprint_error_dev(dev, "could not create sysctl\n");
3518 1.128 knakahar
3519 1.98 msaitoh #ifdef IXGBE_DEBUG
3520 1.98 msaitoh /* testing sysctls (for all devices) */
3521 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3522 1.99 msaitoh CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3523 1.333 msaitoh ixgbe_sysctl_power_state, 0, (void *)sc, 0, CTL_CREATE,
3524 1.99 msaitoh CTL_EOL) != 0)
3525 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3526 1.45 msaitoh
3527 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3528 1.99 msaitoh CTLTYPE_STRING, "print_rss_config",
3529 1.99 msaitoh SYSCTL_DESCR("Prints RSS Configuration"),
3530 1.333 msaitoh ixgbe_sysctl_print_rss_config, 0, (void *)sc, 0, CTL_CREATE,
3531 1.99 msaitoh CTL_EOL) != 0)
3532 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3533 1.98 msaitoh #endif
3534 1.98 msaitoh /* for X550 series devices */
3535 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3536 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3537 1.99 msaitoh CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3538 1.333 msaitoh ixgbe_sysctl_dmac, 0, (void *)sc, 0, CTL_CREATE,
3539 1.99 msaitoh CTL_EOL) != 0)
3540 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3541 1.1 dyoung
3542 1.98 msaitoh /* for WoL-capable devices */
3543 1.333 msaitoh if (sc->wol_support) {
3544 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3545 1.99 msaitoh CTLTYPE_BOOL, "wol_enable",
3546 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3547 1.333 msaitoh ixgbe_sysctl_wol_enable, 0, (void *)sc, 0, CTL_CREATE,
3548 1.99 msaitoh CTL_EOL) != 0)
3549 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3550 1.1 dyoung
3551 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3552 1.99 msaitoh CTLTYPE_INT, "wufc",
3553 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3554 1.333 msaitoh ixgbe_sysctl_wufc, 0, (void *)sc, 0, CTL_CREATE,
3555 1.99 msaitoh CTL_EOL) != 0)
3556 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3557 1.98 msaitoh }
3558 1.1 dyoung
3559 1.98 msaitoh /* for X552/X557-AT devices */
3560 1.325 msaitoh if ((hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) ||
3561 1.325 msaitoh (hw->device_id == IXGBE_DEV_ID_X550EM_A_10G_T)) {
3562 1.98 msaitoh const struct sysctlnode *phy_node;
3563 1.1 dyoung
3564 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3565 1.98 msaitoh "phy", SYSCTL_DESCR("External PHY sysctls"),
3566 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3567 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3568 1.98 msaitoh return;
3569 1.98 msaitoh }
3570 1.1 dyoung
3571 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3572 1.99 msaitoh CTLTYPE_INT, "temp",
3573 1.99 msaitoh SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3574 1.333 msaitoh ixgbe_sysctl_phy_temp, 0, (void *)sc, 0, CTL_CREATE,
3575 1.99 msaitoh CTL_EOL) != 0)
3576 1.99 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3577 1.99 msaitoh
3578 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3579 1.99 msaitoh CTLTYPE_INT, "overtemp_occurred",
3580 1.280 msaitoh SYSCTL_DESCR(
3581 1.280 msaitoh "External PHY High Temperature Event Occurred"),
3582 1.333 msaitoh ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)sc, 0,
3583 1.99 msaitoh CTL_CREATE, CTL_EOL) != 0)
3584 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3585 1.99 msaitoh }
3586 1.33 msaitoh
3587 1.163 msaitoh if ((hw->mac.type == ixgbe_mac_X550EM_a)
3588 1.163 msaitoh && (hw->phy.type == ixgbe_phy_fw))
3589 1.163 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3590 1.163 msaitoh CTLTYPE_BOOL, "force_10_100_autonego",
3591 1.163 msaitoh SYSCTL_DESCR("Force autonego on 10M and 100M"),
3592 1.163 msaitoh NULL, 0, &hw->phy.force_10_100_autonego, 0,
3593 1.163 msaitoh CTL_CREATE, CTL_EOL) != 0)
3594 1.163 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3595 1.163 msaitoh
3596 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) {
3597 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3598 1.99 msaitoh CTLTYPE_INT, "eee_state",
3599 1.99 msaitoh SYSCTL_DESCR("EEE Power Save State"),
3600 1.333 msaitoh ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE,
3601 1.99 msaitoh CTL_EOL) != 0)
3602 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3603 1.98 msaitoh }
3604 1.99 msaitoh } /* ixgbe_add_device_sysctls */
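
/*
 * A minimal sketch of the handler shape the CTLTYPE_INT nodes above
 * assume: copy the current value, let sysctl_lookup() perform the
 * read or write, validate the new value, then commit it.  The function
 * name and the validation limit below are illustrative only; the real
 * handlers in this file follow the same pattern but differ in detail.
 */
#if 0 /* example only, not compiled */
static int
ixgbe_sysctl_example(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
	int error, val;

	val = sc->rx_process_limit;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;
	if (val < 0)
		return EINVAL;

	sc->rx_process_limit = val;
	return 0;
}
#endif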
3605 1.1 dyoung
3606 1.99 msaitoh /************************************************************************
3607 1.99 msaitoh * ixgbe_allocate_pci_resources
3608 1.99 msaitoh ************************************************************************/
3609 1.98 msaitoh static int
3610 1.333 msaitoh ixgbe_allocate_pci_resources(struct ixgbe_softc *sc,
3611 1.98 msaitoh const struct pci_attach_args *pa)
3612 1.1 dyoung {
3613 1.171 msaitoh pcireg_t memtype, csr;
3614 1.333 msaitoh device_t dev = sc->dev;
3615 1.98 msaitoh bus_addr_t addr;
3616 1.98 msaitoh int flags;
3617 1.1 dyoung
3618 1.98 msaitoh memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3619 1.98 msaitoh switch (memtype) {
3620 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3621 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3622 1.333 msaitoh sc->osdep.mem_bus_space_tag = pa->pa_memt;
3623 1.98 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3624 1.333 msaitoh memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
3625 1.98 msaitoh goto map_err;
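		/*
		 * BAR0 maps device registers, which must not be accessed
		 * speculatively, so strip the prefetchable attribute before
		 * mapping even if the BAR advertises it.
		 */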
3626 1.98 msaitoh if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3627 1.98 msaitoh aprint_normal_dev(dev, "clearing prefetchable bit\n");
3628 1.98 msaitoh flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3629 1.98 msaitoh }
3630 1.333 msaitoh if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
3631 1.333 msaitoh sc->osdep.mem_size, flags,
3632 1.333 msaitoh &sc->osdep.mem_bus_space_handle) != 0) {
3633 1.98 msaitoh map_err:
3634 1.333 msaitoh sc->osdep.mem_size = 0;
3635 1.98 msaitoh aprint_error_dev(dev, "unable to map BAR0\n");
3636 1.98 msaitoh return ENXIO;
3637 1.98 msaitoh }
3638 1.171 msaitoh /*
3639 1.171 msaitoh * Enable address decoding for memory range in case BIOS or
3640 1.171 msaitoh * UEFI don't set it.
3641 1.171 msaitoh */
3642 1.171 msaitoh csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3643 1.171 msaitoh PCI_COMMAND_STATUS_REG);
3644 1.171 msaitoh csr |= PCI_COMMAND_MEM_ENABLE;
3645 1.171 msaitoh pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3646 1.171 msaitoh csr);
3647 1.98 msaitoh break;
3648 1.98 msaitoh default:
3649 1.98 msaitoh aprint_error_dev(dev, "unexpected type on BAR0\n");
3650 1.98 msaitoh return ENXIO;
3651 1.98 msaitoh }
3652 1.1 dyoung
3653 1.98 msaitoh return (0);
3654 1.99 msaitoh } /* ixgbe_allocate_pci_resources */
3655 1.1 dyoung
3656 1.119 msaitoh static void
3657 1.333 msaitoh ixgbe_free_deferred_handlers(struct ixgbe_softc *sc)
3658 1.119 msaitoh {
3659 1.333 msaitoh struct ix_queue *que = sc->queues;
3660 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3661 1.119 msaitoh int i;
3662 1.119 msaitoh
3663 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++, txr++) {
3664 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3665 1.119 msaitoh if (txr->txr_si != NULL)
3666 1.119 msaitoh softint_disestablish(txr->txr_si);
3667 1.119 msaitoh }
3668 1.119 msaitoh if (que->que_si != NULL)
3669 1.119 msaitoh softint_disestablish(que->que_si);
3670 1.119 msaitoh }
3671 1.333 msaitoh if (sc->txr_wq != NULL)
3672 1.333 msaitoh workqueue_destroy(sc->txr_wq);
3673 1.333 msaitoh if (sc->txr_wq_enqueued != NULL)
3674 1.333 msaitoh percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
3675 1.333 msaitoh if (sc->que_wq != NULL)
3676 1.333 msaitoh workqueue_destroy(sc->que_wq);
3677 1.333 msaitoh
3678 1.333 msaitoh if (sc->admin_wq != NULL) {
3679 1.333 msaitoh workqueue_destroy(sc->admin_wq);
3680 1.333 msaitoh sc->admin_wq = NULL;
3681 1.333 msaitoh }
3682 1.333 msaitoh if (sc->timer_wq != NULL) {
3683 1.333 msaitoh workqueue_destroy(sc->timer_wq);
3684 1.333 msaitoh sc->timer_wq = NULL;
3685 1.233 msaitoh }
3686 1.333 msaitoh if (sc->recovery_mode_timer_wq != NULL) {
3687 1.236 msaitoh /*
3688 1.236 msaitoh 		 * ixgbe_ifstop() doesn't call workqueue_wait() for the
3689 1.236 msaitoh 		 * recovery_mode_timer workqueue, so call it here.
3690 1.236 msaitoh */
3691 1.333 msaitoh workqueue_wait(sc->recovery_mode_timer_wq,
3692 1.333 msaitoh &sc->recovery_mode_timer_wc);
3693 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
3694 1.333 msaitoh workqueue_destroy(sc->recovery_mode_timer_wq);
3695 1.333 msaitoh sc->recovery_mode_timer_wq = NULL;
3696 1.119 msaitoh }
3697 1.257 msaitoh } /* ixgbe_free_deferred_handlers */
3698 1.119 msaitoh
3699 1.99 msaitoh /************************************************************************
3700 1.99 msaitoh * ixgbe_detach - Device removal routine
3701 1.1 dyoung *
3702 1.99 msaitoh * Called when the driver is being removed.
3703 1.99 msaitoh * Stops the adapter and deallocates all the resources
3704 1.99 msaitoh * that were allocated for driver operation.
3705 1.1 dyoung *
3706 1.99 msaitoh * return 0 on success, positive on failure
3707 1.99 msaitoh ************************************************************************/
3708 1.98 msaitoh static int
3709 1.98 msaitoh ixgbe_detach(device_t dev, int flags)
3710 1.1 dyoung {
3711 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3712 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
3713 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3714 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3715 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
3716 1.98 msaitoh u32 ctrl_ext;
3717 1.168 msaitoh int i;
3718 1.28 msaitoh
3719 1.98 msaitoh INIT_DEBUGOUT("ixgbe_detach: begin");
3720 1.333 msaitoh if (sc->osdep.attached == false)
3721 1.98 msaitoh return 0;
3722 1.26 msaitoh
3723 1.99 msaitoh if (ixgbe_pci_iov_detach(dev) != 0) {
3724 1.99 msaitoh device_printf(dev, "SR-IOV in use; detach first.\n");
3725 1.99 msaitoh return (EBUSY);
3726 1.99 msaitoh }
3727 1.99 msaitoh
3728 1.333 msaitoh if (VLAN_ATTACHED(&sc->osdep.ec) &&
3729 1.293 yamaguch (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
3730 1.99 msaitoh aprint_error_dev(dev, "VLANs in use, detach first\n");
3731 1.99 msaitoh return (EBUSY);
3732 1.26 msaitoh }
3733 1.293 yamaguch
3734 1.333 msaitoh ether_ifdetach(sc->ifp);
3735 1.24 msaitoh
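	/*
	 * Mark the softc as detaching before tearing anything down so that
	 * deferred paths which check the flag (e.g. the recovery mode
	 * callout) stop scheduling new work.
	 */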
3736 1.333 msaitoh sc->osdep.detaching = true;
3737 1.241 msaitoh /*
3738 1.252 msaitoh * Stop the interface. ixgbe_setup_low_power_mode() calls
3739 1.253 msaitoh * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3740 1.252 msaitoh * directly.
3741 1.241 msaitoh */
3742 1.333 msaitoh ixgbe_setup_low_power_mode(sc);
3743 1.241 msaitoh
3744 1.333 msaitoh callout_halt(&sc->timer, NULL);
3745 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3746 1.333 msaitoh callout_halt(&sc->recovery_mode_timer, NULL);
3747 1.333 msaitoh
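	/*
	 * The callouts were halted above; now drain any admin and timer
	 * work they may already have enqueued so nothing references the
	 * softc while resources are freed below.
	 */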
3748 1.333 msaitoh workqueue_wait(sc->admin_wq, &sc->admin_wc);
3749 1.333 msaitoh atomic_store_relaxed(&sc->admin_pending, 0);
3750 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
3751 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
3752 1.241 msaitoh
3753 1.98 msaitoh pmf_device_deregister(dev);
3754 1.26 msaitoh
3755 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
3756 1.185 msaitoh
3757 1.98 msaitoh /* let hardware know driver is unloading */
3758 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3759 1.98 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3760 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3761 1.24 msaitoh
3762 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
3763 1.333 msaitoh netmap_detach(sc->ifp);
3764 1.99 msaitoh
3765 1.333 msaitoh ixgbe_free_pci_resources(sc);
3766 1.98 msaitoh #if 0 /* XXX the NetBSD port is probably missing something here */
3767 1.98 msaitoh bus_generic_detach(dev);
3768 1.98 msaitoh #endif
3769 1.333 msaitoh if_detach(sc->ifp);
3770 1.333 msaitoh ifmedia_fini(&sc->media);
3771 1.333 msaitoh if_percpuq_destroy(sc->ipq);
3772 1.333 msaitoh
3773 1.333 msaitoh sysctl_teardown(&sc->sysctllog);
3774 1.333 msaitoh evcnt_detach(&sc->efbig_tx_dma_setup);
3775 1.333 msaitoh evcnt_detach(&sc->mbuf_defrag_failed);
3776 1.333 msaitoh evcnt_detach(&sc->efbig2_tx_dma_setup);
3777 1.333 msaitoh evcnt_detach(&sc->einval_tx_dma_setup);
3778 1.333 msaitoh evcnt_detach(&sc->other_tx_dma_setup);
3779 1.333 msaitoh evcnt_detach(&sc->eagain_tx_dma_setup);
3780 1.333 msaitoh evcnt_detach(&sc->enomem_tx_dma_setup);
3781 1.333 msaitoh evcnt_detach(&sc->watchdog_events);
3782 1.333 msaitoh evcnt_detach(&sc->tso_err);
3783 1.333 msaitoh evcnt_detach(&sc->admin_irqev);
3784 1.333 msaitoh evcnt_detach(&sc->link_workev);
3785 1.333 msaitoh evcnt_detach(&sc->mod_workev);
3786 1.333 msaitoh evcnt_detach(&sc->msf_workev);
3787 1.333 msaitoh evcnt_detach(&sc->phy_workev);
3788 1.1 dyoung
3789 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3790 1.98 msaitoh if (i < __arraycount(stats->mpc)) {
3791 1.98 msaitoh evcnt_detach(&stats->mpc[i]);
3792 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
3793 1.98 msaitoh evcnt_detach(&stats->rnbc[i]);
3794 1.98 msaitoh }
3795 1.98 msaitoh if (i < __arraycount(stats->pxontxc)) {
3796 1.98 msaitoh evcnt_detach(&stats->pxontxc[i]);
3797 1.98 msaitoh evcnt_detach(&stats->pxonrxc[i]);
3798 1.98 msaitoh evcnt_detach(&stats->pxofftxc[i]);
3799 1.98 msaitoh evcnt_detach(&stats->pxoffrxc[i]);
3800 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3801 1.151 msaitoh evcnt_detach(&stats->pxon2offc[i]);
3802 1.98 msaitoh }
3803 1.168 msaitoh }
3804 1.168 msaitoh
3805 1.333 msaitoh txr = sc->tx_rings;
3806 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
3807 1.333 msaitoh evcnt_detach(&sc->queues[i].irqs);
3808 1.333 msaitoh evcnt_detach(&sc->queues[i].handleq);
3809 1.333 msaitoh evcnt_detach(&sc->queues[i].req);
3810 1.168 msaitoh evcnt_detach(&txr->total_packets);
3811 1.168 msaitoh #ifndef IXGBE_LEGACY_TX
3812 1.168 msaitoh evcnt_detach(&txr->pcq_drops);
3813 1.168 msaitoh #endif
3814 1.327 msaitoh evcnt_detach(&txr->no_desc_avail);
3815 1.327 msaitoh evcnt_detach(&txr->tso_tx);
3816 1.168 msaitoh
3817 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
3818 1.98 msaitoh evcnt_detach(&stats->qprc[i]);
3819 1.98 msaitoh evcnt_detach(&stats->qptc[i]);
3820 1.98 msaitoh evcnt_detach(&stats->qbrc[i]);
3821 1.98 msaitoh evcnt_detach(&stats->qbtc[i]);
3822 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3823 1.151 msaitoh evcnt_detach(&stats->qprdc[i]);
3824 1.34 msaitoh }
3825 1.98 msaitoh
3826 1.98 msaitoh evcnt_detach(&rxr->rx_packets);
3827 1.98 msaitoh evcnt_detach(&rxr->rx_bytes);
3828 1.98 msaitoh evcnt_detach(&rxr->rx_copies);
3829 1.290 msaitoh evcnt_detach(&rxr->no_mbuf);
3830 1.98 msaitoh evcnt_detach(&rxr->rx_discarded);
3831 1.1 dyoung }
3832 1.98 msaitoh evcnt_detach(&stats->ipcs);
3833 1.98 msaitoh evcnt_detach(&stats->l4cs);
3834 1.98 msaitoh evcnt_detach(&stats->ipcs_bad);
3835 1.98 msaitoh evcnt_detach(&stats->l4cs_bad);
3836 1.98 msaitoh evcnt_detach(&stats->intzero);
3837 1.98 msaitoh evcnt_detach(&stats->legint);
3838 1.98 msaitoh evcnt_detach(&stats->crcerrs);
3839 1.98 msaitoh evcnt_detach(&stats->illerrc);
3840 1.98 msaitoh evcnt_detach(&stats->errbc);
3841 1.98 msaitoh evcnt_detach(&stats->mspdc);
3842 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3843 1.98 msaitoh evcnt_detach(&stats->mbsdc);
3844 1.98 msaitoh evcnt_detach(&stats->mpctotal);
3845 1.98 msaitoh evcnt_detach(&stats->mlfc);
3846 1.98 msaitoh evcnt_detach(&stats->mrfc);
3847 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
3848 1.326 msaitoh evcnt_detach(&stats->link_dn_cnt);
3849 1.98 msaitoh evcnt_detach(&stats->rlec);
3850 1.98 msaitoh evcnt_detach(&stats->lxontxc);
3851 1.98 msaitoh evcnt_detach(&stats->lxonrxc);
3852 1.98 msaitoh evcnt_detach(&stats->lxofftxc);
3853 1.98 msaitoh evcnt_detach(&stats->lxoffrxc);
3854 1.98 msaitoh
3855 1.98 msaitoh /* Packet Reception Stats */
3856 1.98 msaitoh evcnt_detach(&stats->tor);
3857 1.98 msaitoh evcnt_detach(&stats->gorc);
3858 1.98 msaitoh evcnt_detach(&stats->tpr);
3859 1.98 msaitoh evcnt_detach(&stats->gprc);
3860 1.98 msaitoh evcnt_detach(&stats->mprc);
3861 1.98 msaitoh evcnt_detach(&stats->bprc);
3862 1.98 msaitoh evcnt_detach(&stats->prc64);
3863 1.98 msaitoh evcnt_detach(&stats->prc127);
3864 1.98 msaitoh evcnt_detach(&stats->prc255);
3865 1.98 msaitoh evcnt_detach(&stats->prc511);
3866 1.98 msaitoh evcnt_detach(&stats->prc1023);
3867 1.98 msaitoh evcnt_detach(&stats->prc1522);
3868 1.98 msaitoh evcnt_detach(&stats->ruc);
3869 1.98 msaitoh evcnt_detach(&stats->rfc);
3870 1.98 msaitoh evcnt_detach(&stats->roc);
3871 1.98 msaitoh evcnt_detach(&stats->rjc);
3872 1.98 msaitoh evcnt_detach(&stats->mngprc);
3873 1.98 msaitoh evcnt_detach(&stats->mngpdc);
3874 1.98 msaitoh evcnt_detach(&stats->xec);
3875 1.1 dyoung
3876 1.98 msaitoh /* Packet Transmission Stats */
3877 1.98 msaitoh evcnt_detach(&stats->gotc);
3878 1.98 msaitoh evcnt_detach(&stats->tpt);
3879 1.98 msaitoh evcnt_detach(&stats->gptc);
3880 1.98 msaitoh evcnt_detach(&stats->bptc);
3881 1.98 msaitoh evcnt_detach(&stats->mptc);
3882 1.98 msaitoh evcnt_detach(&stats->mngptc);
3883 1.98 msaitoh evcnt_detach(&stats->ptc64);
3884 1.98 msaitoh evcnt_detach(&stats->ptc127);
3885 1.98 msaitoh evcnt_detach(&stats->ptc255);
3886 1.98 msaitoh evcnt_detach(&stats->ptc511);
3887 1.98 msaitoh evcnt_detach(&stats->ptc1023);
3888 1.98 msaitoh evcnt_detach(&stats->ptc1522);
3889 1.1 dyoung
3890 1.333 msaitoh ixgbe_free_queues(sc);
3891 1.333 msaitoh free(sc->mta, M_DEVBUF);
3892 1.1 dyoung
3893 1.333 msaitoh mutex_destroy(&sc->admin_mtx); /* XXX appropriate order? */
3894 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
3895 1.1 dyoung
3896 1.1 dyoung return (0);
3897 1.99 msaitoh } /* ixgbe_detach */
3898 1.1 dyoung
3899 1.99 msaitoh /************************************************************************
3900 1.99 msaitoh * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3901 1.99 msaitoh *
3902 1.99 msaitoh * Prepare the adapter/port for LPLU and/or WoL
3903 1.99 msaitoh ************************************************************************/
3904 1.1 dyoung static int
3905 1.333 msaitoh ixgbe_setup_low_power_mode(struct ixgbe_softc *sc)
3906 1.1 dyoung {
3907 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3908 1.333 msaitoh device_t dev = sc->dev;
3909 1.333 msaitoh struct ifnet *ifp = sc->ifp;
3910 1.186 msaitoh s32 error = 0;
3911 1.98 msaitoh
3912 1.98 msaitoh /* Limit power management flow to X550EM baseT */
3913 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3914 1.99 msaitoh hw->phy.ops.enter_lplu) {
3915 1.98 msaitoh /* X550EM baseT adapters need a special LPLU flow */
3916 1.98 msaitoh hw->phy.reset_disable = true;
3917 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3918 1.98 msaitoh error = hw->phy.ops.enter_lplu(hw);
3919 1.98 msaitoh if (error)
3920 1.98 msaitoh device_printf(dev,
3921 1.98 msaitoh "Error entering LPLU: %d\n", error);
3922 1.98 msaitoh hw->phy.reset_disable = false;
3923 1.98 msaitoh } else {
3924 1.98 msaitoh /* Just stop for other adapters */
3925 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3926 1.33 msaitoh }
3927 1.1 dyoung
3928 1.333 msaitoh IXGBE_CORE_LOCK(sc);
3929 1.253 msaitoh
3930 1.98 msaitoh if (!hw->wol_enabled) {
3931 1.98 msaitoh ixgbe_set_phy_power(hw, FALSE);
3932 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3933 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3934 1.98 msaitoh } else {
3935 1.98 msaitoh /* Turn off support for APM wakeup. (Using ACPI instead) */
3936 1.166 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3937 1.166 msaitoh IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3938 1.34 msaitoh
3939 1.35 msaitoh /*
3940 1.98 msaitoh * Clear Wake Up Status register to prevent any previous wakeup
3941 1.98 msaitoh * events from waking us up immediately after we suspend.
3942 1.33 msaitoh */
3943 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3944 1.98 msaitoh
3945 1.1 dyoung /*
3946 1.98 msaitoh * Program the Wakeup Filter Control register with user filter
3947 1.98 msaitoh * settings
3948 1.33 msaitoh */
3949 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3950 1.98 msaitoh
3951 1.98 msaitoh /* Enable wakeups and power management in Wakeup Control */
3952 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC,
3953 1.98 msaitoh IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3954 1.1 dyoung }
3955 1.1 dyoung
3956 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
3957 1.253 msaitoh
3958 1.98 msaitoh return error;
3959 1.99 msaitoh } /* ixgbe_setup_low_power_mode */
3960 1.98 msaitoh
3961 1.99 msaitoh /************************************************************************
3962 1.99 msaitoh * ixgbe_shutdown - Shutdown entry point
3963 1.99 msaitoh ************************************************************************/
3964 1.98 msaitoh #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3965 1.98 msaitoh static int
3966 1.98 msaitoh ixgbe_shutdown(device_t dev)
3967 1.98 msaitoh {
3968 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3969 1.98 msaitoh int error = 0;
3970 1.34 msaitoh
3971 1.98 msaitoh INIT_DEBUGOUT("ixgbe_shutdown: begin");
3972 1.34 msaitoh
3973 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3974 1.1 dyoung
3975 1.98 msaitoh return (error);
3976 1.99 msaitoh } /* ixgbe_shutdown */
3977 1.98 msaitoh #endif
3978 1.1 dyoung
3979 1.99 msaitoh /************************************************************************
3980 1.99 msaitoh * ixgbe_suspend
3981 1.99 msaitoh *
3982 1.99 msaitoh * From D0 to D3
3983 1.99 msaitoh ************************************************************************/
3984 1.98 msaitoh static bool
3985 1.98 msaitoh ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3986 1.1 dyoung {
3987 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3988 1.186 msaitoh int error = 0;
3989 1.98 msaitoh
3990 1.98 msaitoh INIT_DEBUGOUT("ixgbe_suspend: begin");
3991 1.98 msaitoh
3992 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3993 1.1 dyoung
3994 1.98 msaitoh return (error);
3995 1.99 msaitoh } /* ixgbe_suspend */
3996 1.1 dyoung
3997 1.99 msaitoh /************************************************************************
3998 1.99 msaitoh * ixgbe_resume
3999 1.99 msaitoh *
4000 1.99 msaitoh * From D3 to D0
4001 1.99 msaitoh ************************************************************************/
4002 1.98 msaitoh static bool
4003 1.98 msaitoh ixgbe_resume(device_t dev, const pmf_qual_t *qual)
4004 1.98 msaitoh {
4005 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
4006 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4007 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4008 1.186 msaitoh u32 wus;
4009 1.1 dyoung
4010 1.98 msaitoh INIT_DEBUGOUT("ixgbe_resume: begin");
4011 1.33 msaitoh
4012 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4013 1.43 msaitoh
4014 1.98 msaitoh /* Read & clear WUS register */
4015 1.98 msaitoh wus = IXGBE_READ_REG(hw, IXGBE_WUS);
4016 1.98 msaitoh if (wus)
4017 1.98 msaitoh device_printf(dev, "Woken up by (WUS): %#010x\n",
4018 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_WUS));
4019 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4020 1.98 msaitoh /* And clear WUFC until next low-power transition */
4021 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4022 1.1 dyoung
4023 1.1 dyoung /*
4024 1.98 msaitoh * Required after D3->D0 transition;
4025 1.98 msaitoh 	 * will re-advertise all previously advertised speeds
4026 1.98 msaitoh */
4027 1.98 msaitoh if (ifp->if_flags & IFF_UP)
4028 1.333 msaitoh ixgbe_init_locked(sc);
4029 1.34 msaitoh
4030 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4031 1.1 dyoung
4032 1.98 msaitoh return true;
4033 1.99 msaitoh } /* ixgbe_resume */
4034 1.1 dyoung
4035 1.98 msaitoh /*
4036 1.98 msaitoh * Set the various hardware offload abilities.
4037 1.98 msaitoh *
4038 1.98 msaitoh * This takes the ifnet's if_capenable flags (e.g. set by the user using
4039 1.98 msaitoh * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
4040 1.98 msaitoh * mbuf offload flags the driver will understand.
4041 1.98 msaitoh */
4042 1.1 dyoung static void
4043 1.333 msaitoh ixgbe_set_if_hwassist(struct ixgbe_softc *sc)
4044 1.1 dyoung {
4045 1.98 msaitoh /* XXX */
4046 1.1 dyoung }
4047 1.1 dyoung
4048 1.99 msaitoh /************************************************************************
4049 1.99 msaitoh * ixgbe_init_locked - Init entry point
4050 1.99 msaitoh *
4051 1.99 msaitoh * Used in two ways: It is used by the stack as an init
4052 1.99 msaitoh * entry point in network interface structure. It is also
4053 1.99 msaitoh * used by the driver as a hw/sw initialization routine to
4054 1.99 msaitoh * get to a consistent state.
4057 1.99 msaitoh ************************************************************************/
4058 1.98 msaitoh static void
4059 1.333 msaitoh ixgbe_init_locked(struct ixgbe_softc *sc)
4060 1.1 dyoung {
4061 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4062 1.333 msaitoh device_t dev = sc->dev;
4063 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4064 1.157 msaitoh struct ix_queue *que;
4065 1.186 msaitoh struct tx_ring *txr;
4066 1.186 msaitoh struct rx_ring *rxr;
4067 1.98 msaitoh u32 txdctl, mhadd;
4068 1.98 msaitoh u32 rxdctl, rxctrl;
4069 1.186 msaitoh u32 ctrl_ext;
4070 1.219 msaitoh bool unsupported_sfp = false;
4071 1.283 msaitoh int i, j, error;
4072 1.1 dyoung
4073 1.98 msaitoh /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
4074 1.1 dyoung
4075 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4076 1.98 msaitoh INIT_DEBUGOUT("ixgbe_init_locked: begin");
4077 1.1 dyoung
4078 1.219 msaitoh hw->need_unsupported_sfp_recovery = false;
4079 1.98 msaitoh hw->adapter_stopped = FALSE;
4080 1.98 msaitoh ixgbe_stop_adapter(hw);
4081 1.333 msaitoh callout_stop(&sc->timer);
4082 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4083 1.333 msaitoh callout_stop(&sc->recovery_mode_timer);
4084 1.333 msaitoh for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
4085 1.157 msaitoh que->disabled_count = 0;
4086 1.1 dyoung
4087 1.98 msaitoh /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
4088 1.333 msaitoh sc->max_frame_size =
4089 1.98 msaitoh ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4090 1.1 dyoung
4091 1.98 msaitoh /* Queue indices may change with IOV mode */
4092 1.333 msaitoh ixgbe_align_all_queue_indices(sc);
4093 1.99 msaitoh
4094 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
4095 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
4096 1.1 dyoung
4097 1.98 msaitoh /* Get the latest mac address, User can use a LAA */
4098 1.98 msaitoh memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
4099 1.98 msaitoh IXGBE_ETH_LENGTH_OF_ADDRESS);
4100 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
4101 1.98 msaitoh hw->addr_ctrl.rar_used_count = 1;
4102 1.1 dyoung
4103 1.98 msaitoh /* Set hardware offload abilities from ifnet flags */
4104 1.333 msaitoh ixgbe_set_if_hwassist(sc);
4105 1.48 msaitoh
4106 1.98 msaitoh /* Prepare transmit descriptors and buffers */
4107 1.333 msaitoh if (ixgbe_setup_transmit_structures(sc)) {
4108 1.98 msaitoh device_printf(dev, "Could not setup transmit structures\n");
4109 1.333 msaitoh ixgbe_stop_locked(sc);
4110 1.98 msaitoh return;
4111 1.98 msaitoh }
4112 1.1 dyoung
4113 1.98 msaitoh ixgbe_init_hw(hw);
4114 1.144 msaitoh
4115 1.333 msaitoh ixgbe_initialize_iov(sc);
4116 1.144 msaitoh
4117 1.333 msaitoh ixgbe_initialize_transmit_units(sc);
4118 1.1 dyoung
4119 1.98 msaitoh /* Setup Multicast table */
4120 1.333 msaitoh ixgbe_set_rxfilter(sc);
4121 1.43 msaitoh
4122 1.289 msaitoh /* Use fixed buffer size, even for jumbo frames */
4123 1.333 msaitoh sc->rx_mbuf_sz = MCLBYTES;
4124 1.43 msaitoh
4125 1.98 msaitoh /* Prepare receive descriptors and buffers */
4126 1.333 msaitoh error = ixgbe_setup_receive_structures(sc);
4127 1.283 msaitoh if (error) {
4128 1.283 msaitoh device_printf(dev,
4129 1.283 msaitoh "Could not setup receive structures (err = %d)\n", error);
4130 1.333 msaitoh ixgbe_stop_locked(sc);
4131 1.98 msaitoh return;
4132 1.98 msaitoh }
4133 1.43 msaitoh
4134 1.98 msaitoh /* Configure RX settings */
4135 1.333 msaitoh ixgbe_initialize_receive_units(sc);
4136 1.43 msaitoh
4137 1.233 msaitoh 	/* Clear the set of pending deferred task (admin work) requests */
4138 1.333 msaitoh sc->task_requests = 0;
4139 1.233 msaitoh
4140 1.99 msaitoh /* Enable SDP & MSI-X interrupts based on adapter */
4141 1.333 msaitoh ixgbe_config_gpie(sc);
4142 1.43 msaitoh
4143 1.98 msaitoh /* Set MTU size */
4144 1.98 msaitoh if (ifp->if_mtu > ETHERMTU) {
4145 1.98 msaitoh /* aka IXGBE_MAXFRS on 82599 and newer */
4146 1.98 msaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4147 1.98 msaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK;
4148 1.333 msaitoh mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4149 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4150 1.55 msaitoh }
4151 1.55 msaitoh
4152 1.98 msaitoh /* Now enable all the queues */
4153 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4154 1.333 msaitoh txr = &sc->tx_rings[i];
4155 1.98 msaitoh txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4156 1.98 msaitoh txdctl |= IXGBE_TXDCTL_ENABLE;
4157 1.98 msaitoh /* Set WTHRESH to 8, burst writeback */
4158 1.292 msaitoh txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
4159 1.98 msaitoh /*
4160 1.98 msaitoh * When the internal queue falls below PTHRESH (32),
4161 1.98 msaitoh * start prefetching as long as there are at least
4162 1.98 msaitoh * HTHRESH (1) buffers ready. The values are taken
4163 1.98 msaitoh * from the Intel linux driver 3.8.21.
4164 1.98 msaitoh * Prefetching enables tx line rate even with 1 queue.
4165 1.98 msaitoh */
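		/* PTHRESH occupies bits [6:0] and HTHRESH bits [14:8]. */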
4166 1.98 msaitoh txdctl |= (32 << 0) | (1 << 8);
4167 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4168 1.55 msaitoh }
4169 1.43 msaitoh
4170 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4171 1.333 msaitoh rxr = &sc->rx_rings[i];
4172 1.98 msaitoh rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4173 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4174 1.98 msaitoh /*
4175 1.99 msaitoh * PTHRESH = 21
4176 1.99 msaitoh * HTHRESH = 4
4177 1.99 msaitoh * WTHRESH = 8
4178 1.99 msaitoh */
4179 1.98 msaitoh rxdctl &= ~0x3FFFFF;
4180 1.98 msaitoh rxdctl |= 0x080420;
4181 1.98 msaitoh }
4182 1.98 msaitoh rxdctl |= IXGBE_RXDCTL_ENABLE;
4183 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4184 1.144 msaitoh for (j = 0; j < 10; j++) {
4185 1.98 msaitoh if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4186 1.98 msaitoh IXGBE_RXDCTL_ENABLE)
4187 1.98 msaitoh break;
4188 1.98 msaitoh else
4189 1.98 msaitoh msec_delay(1);
4190 1.55 msaitoh }
4191 1.217 msaitoh IXGBE_WRITE_BARRIER(hw);
4192 1.99 msaitoh
4193 1.98 msaitoh /*
4194 1.98 msaitoh * In netmap mode, we must preserve the buffers made
4195 1.98 msaitoh * available to userspace before the if_init()
4196 1.98 msaitoh * (this is true by default on the TX side, because
4197 1.98 msaitoh * init makes all buffers available to userspace).
4198 1.98 msaitoh *
4199 1.98 msaitoh * netmap_reset() and the device specific routines
4200 1.98 msaitoh * (e.g. ixgbe_setup_receive_rings()) map these
4201 1.98 msaitoh * buffers at the end of the NIC ring, so here we
4202 1.98 msaitoh * must set the RDT (tail) register to make sure
4203 1.98 msaitoh * they are not overwritten.
4204 1.98 msaitoh *
4205 1.98 msaitoh * In this driver the NIC ring starts at RDH = 0,
4206 1.98 msaitoh * RDT points to the last slot available for reception (?),
4207 1.98 msaitoh * so RDT = num_rx_desc - 1 means the whole ring is available.
4208 1.98 msaitoh */
4209 1.99 msaitoh #ifdef DEV_NETMAP
4210 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
4211 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP)) {
4212 1.333 msaitoh struct netmap_adapter *na = NA(sc->ifp);
4213 1.189 msaitoh struct netmap_kring *kring = na->rx_rings[i];
4214 1.98 msaitoh int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4215 1.98 msaitoh
4216 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4217 1.98 msaitoh } else
4218 1.98 msaitoh #endif /* DEV_NETMAP */
4219 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4220 1.333 msaitoh sc->num_rx_desc - 1);
4221 1.48 msaitoh }
4222 1.98 msaitoh
4223 1.98 msaitoh /* Enable Receive engine */
4224 1.98 msaitoh rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4225 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4226 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_DMBYPS;
4227 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_RXEN;
4228 1.98 msaitoh ixgbe_enable_rx_dma(hw, rxctrl);
4229 1.98 msaitoh
4230 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4231 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4232 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4233 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4234 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4235 1.98 msaitoh
4236 1.144 msaitoh /* Set up MSI/MSI-X routing */
4237 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4238 1.333 msaitoh ixgbe_configure_ivars(sc);
4239 1.98 msaitoh /* Set up auto-mask */
4240 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4241 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4242 1.98 msaitoh else {
4243 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4244 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4245 1.55 msaitoh }
4246 1.98 msaitoh } else { /* Simple settings for Legacy/MSI */
4247 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 0);
4248 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 1);
4249 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4250 1.55 msaitoh }
4251 1.43 msaitoh
4252 1.333 msaitoh ixgbe_init_fdir(sc);
4253 1.98 msaitoh
4254 1.98 msaitoh /*
4255 1.98 msaitoh * Check on any SFP devices that
4256 1.98 msaitoh * need to be kick-started
4257 1.98 msaitoh */
4258 1.98 msaitoh if (hw->phy.type == ixgbe_phy_none) {
4259 1.283 msaitoh error = hw->phy.ops.identify(hw);
4260 1.283 msaitoh if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
4261 1.219 msaitoh unsupported_sfp = true;
4262 1.219 msaitoh } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4263 1.219 msaitoh unsupported_sfp = true;
4264 1.219 msaitoh
4265 1.219 msaitoh if (unsupported_sfp)
4266 1.219 msaitoh device_printf(dev,
4267 1.219 msaitoh "Unsupported SFP+ module type was detected.\n");
4268 1.98 msaitoh
4269 1.98 msaitoh /* Set moderation on the Link interrupt */
4270 1.333 msaitoh ixgbe_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);
4271 1.98 msaitoh
4272 1.173 msaitoh /* Enable EEE power saving */
4273 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
4274 1.173 msaitoh hw->mac.ops.setup_eee(hw,
4275 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
4276 1.173 msaitoh
4277 1.144 msaitoh /* Enable power to the phy. */
4278 1.219 msaitoh if (!unsupported_sfp) {
4279 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
4280 1.144 msaitoh
4281 1.219 msaitoh /* Config/Enable Link */
4282 1.333 msaitoh ixgbe_config_link(sc);
4283 1.219 msaitoh }
4284 1.55 msaitoh
4285 1.98 msaitoh /* Hardware Packet Buffer & Flow Control setup */
4286 1.333 msaitoh ixgbe_config_delay_values(sc);
4287 1.1 dyoung
4288 1.98 msaitoh /* Initialize the FC settings */
4289 1.98 msaitoh ixgbe_start_hw(hw);
4290 1.1 dyoung
4291 1.98 msaitoh /* Set up VLAN support and filter */
4292 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
4293 1.1 dyoung
4294 1.98 msaitoh /* Setup DMA Coalescing */
4295 1.333 msaitoh ixgbe_config_dmac(sc);
4296 1.98 msaitoh
4297 1.230 msaitoh /* OK to schedule workqueues. */
4298 1.333 msaitoh sc->schedule_wqs_ok = true;
4299 1.230 msaitoh
4300 1.98 msaitoh /* Enable the use of the MBX by the VF's */
4301 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
4302 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4303 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4304 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4305 1.1 dyoung }
4306 1.98 msaitoh
4307 1.123 msaitoh /* Update saved flags. See ixgbe_ifflags_cb() */
4308 1.333 msaitoh sc->if_flags = ifp->if_flags;
4309 1.333 msaitoh sc->ec_capenable = sc->osdep.ec.ec_capenable;
4310 1.123 msaitoh
4311 1.337 msaitoh /* Inform the stack we're ready */
4312 1.98 msaitoh ifp->if_flags |= IFF_RUNNING;
4313 1.98 msaitoh
4314 1.337 msaitoh /* And now turn on interrupts */
4315 1.337 msaitoh ixgbe_enable_intr(sc);
4316 1.337 msaitoh
4317 1.1 dyoung return;
4318 1.99 msaitoh } /* ixgbe_init_locked */
4319 1.1 dyoung
4320 1.99 msaitoh /************************************************************************
4321 1.99 msaitoh * ixgbe_init
4322 1.99 msaitoh ************************************************************************/
4323 1.98 msaitoh static int
4324 1.98 msaitoh ixgbe_init(struct ifnet *ifp)
4325 1.98 msaitoh {
4326 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
4327 1.98 msaitoh
4328 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4329 1.333 msaitoh ixgbe_init_locked(sc);
4330 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4331 1.98 msaitoh
4332 1.98 msaitoh return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4333 1.99 msaitoh } /* ixgbe_init */
4334 1.43 msaitoh
4335 1.99 msaitoh /************************************************************************
4336 1.99 msaitoh * ixgbe_set_ivar
4337 1.99 msaitoh *
4338 1.99 msaitoh * Setup the correct IVAR register for a particular MSI-X interrupt
4339 1.99 msaitoh * (yes this is all very magic and confusing :)
4340 1.99 msaitoh * - entry is the register array entry
4341 1.99 msaitoh * - vector is the MSI-X vector for this queue
4342 1.99 msaitoh * - type is RX/TX/MISC
4343 1.99 msaitoh ************************************************************************/
4344 1.42 msaitoh static void
4345 1.333 msaitoh ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
4346 1.1 dyoung {
4347 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4348 1.98 msaitoh u32 ivar, index;
4349 1.98 msaitoh
4350 1.98 msaitoh vector |= IXGBE_IVAR_ALLOC_VAL;
4351 1.98 msaitoh
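	/*
	 * Each 32-bit IVAR register holds four 8-bit entries; bit 7 of an
	 * entry (IXGBE_IVAR_ALLOC_VAL) marks it valid.  On 82598 entries
	 * are indexed linearly (RX queues 0-63, TX queues 64-127, other
	 * causes at a fixed slot).  On 82599 and newer each IVAR register
	 * covers a queue pair, RX in bytes 0/2 and TX in bytes 1/3, and
	 * the miscellaneous causes live in IVAR_MISC.
	 */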
4352 1.98 msaitoh switch (hw->mac.type) {
4353 1.98 msaitoh case ixgbe_mac_82598EB:
4354 1.98 msaitoh if (type == -1)
4355 1.98 msaitoh entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4356 1.98 msaitoh else
4357 1.98 msaitoh entry += (type * 64);
4358 1.98 msaitoh index = (entry >> 2) & 0x1F;
4359 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4360 1.198 msaitoh ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4361 1.198 msaitoh ivar |= ((u32)vector << (8 * (entry & 0x3)));
4362 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
4363 1.98 msaitoh break;
4364 1.98 msaitoh case ixgbe_mac_82599EB:
4365 1.98 msaitoh case ixgbe_mac_X540:
4366 1.98 msaitoh case ixgbe_mac_X550:
4367 1.98 msaitoh case ixgbe_mac_X550EM_x:
4368 1.99 msaitoh case ixgbe_mac_X550EM_a:
4369 1.98 msaitoh if (type == -1) { /* MISC IVAR */
4370 1.98 msaitoh index = (entry & 1) * 8;
4371 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4372 1.194 msaitoh ivar &= ~(0xffUL << index);
4373 1.194 msaitoh ivar |= ((u32)vector << index);
4374 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4375 1.98 msaitoh } else { /* RX/TX IVARS */
4376 1.98 msaitoh index = (16 * (entry & 1)) + (8 * type);
4377 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4378 1.194 msaitoh ivar &= ~(0xffUL << index);
4379 1.194 msaitoh ivar |= ((u32)vector << index);
4380 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4381 1.98 msaitoh }
4382 1.135 msaitoh break;
4383 1.98 msaitoh default:
4384 1.98 msaitoh break;
4385 1.98 msaitoh }
4386 1.99 msaitoh } /* ixgbe_set_ivar */
4387 1.1 dyoung
4388 1.99 msaitoh /************************************************************************
4389 1.99 msaitoh * ixgbe_configure_ivars
4390 1.99 msaitoh ************************************************************************/
4391 1.98 msaitoh static void
4392 1.333 msaitoh ixgbe_configure_ivars(struct ixgbe_softc *sc)
4393 1.98 msaitoh {
4394 1.333 msaitoh struct ix_queue *que = sc->queues;
4395 1.186 msaitoh u32 newitr;
4396 1.1 dyoung
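	/*
	 * ixgbe_max_interrupt_rate is in interrupts per second.  On 82599
	 * and newer the EITR interval field occupies bits [11:3] and counts
	 * in 2 usec units, so 4000000 / rate yields the interval already
	 * shifted into place and the 0x0FF8 mask keeps only that field.
	 */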
4397 1.98 msaitoh if (ixgbe_max_interrupt_rate > 0)
4398 1.98 msaitoh newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4399 1.98 msaitoh else {
4400 1.48 msaitoh /*
4401 1.99 msaitoh * Disable DMA coalescing if interrupt moderation is
4402 1.99 msaitoh * disabled.
4403 1.99 msaitoh */
4404 1.333 msaitoh sc->dmac = 0;
4405 1.98 msaitoh newitr = 0;
4406 1.98 msaitoh }
4407 1.98 msaitoh
4408 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
4409 1.333 msaitoh struct rx_ring *rxr = &sc->rx_rings[i];
4410 1.333 msaitoh struct tx_ring *txr = &sc->tx_rings[i];
4411 1.98 msaitoh /* First the RX queue entry */
4412 1.333 msaitoh ixgbe_set_ivar(sc, rxr->me, que->msix, 0);
4413 1.98 msaitoh /* ... and the TX */
4414 1.333 msaitoh ixgbe_set_ivar(sc, txr->me, que->msix, 1);
4415 1.98 msaitoh /* Set an Initial EITR value */
4416 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, newitr);
4417 1.138 knakahar /*
4418 1.138 knakahar 		 * Reset eitr_setting to eliminate any influence of the
4419 1.138 knakahar 		 * previous state.  At this point the Tx/Rx interrupt
4420 1.138 knakahar 		 * handler (ixgbe_msix_que()) cannot run, so neither
4421 1.138 knakahar 		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4422 1.138 knakahar */
4423 1.138 knakahar que->eitr_setting = 0;
4424 1.98 msaitoh }
4425 1.98 msaitoh
4426 1.98 msaitoh /* For the Link interrupt */
4427 1.333 msaitoh ixgbe_set_ivar(sc, 1, sc->vector, -1);
4428 1.99 msaitoh } /* ixgbe_configure_ivars */
4429 1.98 msaitoh
4430 1.99 msaitoh /************************************************************************
4431 1.99 msaitoh * ixgbe_config_gpie
4432 1.99 msaitoh ************************************************************************/
4433 1.98 msaitoh static void
4434 1.333 msaitoh ixgbe_config_gpie(struct ixgbe_softc *sc)
4435 1.98 msaitoh {
4436 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4437 1.186 msaitoh u32 gpie;
4438 1.98 msaitoh
4439 1.98 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4440 1.98 msaitoh
4441 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4442 1.99 msaitoh /* Enable Enhanced MSI-X mode */
4443 1.99 msaitoh gpie |= IXGBE_GPIE_MSIX_MODE
4444 1.186 msaitoh | IXGBE_GPIE_EIAME
4445 1.186 msaitoh | IXGBE_GPIE_PBA_SUPPORT
4446 1.186 msaitoh | IXGBE_GPIE_OCD;
4447 1.99 msaitoh }
4448 1.99 msaitoh
4449 1.98 msaitoh /* Fan Failure Interrupt */
4450 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4451 1.98 msaitoh gpie |= IXGBE_SDP1_GPIEN;
4452 1.1 dyoung
4453 1.99 msaitoh /* Thermal Sensor Interrupt */
4454 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4455 1.99 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4456 1.1 dyoung
4457 1.99 msaitoh /* Link detection */
4458 1.99 msaitoh switch (hw->mac.type) {
4459 1.99 msaitoh case ixgbe_mac_82599EB:
4460 1.99 msaitoh gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4461 1.99 msaitoh break;
4462 1.99 msaitoh case ixgbe_mac_X550EM_x:
4463 1.99 msaitoh case ixgbe_mac_X550EM_a:
4464 1.98 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4465 1.99 msaitoh break;
4466 1.99 msaitoh default:
4467 1.99 msaitoh break;
4468 1.1 dyoung }
4469 1.1 dyoung
4470 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4471 1.98 msaitoh
4472 1.99 msaitoh } /* ixgbe_config_gpie */
4473 1.1 dyoung
4474 1.99 msaitoh /************************************************************************
4475 1.99 msaitoh * ixgbe_config_delay_values
4476 1.99 msaitoh *
4477 1.333 msaitoh * Requires sc->max_frame_size to be set.
4478 1.99 msaitoh ************************************************************************/
4479 1.33 msaitoh static void
4480 1.333 msaitoh ixgbe_config_delay_values(struct ixgbe_softc *sc)
4481 1.33 msaitoh {
4482 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4483 1.186 msaitoh u32 rxpb, frame, size, tmp;
4484 1.33 msaitoh
4485 1.333 msaitoh frame = sc->max_frame_size;
4486 1.33 msaitoh
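	/*
	 * The IXGBE_DV()/IXGBE_LOW_DV() macros give worst-case delay values
	 * in bits for the configured maximum frame size; IXGBE_BT2KB()
	 * converts them to kilobytes of packet buffer.  The high water mark
	 * is what remains of RX packet buffer 0 after reserving that
	 * headroom, and the low water mark is the converted low delay value.
	 */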
4487 1.98 msaitoh /* Calculate High Water */
4488 1.98 msaitoh switch (hw->mac.type) {
4489 1.98 msaitoh case ixgbe_mac_X540:
4490 1.44 msaitoh case ixgbe_mac_X550:
4491 1.44 msaitoh case ixgbe_mac_X550EM_x:
4492 1.99 msaitoh case ixgbe_mac_X550EM_a:
4493 1.98 msaitoh tmp = IXGBE_DV_X540(frame, frame);
4494 1.44 msaitoh break;
4495 1.44 msaitoh default:
4496 1.98 msaitoh tmp = IXGBE_DV(frame, frame);
4497 1.44 msaitoh break;
4498 1.44 msaitoh }
4499 1.98 msaitoh size = IXGBE_BT2KB(tmp);
4500 1.98 msaitoh rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4501 1.98 msaitoh hw->fc.high_water[0] = rxpb - size;
4502 1.44 msaitoh
4503 1.98 msaitoh /* Now calculate Low Water */
4504 1.98 msaitoh switch (hw->mac.type) {
4505 1.98 msaitoh case ixgbe_mac_X540:
4506 1.98 msaitoh case ixgbe_mac_X550:
4507 1.98 msaitoh case ixgbe_mac_X550EM_x:
4508 1.99 msaitoh case ixgbe_mac_X550EM_a:
4509 1.98 msaitoh tmp = IXGBE_LOW_DV_X540(frame);
4510 1.98 msaitoh break;
4511 1.98 msaitoh default:
4512 1.98 msaitoh tmp = IXGBE_LOW_DV(frame);
4513 1.98 msaitoh break;
4514 1.33 msaitoh }
4515 1.98 msaitoh hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4516 1.33 msaitoh
4517 1.98 msaitoh hw->fc.pause_time = IXGBE_FC_PAUSE;
4518 1.98 msaitoh hw->fc.send_xon = TRUE;
4519 1.99 msaitoh } /* ixgbe_config_delay_values */
4520 1.33 msaitoh
4521 1.99 msaitoh /************************************************************************
4522 1.213 msaitoh * ixgbe_set_rxfilter - Multicast Update
4523 1.1 dyoung *
4524 1.99 msaitoh * Called whenever multicast address list is updated.
4525 1.99 msaitoh ************************************************************************/
4526 1.1 dyoung static void
4527 1.333 msaitoh ixgbe_set_rxfilter(struct ixgbe_softc *sc)
4528 1.1 dyoung {
4529 1.99 msaitoh struct ixgbe_mc_addr *mta;
4530 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4531 1.98 msaitoh u8 *update_ptr;
4532 1.98 msaitoh int mcnt = 0;
4533 1.99 msaitoh u32 fctrl;
4534 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
4535 1.98 msaitoh struct ether_multi *enm;
4536 1.98 msaitoh struct ether_multistep step;
4537 1.98 msaitoh
4538 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4539 1.213 msaitoh IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4540 1.98 msaitoh
4541 1.333 msaitoh mta = sc->mta;
4542 1.98 msaitoh bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4543 1.1 dyoung
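	/*
	 * Collect exact multicast addresses into the staging array.  An
	 * address range (addrlo != addrhi) or more entries than
	 * MAX_NUM_MULTICAST_ADDRESSES cannot be filtered exactly, so fall
	 * back to ALLMULTI in those cases.
	 */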
4544 1.105 msaitoh ETHER_LOCK(ec);
4545 1.183 ozaki ec->ec_flags &= ~ETHER_F_ALLMULTI;
4546 1.98 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
4547 1.98 msaitoh while (enm != NULL) {
4548 1.98 msaitoh if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4549 1.98 msaitoh (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4550 1.98 msaitoh ETHER_ADDR_LEN) != 0)) {
4551 1.183 ozaki ec->ec_flags |= ETHER_F_ALLMULTI;
4552 1.98 msaitoh break;
4553 1.98 msaitoh }
4554 1.98 msaitoh bcopy(enm->enm_addrlo,
4555 1.98 msaitoh mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4556 1.333 msaitoh mta[mcnt].vmdq = sc->pool;
4557 1.98 msaitoh mcnt++;
4558 1.98 msaitoh ETHER_NEXT_MULTI(step, enm);
4559 1.98 msaitoh }
4560 1.1 dyoung
4561 1.333 msaitoh fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
4562 1.98 msaitoh if (ifp->if_flags & IFF_PROMISC)
4563 1.98 msaitoh fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4564 1.183 ozaki else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4565 1.98 msaitoh fctrl |= IXGBE_FCTRL_MPE;
4566 1.212 msaitoh fctrl &= ~IXGBE_FCTRL_UPE;
4567 1.212 msaitoh } else
4568 1.212 msaitoh fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4569 1.1 dyoung
4570 1.211 msaitoh /* Update multicast filter entries only when it's not ALLMULTI */
4571 1.211 msaitoh if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4572 1.211 msaitoh ETHER_UNLOCK(ec);
4573 1.98 msaitoh update_ptr = (u8 *)mta;
4574 1.333 msaitoh ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
4575 1.99 msaitoh ixgbe_mc_array_itr, TRUE);
4576 1.211 msaitoh } else
4577 1.211 msaitoh ETHER_UNLOCK(ec);
4578 1.332 msaitoh
4579 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
4580 1.213 msaitoh } /* ixgbe_set_rxfilter */
4581 1.1 dyoung
4582 1.99 msaitoh /************************************************************************
4583 1.99 msaitoh * ixgbe_mc_array_itr
4584 1.99 msaitoh *
4585 1.99 msaitoh * An iterator function needed by the multicast shared code.
4586 1.99 msaitoh * It feeds the shared code routine the addresses in the
4587 1.213 msaitoh * array of ixgbe_set_rxfilter() one by one.
4588 1.99 msaitoh ************************************************************************/
4589 1.98 msaitoh static u8 *
4590 1.98 msaitoh ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4591 1.98 msaitoh {
4592 1.98 msaitoh struct ixgbe_mc_addr *mta;
4593 1.1 dyoung
4594 1.98 msaitoh mta = (struct ixgbe_mc_addr *)*update_ptr;
4595 1.98 msaitoh *vmdq = mta->vmdq;
4596 1.33 msaitoh
4597 1.98 msaitoh *update_ptr = (u8*)(mta + 1);
4598 1.99 msaitoh
4599 1.98 msaitoh return (mta->addr);
4600 1.99 msaitoh } /* ixgbe_mc_array_itr */
4601 1.82 msaitoh
4602 1.99 msaitoh /************************************************************************
4603 1.99 msaitoh * ixgbe_local_timer - Timer routine
4604 1.98 msaitoh *
4605 1.99 msaitoh * Checks for link status, updates statistics,
4606 1.99 msaitoh * and runs the watchdog check.
4607 1.99 msaitoh ************************************************************************/
4608 1.98 msaitoh static void
4609 1.98 msaitoh ixgbe_local_timer(void *arg)
4610 1.98 msaitoh {
4611 1.333 msaitoh struct ixgbe_softc *sc = arg;
4612 1.1 dyoung
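	/*
	 * Called from callout context.  Only kick the timer workqueue here;
	 * the real work runs in ixgbe_handle_timer() in thread context.
	 * timer_pending keeps at most one work item queued at a time and
	 * schedule_wqs_ok gates enqueueing while the driver isn't ready to
	 * run deferred work.
	 */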
4613 1.333 msaitoh if (sc->schedule_wqs_ok) {
4614 1.333 msaitoh if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
4615 1.333 msaitoh workqueue_enqueue(sc->timer_wq,
4616 1.333 msaitoh &sc->timer_wc, NULL);
4617 1.233 msaitoh }
4618 1.98 msaitoh }
4619 1.28 msaitoh
4620 1.98 msaitoh static void
4621 1.233 msaitoh ixgbe_handle_timer(struct work *wk, void *context)
4622 1.98 msaitoh {
4623 1.339 msaitoh struct ixgbe_softc *sc = context;
4624 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4625 1.333 msaitoh device_t dev = sc->dev;
4626 1.333 msaitoh struct ix_queue *que = sc->queues;
4627 1.153 msaitoh u64 queues = 0;
4628 1.134 msaitoh u64 v0, v1, v2, v3, v4, v5, v6, v7;
4629 1.153 msaitoh int hung = 0;
4630 1.134 msaitoh int i;
4631 1.1 dyoung
4632 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4633 1.1 dyoung
4634 1.98 msaitoh /* Check for pluggable optics */
4635 1.237 msaitoh if (ixgbe_is_sfp(hw)) {
4636 1.249 msaitoh bool sched_mod_task = false;
4637 1.237 msaitoh
4638 1.249 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4639 1.249 msaitoh /*
4640 1.249 msaitoh * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4641 1.250 msaitoh * any GPIO(SDP). So just schedule TASK_MOD.
4642 1.249 msaitoh */
4643 1.249 msaitoh sched_mod_task = true;
4644 1.249 msaitoh } else {
4645 1.249 msaitoh bool was_full, is_full;
4646 1.249 msaitoh
4647 1.249 msaitoh was_full =
4648 1.249 msaitoh hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4649 1.251 msaitoh is_full = ixgbe_sfp_cage_full(hw);
4650 1.249 msaitoh
4651 1.249 msaitoh /* Do probe if cage state changed */
4652 1.249 msaitoh if (was_full ^ is_full)
4653 1.249 msaitoh sched_mod_task = true;
4654 1.249 msaitoh }
4655 1.249 msaitoh if (sched_mod_task) {
4656 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4657 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4658 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
4659 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4660 1.239 msaitoh }
4661 1.237 msaitoh }
4662 1.1 dyoung
4663 1.333 msaitoh ixgbe_update_link_status(sc);
4664 1.333 msaitoh ixgbe_update_stats_counters(sc);
4665 1.33 msaitoh
4666 1.134 msaitoh /* Update some event counters */
4667 1.134 msaitoh v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4668 1.333 msaitoh que = sc->queues;
4669 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4670 1.186 msaitoh struct tx_ring *txr = que->txr;
4671 1.134 msaitoh
4672 1.134 msaitoh v0 += txr->q_efbig_tx_dma_setup;
4673 1.134 msaitoh v1 += txr->q_mbuf_defrag_failed;
4674 1.134 msaitoh v2 += txr->q_efbig2_tx_dma_setup;
4675 1.134 msaitoh v3 += txr->q_einval_tx_dma_setup;
4676 1.134 msaitoh v4 += txr->q_other_tx_dma_setup;
4677 1.134 msaitoh v5 += txr->q_eagain_tx_dma_setup;
4678 1.134 msaitoh v6 += txr->q_enomem_tx_dma_setup;
4679 1.134 msaitoh v7 += txr->q_tso_err;
4680 1.134 msaitoh }
4681 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
4682 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
4683 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
4684 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
4685 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
4686 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
4687 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
4688 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, v7);
4689 1.134 msaitoh
4690 1.153 msaitoh /*
4691 1.153 msaitoh * Check the TX queues status
4692 1.186 msaitoh * - mark hung queues so we don't schedule on them
4693 1.186 msaitoh * - watchdog only if all queues show hung
4694 1.153 msaitoh */
4695 1.333 msaitoh que = sc->queues;
4696 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4697 1.153 msaitoh /* Keep track of queues with work for soft irq */
4698 1.153 msaitoh if (que->txr->busy)
4699 1.190 msaitoh queues |= 1ULL << que->me;
4700 1.153 msaitoh /*
4701 1.153 msaitoh 		 * Each time txeof runs without cleaning anything while
4702 1.153 msaitoh 		 * uncleaned descriptors remain, it increments busy.  Once
4703 1.153 msaitoh 		 * busy reaches the MAX, we declare the queue hung.
4704 1.153 msaitoh */
4705 1.153 msaitoh if (que->busy == IXGBE_QUEUE_HUNG) {
4706 1.153 msaitoh ++hung;
4707 1.153 msaitoh /* Mark the queue as inactive */
4708 1.333 msaitoh sc->active_queues &= ~(1ULL << que->me);
4709 1.153 msaitoh continue;
4710 1.153 msaitoh } else {
4711 1.153 msaitoh /* Check if we've come back from hung */
4712 1.333 msaitoh if ((sc->active_queues & (1ULL << que->me)) == 0)
4713 1.333 msaitoh sc->active_queues |= 1ULL << que->me;
4714 1.153 msaitoh }
4715 1.153 msaitoh if (que->busy >= IXGBE_MAX_TX_BUSY) {
4716 1.153 msaitoh device_printf(dev,
4717 1.153 msaitoh "Warning queue %d appears to be hung!\n", i);
4718 1.153 msaitoh que->txr->busy = IXGBE_QUEUE_HUNG;
4719 1.153 msaitoh ++hung;
4720 1.153 msaitoh }
4721 1.150 msaitoh }
4722 1.150 msaitoh
4723 1.232 msaitoh /* Only truly watchdog if all queues show hung */
4724 1.333 msaitoh if (hung == sc->num_queues)
4725 1.153 msaitoh goto watchdog;
4726 1.160 msaitoh #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4727 1.153 msaitoh else if (queues != 0) { /* Force an IRQ on queues with work */
4728 1.333 msaitoh que = sc->queues;
4729 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4730 1.139 knakahar mutex_enter(&que->dc_mtx);
4731 1.153 msaitoh if (que->disabled_count == 0)
4732 1.333 msaitoh ixgbe_rearm_queues(sc,
4733 1.153 msaitoh queues & ((u64)1 << i));
4734 1.139 knakahar mutex_exit(&que->dc_mtx);
4735 1.131 knakahar }
4736 1.98 msaitoh }
4737 1.160 msaitoh #endif
4738 1.150 msaitoh
4739 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4740 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4741 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4742 1.153 msaitoh return;
4743 1.1 dyoung
4744 1.153 msaitoh watchdog:
4745 1.333 msaitoh device_printf(sc->dev, "Watchdog timeout -- resetting\n");
4746 1.333 msaitoh sc->ifp->if_flags &= ~IFF_RUNNING;
4747 1.333 msaitoh IXGBE_EVC_ADD(&sc->watchdog_events, 1);
4748 1.333 msaitoh ixgbe_init_locked(sc);
4749 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4750 1.233 msaitoh } /* ixgbe_handle_timer */
4751 1.43 msaitoh
4752 1.99 msaitoh /************************************************************************
4753 1.169 msaitoh * ixgbe_recovery_mode_timer - Recovery mode timer routine
4754 1.169 msaitoh ************************************************************************/
4755 1.169 msaitoh static void
4756 1.169 msaitoh ixgbe_recovery_mode_timer(void *arg)
4757 1.169 msaitoh {
4758 1.333 msaitoh struct ixgbe_softc *sc = arg;
4759 1.233 msaitoh
4760 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
4761 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode_timer_pending,
4762 1.254 msaitoh 0, 1) == 0) {
4763 1.333 msaitoh workqueue_enqueue(sc->recovery_mode_timer_wq,
4764 1.333 msaitoh &sc->recovery_mode_timer_wc, NULL);
4765 1.254 msaitoh }
4766 1.233 msaitoh }
4767 1.233 msaitoh }
4768 1.233 msaitoh
4769 1.233 msaitoh static void
4770 1.233 msaitoh ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4771 1.233 msaitoh {
4772 1.333 msaitoh struct ixgbe_softc *sc = context;
4773 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4774 1.169 msaitoh
4775 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4776 1.169 msaitoh if (ixgbe_fw_recovery_mode(hw)) {
4777 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode, 0, 1) == 0) {
4778 1.169 msaitoh /* Firmware error detected, entering recovery mode */
4779 1.333 msaitoh device_printf(sc->dev,
4780 1.319 msaitoh "Firmware recovery mode detected. Limiting "
4781 1.319 msaitoh "functionality. Refer to the Intel(R) Ethernet "
4782 1.319 msaitoh "Adapters and Devices User Guide for details on "
4783 1.319 msaitoh "firmware recovery mode.\n");
4784 1.169 msaitoh
4785 1.169 msaitoh if (hw->adapter_stopped == FALSE)
4786 1.333 msaitoh ixgbe_stop_locked(sc);
4787 1.169 msaitoh }
4788 1.169 msaitoh } else
4789 1.333 msaitoh atomic_cas_uint(&sc->recovery_mode, 1, 0);
4790 1.169 msaitoh
4791 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
4792 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4793 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4794 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4795 1.233 msaitoh } /* ixgbe_handle_recovery_mode_timer */
4796 1.169 msaitoh
4797 1.169 msaitoh /************************************************************************
4798 1.99 msaitoh * ixgbe_handle_mod - Tasklet for SFP module interrupts
4799 1.273 msaitoh * bool int_en: true if it's called when the interrupt is enabled.
4800 1.99 msaitoh ************************************************************************/
4801 1.1 dyoung static void
4802 1.273 msaitoh ixgbe_handle_mod(void *context, bool int_en)
4803 1.1 dyoung {
4804 1.339 msaitoh struct ixgbe_softc *sc = context;
4805 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4806 1.333 msaitoh device_t dev = sc->dev;
4807 1.249 msaitoh enum ixgbe_sfp_type last_sfp_type;
4808 1.251 msaitoh u32 err;
4809 1.249 msaitoh bool last_unsupported_sfp_recovery;
4810 1.98 msaitoh
4811 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4812 1.257 msaitoh
4813 1.249 msaitoh last_sfp_type = hw->phy.sfp_type;
4814 1.249 msaitoh last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4815 1.333 msaitoh IXGBE_EVC_ADD(&sc->mod_workev, 1);
4816 1.333 msaitoh if (sc->hw.need_crosstalk_fix) {
4817 1.251 msaitoh if ((hw->mac.type != ixgbe_mac_82598EB) &&
4818 1.251 msaitoh !ixgbe_sfp_cage_full(hw))
4819 1.218 msaitoh goto out;
4820 1.98 msaitoh }
4821 1.98 msaitoh
4822 1.98 msaitoh err = hw->phy.ops.identify_sfp(hw);
4823 1.98 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4824 1.249 msaitoh if (last_unsupported_sfp_recovery == false)
4825 1.249 msaitoh device_printf(dev,
4826 1.249 msaitoh "Unsupported SFP+ module type was detected.\n");
4827 1.218 msaitoh goto out;
4828 1.33 msaitoh }
4829 1.33 msaitoh
4830 1.219 msaitoh if (hw->need_unsupported_sfp_recovery) {
4831 1.219 msaitoh device_printf(dev, "Recovering from unsupported SFP\n");
4832 1.219 msaitoh /*
4833 1.219 msaitoh * We could recover the status by calling setup_sfp(),
4834 1.219 msaitoh * setup_link() and some others. It's complex and might not
4835 1.219 msaitoh 		 * work correctly in some unknown cases. To avoid that kind
4836 1.219 msaitoh 		 * of problem, call ixgbe_init_locked(). It's a simple and safe
4837 1.219 msaitoh * approach.
4838 1.219 msaitoh */
4839 1.333 msaitoh ixgbe_init_locked(sc);
4840 1.249 msaitoh } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4841 1.249 msaitoh (hw->phy.sfp_type != last_sfp_type)) {
4842 1.249 msaitoh /* A module is inserted and changed. */
4843 1.249 msaitoh
4844 1.219 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4845 1.219 msaitoh err = hw->phy.ops.reset(hw);
4846 1.219 msaitoh else {
4847 1.219 msaitoh err = hw->mac.ops.setup_sfp(hw);
4848 1.219 msaitoh hw->phy.sfp_setup_needed = FALSE;
4849 1.219 msaitoh }
4850 1.219 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4851 1.219 msaitoh device_printf(dev,
4852 1.219 msaitoh "Setup failure - unsupported SFP+ module type.\n");
4853 1.219 msaitoh goto out;
4854 1.219 msaitoh }
4855 1.1 dyoung }
4856 1.233 msaitoh
4857 1.218 msaitoh out:
4858 1.233 msaitoh 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
4859 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
4860 1.233 msaitoh
4861 1.233 msaitoh /* Adjust media types shown in ifconfig */
4862 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4863 1.333 msaitoh ifmedia_removeall(&sc->media);
4864 1.333 msaitoh ixgbe_add_media_types(sc);
4865 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
4866 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4867 1.233 msaitoh
4868 1.249 msaitoh /*
4869 1.288 andvar * Don't schedule MSF event if the chip is 82598. 82598 doesn't support
4870 1.249 msaitoh * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
4871 1.250 msaitoh * flap because the function calls setup_link().
4872 1.249 msaitoh */
4873 1.260 knakahar if (hw->mac.type != ixgbe_mac_82598EB) {
4874 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4875 1.273 msaitoh if (int_en)
4876 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4877 1.273 msaitoh else
4878 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4879 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4880 1.260 knakahar }
4881 1.249 msaitoh
4882 1.233 msaitoh /*
4883 1.233 msaitoh * Don't call ixgbe_schedule_admin_tasklet() because we are on
4884 1.233 msaitoh * the workqueue now.
4885 1.233 msaitoh */
4886 1.99 msaitoh } /* ixgbe_handle_mod */
4887 1.1 dyoung
4888 1.1 dyoung
4889 1.99 msaitoh /************************************************************************
4890 1.99 msaitoh * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4891 1.99 msaitoh ************************************************************************/
4892 1.33 msaitoh static void
4893 1.233 msaitoh ixgbe_handle_msf(void *context)
4894 1.33 msaitoh {
4895 1.339 msaitoh struct ixgbe_softc *sc = context;
4896 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4897 1.186 msaitoh u32 autoneg;
4898 1.186 msaitoh bool negotiate;
4899 1.33 msaitoh
4900 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4901 1.257 msaitoh
4902 1.333 msaitoh IXGBE_EVC_ADD(&sc->msf_workev, 1);
4903 1.33 msaitoh
4904 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
4905 1.98 msaitoh if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4906 1.98 msaitoh hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4907 1.98 msaitoh if (hw->mac.ops.setup_link)
4908 1.98 msaitoh hw->mac.ops.setup_link(hw, autoneg, TRUE);
4909 1.99 msaitoh } /* ixgbe_handle_msf */
4910 1.33 msaitoh
4911 1.99 msaitoh /************************************************************************
4912 1.99 msaitoh * ixgbe_handle_phy - Tasklet for external PHY interrupts
4913 1.99 msaitoh ************************************************************************/
4914 1.1 dyoung static void
4915 1.98 msaitoh ixgbe_handle_phy(void *context)
4916 1.1 dyoung {
4917 1.339 msaitoh struct ixgbe_softc *sc = context;
4918 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4919 1.98 msaitoh int error;
4920 1.1 dyoung
4921 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4922 1.257 msaitoh
4923 1.333 msaitoh IXGBE_EVC_ADD(&sc->phy_workev, 1);
4924 1.98 msaitoh error = hw->phy.ops.handle_lasi(hw);
4925 1.98 msaitoh if (error == IXGBE_ERR_OVERTEMP)
4926 1.333 msaitoh device_printf(sc->dev,
4927 1.98 msaitoh "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4928 1.98 msaitoh 		    "PHY will downshift to lower power state!\n");
4929 1.98 msaitoh else if (error)
4930 1.333 msaitoh device_printf(sc->dev,
4931 1.99 msaitoh "Error handling LASI interrupt: %d\n", error);
4932 1.99 msaitoh } /* ixgbe_handle_phy */
4933 1.1 dyoung
4934 1.98 msaitoh static void
4935 1.233 msaitoh ixgbe_handle_admin(struct work *wk, void *context)
4936 1.233 msaitoh {
4937 1.339 msaitoh struct ixgbe_softc *sc = context;
4938 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4939 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4940 1.260 knakahar u32 task_requests;
4941 1.273 msaitoh u32 eims_enable = 0;
4942 1.260 knakahar
4943 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4944 1.333 msaitoh sc->admin_pending = 0;
4945 1.333 msaitoh task_requests = sc->task_requests;
4946 1.333 msaitoh sc->task_requests = 0;
4947 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4948 1.233 msaitoh
4949 1.233 msaitoh /*
4950 1.233 msaitoh * Hold the IFNET_LOCK across this entire call. This will
4951 1.333 msaitoh * prevent additional changes to sc->phy_layer
4952 1.233 msaitoh * and serialize calls to this tasklet. We cannot hold the
4953 1.233 msaitoh * CORE_LOCK while calling into the ifmedia functions as
4954 1.233 msaitoh 	 * they call ifmedia_lock(), and that lock is the CORE_LOCK.
4955 1.233 msaitoh */
4956 1.233 msaitoh IFNET_LOCK(ifp);
4957 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4958 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4959 1.333 msaitoh ixgbe_handle_link(sc);
4960 1.273 msaitoh eims_enable |= IXGBE_EIMS_LSC;
4961 1.273 msaitoh }
4962 1.319 msaitoh if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0)
4963 1.333 msaitoh ixgbe_handle_mod(sc, false);
4964 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4965 1.333 msaitoh ixgbe_handle_mod(sc, true);
4966 1.273 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
4967 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4968 1.273 msaitoh else
4969 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4970 1.260 knakahar }
4971 1.273 msaitoh if ((task_requests
4972 1.273 msaitoh & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4973 1.333 msaitoh ixgbe_handle_msf(sc);
4974 1.273 msaitoh if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4975 1.273 msaitoh (hw->mac.type == ixgbe_mac_82599EB))
4976 1.273 msaitoh eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4977 1.260 knakahar }
4978 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4979 1.333 msaitoh ixgbe_handle_phy(sc);
4980 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4981 1.260 knakahar }
4982 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4983 1.333 msaitoh ixgbe_reinit_fdir(sc);
4984 1.273 msaitoh eims_enable |= IXGBE_EIMS_FLOW_DIR;
4985 1.260 knakahar }
4986 1.233 msaitoh #if 0 /* notyet */
4987 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4988 1.333 msaitoh ixgbe_handle_mbx(sc);
4989 1.273 msaitoh eims_enable |= IXGBE_EIMS_MAILBOX;
4990 1.260 knakahar }
4991 1.233 msaitoh #endif
4992 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
4993 1.233 msaitoh
4994 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4995 1.233 msaitoh IFNET_UNLOCK(ifp);
4996 1.233 msaitoh } /* ixgbe_handle_admin */
4997 1.233 msaitoh
4998 1.233 msaitoh static void
4999 1.98 msaitoh ixgbe_ifstop(struct ifnet *ifp, int disable)
5000 1.98 msaitoh {
5001 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
5002 1.1 dyoung
5003 1.333 msaitoh IXGBE_CORE_LOCK(sc);
5004 1.333 msaitoh ixgbe_stop_locked(sc);
5005 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
5006 1.223 thorpej
5007 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
5008 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
5009 1.98 msaitoh }
5010 1.1 dyoung
5011 1.99 msaitoh /************************************************************************
5012 1.252 msaitoh * ixgbe_stop_locked - Stop the hardware
5013 1.98 msaitoh *
5014 1.99 msaitoh * Disables all traffic on the adapter by issuing a
5015 1.99 msaitoh * global reset on the MAC and deallocates TX/RX buffers.
5016 1.99 msaitoh ************************************************************************/
5017 1.1 dyoung static void
5018 1.252 msaitoh ixgbe_stop_locked(void *arg)
5019 1.1 dyoung {
5020 1.186 msaitoh struct ifnet *ifp;
5021 1.339 msaitoh struct ixgbe_softc *sc = arg;
5022 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5023 1.99 msaitoh
5024 1.333 msaitoh ifp = sc->ifp;
5025 1.98 msaitoh
5026 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5027 1.98 msaitoh
5028 1.252 msaitoh INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
5029 1.333 msaitoh ixgbe_disable_intr(sc);
5030 1.333 msaitoh callout_stop(&sc->timer);
5031 1.98 msaitoh
5032 1.223 thorpej /* Don't schedule workqueues. */
5033 1.333 msaitoh sc->schedule_wqs_ok = false;
5034 1.223 thorpej
5035 1.98 msaitoh /* Let the stack know...*/
5036 1.98 msaitoh ifp->if_flags &= ~IFF_RUNNING;
5037 1.98 msaitoh
5038 1.98 msaitoh ixgbe_reset_hw(hw);
5039 1.98 msaitoh hw->adapter_stopped = FALSE;
5040 1.98 msaitoh ixgbe_stop_adapter(hw);
5041 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82599EB)
5042 1.98 msaitoh ixgbe_stop_mac_link_on_d3_82599(hw);
5043 1.98 msaitoh /* Turn off the laser - noop with no optics */
5044 1.98 msaitoh ixgbe_disable_tx_laser(hw);
5045 1.1 dyoung
5046 1.98 msaitoh /* Update the stack */
5047 1.333 msaitoh sc->link_up = FALSE;
5048 1.333 msaitoh ixgbe_update_link_status(sc);
5049 1.1 dyoung
5050 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
5051 1.333 msaitoh ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
5052 1.1 dyoung
5053 1.98 msaitoh return;
5054 1.252 msaitoh } /* ixgbe_stop_locked */
5055 1.1 dyoung
5056 1.99 msaitoh /************************************************************************
5057 1.99 msaitoh * ixgbe_update_link_status - Update OS on link state
5058 1.99 msaitoh *
5059 1.99 msaitoh * Note: Only updates the OS on the cached link state.
5060 1.186 msaitoh * The real check of the hardware only happens with
5061 1.186 msaitoh * a link interrupt.
5062 1.99 msaitoh ************************************************************************/
5063 1.98 msaitoh static void
5064 1.333 msaitoh ixgbe_update_link_status(struct ixgbe_softc *sc)
5065 1.1 dyoung {
5066 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5067 1.333 msaitoh device_t dev = sc->dev;
5068 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5069 1.98 msaitoh
5070 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5071 1.136 knakahar
5072 1.333 msaitoh if (sc->link_up) {
5073 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
5074 1.138 knakahar /*
5075 1.138 knakahar * To eliminate influence of the previous state
5076 1.138 knakahar * in the same way as ixgbe_init_locked().
5077 1.138 knakahar */
5078 1.333 msaitoh struct ix_queue *que = sc->queues;
5079 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5080 1.138 knakahar que->eitr_setting = 0;
5081 1.138 knakahar
5082 1.333 msaitoh 			if (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
5083 1.98 msaitoh /*
5084 1.98 msaitoh * Discard count for both MAC Local Fault and
5085 1.98 msaitoh * Remote Fault because those registers are
5086 1.98 msaitoh * valid only when the link speed is up and
5087 1.98 msaitoh * 10Gbps.
5088 1.98 msaitoh */
5089 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MLFC);
5090 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MRFC);
5091 1.98 msaitoh }
5092 1.98 msaitoh
5093 1.98 msaitoh if (bootverbose) {
5094 1.98 msaitoh const char *bpsmsg;
5095 1.1 dyoung
5096 1.333 msaitoh switch (sc->link_speed) {
5097 1.98 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
5098 1.98 msaitoh bpsmsg = "10 Gbps";
5099 1.98 msaitoh break;
5100 1.98 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
5101 1.98 msaitoh bpsmsg = "5 Gbps";
5102 1.98 msaitoh break;
5103 1.98 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
5104 1.98 msaitoh bpsmsg = "2.5 Gbps";
5105 1.98 msaitoh break;
5106 1.98 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
5107 1.98 msaitoh bpsmsg = "1 Gbps";
5108 1.98 msaitoh break;
5109 1.98 msaitoh case IXGBE_LINK_SPEED_100_FULL:
5110 1.98 msaitoh bpsmsg = "100 Mbps";
5111 1.98 msaitoh break;
5112 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
5113 1.99 msaitoh bpsmsg = "10 Mbps";
5114 1.99 msaitoh break;
5115 1.98 msaitoh default:
5116 1.98 msaitoh bpsmsg = "unknown speed";
5117 1.98 msaitoh break;
5118 1.98 msaitoh }
5119 1.98 msaitoh 				device_printf(dev, "Link is up %s %s\n",
5120 1.98 msaitoh bpsmsg, "Full Duplex");
5121 1.98 msaitoh }
5122 1.333 msaitoh sc->link_active = LINK_STATE_UP;
5123 1.98 msaitoh /* Update any Flow Control changes */
5124 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5125 1.98 msaitoh /* Update DMA coalescing config */
5126 1.333 msaitoh ixgbe_config_dmac(sc);
5127 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_UP);
5128 1.144 msaitoh
5129 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5130 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5131 1.98 msaitoh }
5132 1.174 msaitoh } else {
5133 1.174 msaitoh /*
5134 1.174 msaitoh * Do it when link active changes to DOWN. i.e.
5135 1.174 msaitoh * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5136 1.186 msaitoh * b) LINK_STATE_UP -> LINK_STATE_DOWN
5137 1.174 msaitoh */
5138 1.333 msaitoh if (sc->link_active != LINK_STATE_DOWN) {
5139 1.98 msaitoh if (bootverbose)
5140 1.98 msaitoh device_printf(dev, "Link is Down\n");
5141 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_DOWN);
5142 1.333 msaitoh sc->link_active = LINK_STATE_DOWN;
5143 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5144 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5145 1.333 msaitoh ixgbe_drain_all(sc);
5146 1.98 msaitoh }
5147 1.1 dyoung }
5148 1.99 msaitoh } /* ixgbe_update_link_status */
5149 1.1 dyoung
5150 1.99 msaitoh /************************************************************************
5151 1.99 msaitoh * ixgbe_config_dmac - Configure DMA Coalescing
5152 1.99 msaitoh ************************************************************************/
5153 1.1 dyoung static void
5154 1.333 msaitoh ixgbe_config_dmac(struct ixgbe_softc *sc)
5155 1.1 dyoung {
5156 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5157 1.98 msaitoh struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5158 1.1 dyoung
5159 1.99 msaitoh if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5160 1.98 msaitoh return;
5161 1.65 msaitoh
5162 1.333 msaitoh if (dcfg->watchdog_timer ^ sc->dmac ||
5163 1.333 msaitoh dcfg->link_speed ^ sc->link_speed) {
5164 1.333 msaitoh dcfg->watchdog_timer = sc->dmac;
5165 1.98 msaitoh dcfg->fcoe_en = false;
5166 1.333 msaitoh dcfg->link_speed = sc->link_speed;
5167 1.98 msaitoh dcfg->num_tcs = 1;
5168 1.51 msaitoh
5169 1.98 msaitoh INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5170 1.98 msaitoh dcfg->watchdog_timer, dcfg->link_speed);
5171 1.51 msaitoh
5172 1.98 msaitoh hw->mac.ops.dmac_config(hw);
5173 1.98 msaitoh }
5174 1.99 msaitoh } /* ixgbe_config_dmac */
5175 1.51 msaitoh
5176 1.99 msaitoh /************************************************************************
5177 1.99 msaitoh * ixgbe_enable_intr
5178 1.99 msaitoh ************************************************************************/
5179 1.98 msaitoh static void
5180 1.333 msaitoh ixgbe_enable_intr(struct ixgbe_softc *sc)
5181 1.98 msaitoh {
5182 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5183 1.333 msaitoh struct ix_queue *que = sc->queues;
5184 1.98 msaitoh u32 mask, fwsm;
5185 1.51 msaitoh
5186 1.98 msaitoh mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5187 1.45 msaitoh
5188 1.333 msaitoh switch (sc->hw.mac.type) {
5189 1.99 msaitoh case ixgbe_mac_82599EB:
5190 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5191 1.99 msaitoh /* Temperature sensor on some adapters */
5192 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0;
5193 1.99 msaitoh /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5194 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5195 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP2;
5196 1.99 msaitoh break;
5197 1.99 msaitoh case ixgbe_mac_X540:
5198 1.99 msaitoh /* Detect if Thermal Sensor is enabled */
5199 1.99 msaitoh fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5200 1.99 msaitoh if (fwsm & IXGBE_FWSM_TS_ENABLED)
5201 1.98 msaitoh mask |= IXGBE_EIMS_TS;
5202 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5203 1.99 msaitoh break;
5204 1.99 msaitoh case ixgbe_mac_X550:
5205 1.99 msaitoh /* MAC thermal sensor is automatically enabled */
5206 1.99 msaitoh mask |= IXGBE_EIMS_TS;
5207 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5208 1.99 msaitoh break;
5209 1.99 msaitoh case ixgbe_mac_X550EM_x:
5210 1.99 msaitoh case ixgbe_mac_X550EM_a:
5211 1.99 msaitoh /* Some devices use SDP0 for important information */
5212 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5213 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5214 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5215 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5216 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5217 1.99 msaitoh if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5218 1.99 msaitoh mask |= IXGBE_EICR_GPI_SDP0_X540;
5219 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5220 1.99 msaitoh break;
5221 1.99 msaitoh default:
5222 1.99 msaitoh break;
5223 1.1 dyoung }
5224 1.51 msaitoh
5225 1.99 msaitoh /* Enable Fan Failure detection */
5226 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
5227 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5228 1.99 msaitoh /* Enable SR-IOV */
5229 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5230 1.99 msaitoh mask |= IXGBE_EIMS_MAILBOX;
5231 1.99 msaitoh /* Enable Flow Director */
5232 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FDIR)
5233 1.99 msaitoh mask |= IXGBE_EIMS_FLOW_DIR;
5234 1.99 msaitoh
5235 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5236 1.64 msaitoh
5237 1.98 msaitoh /* With MSI-X we use auto clear */
5238 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0) {
5239 1.270 msaitoh /*
5240 1.309 msaitoh * We use auto clear for RTX_QUEUE only. Don't use other
5241 1.309 msaitoh * interrupts (e.g. link interrupt). BTW, we don't use
5242 1.309 msaitoh * TCP_TIMER interrupt itself.
5243 1.270 msaitoh */
5244 1.270 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5245 1.98 msaitoh }
5246 1.1 dyoung
5247 1.98 msaitoh /*
5248 1.99 msaitoh * Now enable all queues, this is done separately to
5249 1.99 msaitoh * allow for handling the extended (beyond 32) MSI-X
5250 1.99 msaitoh * vectors that can be used by 82599
5251 1.99 msaitoh */
5252 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5253 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
5254 1.1 dyoung
5255 1.98 msaitoh IXGBE_WRITE_FLUSH(hw);
5256 1.43 msaitoh
5257 1.99 msaitoh } /* ixgbe_enable_intr */
5258 1.1 dyoung
5259 1.99 msaitoh /************************************************************************
5260 1.139 knakahar * ixgbe_disable_intr_internal
5261 1.99 msaitoh ************************************************************************/
5262 1.44 msaitoh static void
5263 1.333 msaitoh ixgbe_disable_intr_internal(struct ixgbe_softc *sc, bool nestok)
5264 1.44 msaitoh {
5265 1.333 msaitoh struct ix_queue *que = sc->queues;
5266 1.127 knakahar
5267 1.127 knakahar /* disable interrupts other than queues */
5268 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5269 1.127 knakahar
5270 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0)
5271 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
5272 1.127 knakahar
5273 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5274 1.333 msaitoh ixgbe_disable_queue_internal(sc, que->msix, nestok);
5275 1.127 knakahar
5276 1.333 msaitoh IXGBE_WRITE_FLUSH(&sc->hw);
5277 1.99 msaitoh
5278 1.139 knakahar } /* ixgbe_disable_intr_internal */
5279 1.139 knakahar
5280 1.139 knakahar /************************************************************************
5281 1.139 knakahar * ixgbe_disable_intr
5282 1.139 knakahar ************************************************************************/
5283 1.139 knakahar static void
5284 1.333 msaitoh ixgbe_disable_intr(struct ixgbe_softc *sc)
5285 1.139 knakahar {
5286 1.139 knakahar
5287 1.333 msaitoh ixgbe_disable_intr_internal(sc, true);
5288 1.99 msaitoh } /* ixgbe_disable_intr */
5289 1.98 msaitoh
5290 1.99 msaitoh /************************************************************************
5291 1.139 knakahar * ixgbe_ensure_disabled_intr
5292 1.139 knakahar ************************************************************************/
5293 1.139 knakahar void
5294 1.333 msaitoh ixgbe_ensure_disabled_intr(struct ixgbe_softc *sc)
5295 1.139 knakahar {
5296 1.139 knakahar
5297 1.333 msaitoh ixgbe_disable_intr_internal(sc, false);
5298 1.139 knakahar } /* ixgbe_ensure_disabled_intr */
5299 1.139 knakahar
5300 1.139 knakahar /************************************************************************
5301 1.99 msaitoh * ixgbe_legacy_irq - Legacy Interrupt Service routine
5302 1.99 msaitoh ************************************************************************/
5303 1.98 msaitoh static int
5304 1.98 msaitoh ixgbe_legacy_irq(void *arg)
5305 1.1 dyoung {
5306 1.98 msaitoh struct ix_queue *que = arg;
5307 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
5308 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5309 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5310 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
5311 1.277 msaitoh u32 eicr;
5312 1.269 msaitoh u32 eims_orig;
5313 1.273 msaitoh u32 eims_enable = 0;
5314 1.273 msaitoh u32 eims_disable = 0;
5315 1.98 msaitoh
5316 1.269 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5317 1.269 msaitoh /*
5318 1.269 msaitoh * Silicon errata #26 on 82598. Disable all interrupts before reading
5319 1.269 msaitoh * EICR.
5320 1.269 msaitoh */
5321 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5322 1.98 msaitoh
5323 1.268 msaitoh /* Read and clear EICR */
5324 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5325 1.44 msaitoh
5326 1.99 msaitoh if (eicr == 0) {
5327 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.intzero, 1);
5328 1.269 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5329 1.98 msaitoh return 0;
5330 1.98 msaitoh }
5331 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.legint, 1);
5332 1.44 msaitoh
5333 1.272 msaitoh /* Queue (0) intr */
5334 1.308 msaitoh if (((ifp->if_flags & IFF_RUNNING) != 0) &&
5335 1.308 msaitoh (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5336 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
5337 1.272 msaitoh
5338 1.147 knakahar /*
5339 1.265 msaitoh * The same as ixgbe_msix_que() about
5340 1.265 msaitoh * "que->txrx_use_workqueue".
5341 1.147 knakahar */
5342 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
5343 1.147 knakahar
5344 1.98 msaitoh IXGBE_TX_LOCK(txr);
5345 1.98 msaitoh ixgbe_txeof(txr);
5346 1.99 msaitoh #ifdef notyet
5347 1.99 msaitoh if (!ixgbe_ring_empty(ifp, txr->br))
5348 1.99 msaitoh ixgbe_start_locked(ifp, txr);
5349 1.99 msaitoh #endif
5350 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
5351 1.271 msaitoh
5352 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
5353 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
5354 1.273 msaitoh /* Disable queue 0 interrupt */
5355 1.273 msaitoh eims_disable |= 1UL << 0;
5356 1.273 msaitoh } else
5357 1.317 msaitoh eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE;
5358 1.44 msaitoh
5359 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
5360 1.233 msaitoh
5361 1.273 msaitoh /* Re-enable some interrupts */
5362 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5363 1.273 msaitoh (eims_orig & ~eims_disable) | eims_enable);
5364 1.99 msaitoh
5365 1.98 msaitoh return 1;
5366 1.99 msaitoh } /* ixgbe_legacy_irq */
5367 1.98 msaitoh
5368 1.99 msaitoh /************************************************************************
5369 1.119 msaitoh * ixgbe_free_pciintr_resources
5370 1.99 msaitoh ************************************************************************/
5371 1.98 msaitoh static void
5372 1.333 msaitoh ixgbe_free_pciintr_resources(struct ixgbe_softc *sc)
5373 1.44 msaitoh {
5374 1.333 msaitoh struct ix_queue *que = sc->queues;
5375 1.98 msaitoh int rid;
5376 1.44 msaitoh
5377 1.98 msaitoh /*
5378 1.99 msaitoh * Release all msix queue resources:
5379 1.99 msaitoh */
5380 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
5381 1.119 msaitoh if (que->res != NULL) {
5382 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[i]);
5383 1.333 msaitoh sc->osdep.ihs[i] = NULL;
5384 1.119 msaitoh }
5385 1.58 msaitoh }
5386 1.58 msaitoh
5387 1.98 msaitoh /* Clean the Legacy or Link interrupt last */
5388 1.333 msaitoh if (sc->vector) /* we are doing MSIX */
5389 1.333 msaitoh rid = sc->vector;
5390 1.98 msaitoh else
5391 1.98 msaitoh rid = 0;
5392 1.44 msaitoh
5393 1.333 msaitoh if (sc->osdep.ihs[rid] != NULL) {
5394 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[rid]);
5395 1.333 msaitoh sc->osdep.ihs[rid] = NULL;
5396 1.98 msaitoh }
5397 1.44 msaitoh
5398 1.333 msaitoh if (sc->osdep.intrs != NULL) {
5399 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
5400 1.333 msaitoh sc->osdep.nintrs);
5401 1.333 msaitoh sc->osdep.intrs = NULL;
5402 1.119 msaitoh }
5403 1.119 msaitoh } /* ixgbe_free_pciintr_resources */
5404 1.119 msaitoh
5405 1.119 msaitoh /************************************************************************
5406 1.119 msaitoh * ixgbe_free_pci_resources
5407 1.119 msaitoh ************************************************************************/
5408 1.119 msaitoh static void
5409 1.333 msaitoh ixgbe_free_pci_resources(struct ixgbe_softc *sc)
5410 1.119 msaitoh {
5411 1.119 msaitoh
5412 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
5413 1.44 msaitoh
5414 1.333 msaitoh if (sc->osdep.mem_size != 0) {
5415 1.333 msaitoh bus_space_unmap(sc->osdep.mem_bus_space_tag,
5416 1.333 msaitoh sc->osdep.mem_bus_space_handle,
5417 1.333 msaitoh sc->osdep.mem_size);
5418 1.44 msaitoh }
5419 1.99 msaitoh } /* ixgbe_free_pci_resources */
5420 1.44 msaitoh
5421 1.99 msaitoh /************************************************************************
5422 1.99 msaitoh * ixgbe_sysctl_flowcntl
5423 1.99 msaitoh *
5424 1.99 msaitoh * SYSCTL wrapper around setting Flow Control
5425 1.99 msaitoh ************************************************************************/
5426 1.98 msaitoh static int
5427 1.98 msaitoh ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5428 1.98 msaitoh {
5429 1.98 msaitoh struct sysctlnode node = *rnode;
5430 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5431 1.99 msaitoh int error, fc;
5432 1.82 msaitoh
5433 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5434 1.169 msaitoh return (EPERM);
5435 1.169 msaitoh
5436 1.333 msaitoh fc = sc->hw.fc.current_mode;
5437 1.98 msaitoh node.sysctl_data = &fc;
5438 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5439 1.98 msaitoh if (error != 0 || newp == NULL)
5440 1.98 msaitoh return error;
5441 1.82 msaitoh
5442 1.98 msaitoh /* Don't bother if it's not changed */
5443 1.333 msaitoh if (fc == sc->hw.fc.current_mode)
5444 1.98 msaitoh return (0);
5445 1.83 msaitoh
5446 1.333 msaitoh return ixgbe_set_flowcntl(sc, fc);
5447 1.99 msaitoh } /* ixgbe_sysctl_flowcntl */
5448 1.1 dyoung
5449 1.99 msaitoh /************************************************************************
5450 1.99 msaitoh * ixgbe_set_flowcntl - Set flow control
5451 1.99 msaitoh *
5452 1.99 msaitoh * Flow control values:
5453 1.99 msaitoh * 0 - off
5454 1.99 msaitoh * 1 - rx pause
5455 1.99 msaitoh * 2 - tx pause
5456 1.99 msaitoh * 3 - full
5457 1.99 msaitoh ************************************************************************/
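/*
 * Illustrative example (added, not from the original sources): writing 3
 * to the flow control sysctl selects ixgbe_fc_full below, enabling both RX
 * and TX PAUSE; writing 0 selects ixgbe_fc_none and, when more than one
 * queue is configured, also re-enables per-queue RX drop via
 * ixgbe_enable_rx_drop().
 */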
5458 1.98 msaitoh static int
5459 1.333 msaitoh ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
5460 1.98 msaitoh {
5461 1.98 msaitoh switch (fc) {
5462 1.98 msaitoh case ixgbe_fc_rx_pause:
5463 1.98 msaitoh case ixgbe_fc_tx_pause:
5464 1.98 msaitoh case ixgbe_fc_full:
5465 1.333 msaitoh sc->hw.fc.requested_mode = fc;
5466 1.333 msaitoh if (sc->num_queues > 1)
5467 1.333 msaitoh ixgbe_disable_rx_drop(sc);
5468 1.98 msaitoh break;
5469 1.98 msaitoh case ixgbe_fc_none:
5470 1.333 msaitoh sc->hw.fc.requested_mode = ixgbe_fc_none;
5471 1.333 msaitoh if (sc->num_queues > 1)
5472 1.333 msaitoh ixgbe_enable_rx_drop(sc);
5473 1.98 msaitoh break;
5474 1.98 msaitoh default:
5475 1.98 msaitoh return (EINVAL);
5476 1.1 dyoung }
5477 1.99 msaitoh
5478 1.98 msaitoh #if 0 /* XXX NetBSD */
5479 1.98 msaitoh /* Don't autoneg if forcing a value */
5480 1.333 msaitoh sc->hw.fc.disable_fc_autoneg = TRUE;
5481 1.98 msaitoh #endif
5482 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5483 1.99 msaitoh
5484 1.98 msaitoh return (0);
5485 1.99 msaitoh } /* ixgbe_set_flowcntl */
5486 1.1 dyoung
5487 1.99 msaitoh /************************************************************************
5488 1.99 msaitoh * ixgbe_enable_rx_drop
5489 1.99 msaitoh *
5490 1.99 msaitoh * Enable the hardware to drop packets when the buffer is
5491 1.99 msaitoh * full. This is useful with multiqueue, so that no single
5492 1.99 msaitoh * queue being full stalls the entire RX engine. We only
5493 1.99 msaitoh * enable this when Multiqueue is enabled AND Flow Control
5494 1.99 msaitoh * is disabled.
5495 1.99 msaitoh ************************************************************************/
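/*
 * Added note (illustrative): for example, with eight queues and flow
 * control set to ixgbe_fc_none in ixgbe_set_flowcntl() above, DROP_EN is
 * set in SRRCTL for each of the eight RX rings and QDE is written for each
 * configured VF, so one overflowing ring no longer stalls the others.
 */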
5496 1.98 msaitoh static void
5497 1.333 msaitoh ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
5498 1.98 msaitoh {
5499 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5500 1.186 msaitoh struct rx_ring *rxr;
5501 1.186 msaitoh u32 srrctl;
5502 1.1 dyoung
5503 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5504 1.333 msaitoh rxr = &sc->rx_rings[i];
5505 1.99 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5506 1.99 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
5507 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5508 1.98 msaitoh }
5509 1.99 msaitoh
5510 1.98 msaitoh /* enable drop for each vf */
5511 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5512 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5513 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5514 1.98 msaitoh IXGBE_QDE_ENABLE));
5515 1.98 msaitoh }
5516 1.99 msaitoh } /* ixgbe_enable_rx_drop */
5517 1.43 msaitoh
5518 1.99 msaitoh /************************************************************************
5519 1.99 msaitoh * ixgbe_disable_rx_drop
5520 1.99 msaitoh ************************************************************************/
5521 1.98 msaitoh static void
5522 1.333 msaitoh ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
5523 1.98 msaitoh {
5524 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5525 1.186 msaitoh struct rx_ring *rxr;
5526 1.186 msaitoh u32 srrctl;
5527 1.43 msaitoh
5528 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5529 1.333 msaitoh rxr = &sc->rx_rings[i];
5530 1.186 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5531 1.186 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5532 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5533 1.98 msaitoh }
5534 1.99 msaitoh
5535 1.98 msaitoh /* disable drop for each vf */
5536 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5537 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5538 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5539 1.1 dyoung }
5540 1.99 msaitoh } /* ixgbe_disable_rx_drop */
5541 1.98 msaitoh
5542 1.99 msaitoh /************************************************************************
5543 1.99 msaitoh * ixgbe_sysctl_advertise
5544 1.99 msaitoh *
5545 1.99 msaitoh * SYSCTL wrapper around setting advertised speed
5546 1.99 msaitoh ************************************************************************/
5547 1.98 msaitoh static int
5548 1.98 msaitoh ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5549 1.98 msaitoh {
5550 1.99 msaitoh struct sysctlnode node = *rnode;
5551 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5552 1.186 msaitoh int error = 0, advertise;
5553 1.1 dyoung
5554 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5555 1.169 msaitoh return (EPERM);
5556 1.169 msaitoh
5557 1.333 msaitoh advertise = sc->advertise;
5558 1.98 msaitoh node.sysctl_data = &advertise;
5559 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5560 1.98 msaitoh if (error != 0 || newp == NULL)
5561 1.98 msaitoh return error;
5562 1.28 msaitoh
5563 1.333 msaitoh return ixgbe_set_advertise(sc, advertise);
5564 1.99 msaitoh } /* ixgbe_sysctl_advertise */
5565 1.1 dyoung
5566 1.99 msaitoh /************************************************************************
5567 1.99 msaitoh * ixgbe_set_advertise - Control advertised link speed
5568 1.99 msaitoh *
5569 1.99 msaitoh * Flags:
5570 1.103 msaitoh * 0x00 - Default (all capable link speed)
5571 1.296 msaitoh * 0x1 - advertise 100 Mb
5572 1.296 msaitoh * 0x2 - advertise 1G
5573 1.296 msaitoh * 0x4 - advertise 10G
5574 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5575 1.103 msaitoh * 0x10 - advertise 2.5G
5576 1.103 msaitoh * 0x20 - advertise 5G
5577 1.99 msaitoh ************************************************************************/
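/*
 * Illustrative example (added, not from the original sources): a value of
 * 0x6 (0x2 | 0x4) requests that only 1G and 10G be advertised, while 0
 * falls through to "speed = link_caps", i.e. every speed the PHY reports
 * via get_link_capabilities().  Any bit the link does not support is
 * rejected with EINVAL below.
 */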
5578 1.98 msaitoh static int
5579 1.333 msaitoh ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
5580 1.1 dyoung {
5581 1.186 msaitoh device_t dev;
5582 1.186 msaitoh struct ixgbe_hw *hw;
5583 1.99 msaitoh ixgbe_link_speed speed = 0;
5584 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5585 1.186 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
5586 1.186 msaitoh bool negotiate = FALSE;
5587 1.98 msaitoh
5588 1.98 msaitoh /* Checks to validate new value */
5589 1.333 msaitoh if (sc->advertise == advertise) /* no change */
5590 1.98 msaitoh return (0);
5591 1.98 msaitoh
5592 1.333 msaitoh dev = sc->dev;
5593 1.333 msaitoh hw = &sc->hw;
5594 1.98 msaitoh
5595 1.98 msaitoh /* No speed changes for backplane media */
5596 1.98 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
5597 1.98 msaitoh return (ENODEV);
5598 1.98 msaitoh
5599 1.98 msaitoh if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5600 1.98 msaitoh (hw->phy.multispeed_fiber))) {
5601 1.98 msaitoh device_printf(dev,
5602 1.98 msaitoh "Advertised speed can only be set on copper or "
5603 1.98 msaitoh "multispeed fiber media types.\n");
5604 1.98 msaitoh return (EINVAL);
5605 1.98 msaitoh }
5606 1.98 msaitoh
5607 1.259 msaitoh if (advertise < 0x0 || advertise > 0x3f) {
5608 1.319 msaitoh device_printf(dev, "Invalid advertised speed; "
5609 1.319 msaitoh "valid modes are 0x0 through 0x3f\n");
5610 1.98 msaitoh return (EINVAL);
5611 1.98 msaitoh }
5612 1.1 dyoung
5613 1.99 msaitoh if (hw->mac.ops.get_link_capabilities) {
5614 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5615 1.99 msaitoh &negotiate);
5616 1.99 msaitoh if (err != IXGBE_SUCCESS) {
5617 1.319 msaitoh device_printf(dev, "Unable to determine supported "
5618 1.319 msaitoh "advertise speeds\n");
5619 1.99 msaitoh return (ENODEV);
5620 1.99 msaitoh }
5621 1.99 msaitoh }
5622 1.99 msaitoh
5623 1.98 msaitoh /* Set new value and report new advertised mode */
5624 1.99 msaitoh if (advertise & 0x1) {
5625 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5626 1.319 msaitoh device_printf(dev, "Interface does not support 100Mb "
5627 1.319 msaitoh "advertised speed\n");
5628 1.98 msaitoh return (EINVAL);
5629 1.98 msaitoh }
5630 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
5631 1.99 msaitoh }
5632 1.99 msaitoh if (advertise & 0x2) {
5633 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5634 1.319 msaitoh device_printf(dev, "Interface does not support 1Gb "
5635 1.319 msaitoh "advertised speed\n");
5636 1.99 msaitoh return (EINVAL);
5637 1.99 msaitoh }
5638 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
5639 1.99 msaitoh }
5640 1.99 msaitoh if (advertise & 0x4) {
5641 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5642 1.319 msaitoh device_printf(dev, "Interface does not support 10Gb "
5643 1.319 msaitoh "advertised speed\n");
5644 1.99 msaitoh return (EINVAL);
5645 1.99 msaitoh }
5646 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
5647 1.99 msaitoh }
5648 1.99 msaitoh if (advertise & 0x8) {
5649 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5650 1.319 msaitoh device_printf(dev, "Interface does not support 10Mb "
5651 1.319 msaitoh "advertised speed\n");
5652 1.99 msaitoh return (EINVAL);
5653 1.99 msaitoh }
5654 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
5655 1.99 msaitoh }
5656 1.103 msaitoh if (advertise & 0x10) {
5657 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5658 1.319 msaitoh device_printf(dev, "Interface does not support 2.5Gb "
5659 1.319 msaitoh "advertised speed\n");
5660 1.103 msaitoh return (EINVAL);
5661 1.103 msaitoh }
5662 1.103 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5663 1.103 msaitoh }
5664 1.103 msaitoh if (advertise & 0x20) {
5665 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5666 1.319 msaitoh device_printf(dev, "Interface does not support 5Gb "
5667 1.319 msaitoh "advertised speed\n");
5668 1.103 msaitoh return (EINVAL);
5669 1.103 msaitoh }
5670 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
5671 1.103 msaitoh }
5672 1.99 msaitoh if (advertise == 0)
5673 1.99 msaitoh speed = link_caps; /* All capable link speed */
5674 1.1 dyoung
5675 1.98 msaitoh hw->mac.autotry_restart = TRUE;
5676 1.98 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
5677 1.333 msaitoh sc->advertise = advertise;
5678 1.1 dyoung
5679 1.99 msaitoh return (0);
5680 1.99 msaitoh } /* ixgbe_set_advertise */
5681 1.1 dyoung
5682 1.99 msaitoh /************************************************************************
5683 1.296 msaitoh * ixgbe_get_default_advertise - Get default advertised speed settings
5684 1.99 msaitoh *
5685 1.99 msaitoh * Formatted for sysctl usage.
5686 1.99 msaitoh * Flags:
5687 1.296 msaitoh * 0x1 - advertise 100 Mb
5688 1.296 msaitoh * 0x2 - advertise 1G
5689 1.296 msaitoh * 0x4 - advertise 10G
5690 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5691 1.103 msaitoh * 0x10 - advertise 2.5G
5692 1.103 msaitoh * 0x20 - advertise 5G
5693 1.99 msaitoh ************************************************************************/
5694 1.98 msaitoh static int
5695 1.333 msaitoh ixgbe_get_default_advertise(struct ixgbe_softc *sc)
5696 1.1 dyoung {
5697 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5698 1.186 msaitoh int speed;
5699 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5700 1.186 msaitoh s32 err;
5701 1.186 msaitoh bool negotiate = FALSE;
5702 1.98 msaitoh
5703 1.99 msaitoh /*
5704 1.99 msaitoh * Advertised speed means nothing unless it's copper or
5705 1.99 msaitoh * multi-speed fiber
5706 1.99 msaitoh */
5707 1.99 msaitoh if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5708 1.99 msaitoh !(hw->phy.multispeed_fiber))
5709 1.99 msaitoh return (0);
5710 1.1 dyoung
5711 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5712 1.99 msaitoh if (err != IXGBE_SUCCESS)
5713 1.99 msaitoh return (0);
5714 1.1 dyoung
5715 1.99 msaitoh speed =
5716 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5717 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5718 1.103 msaitoh ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5719 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5720 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5721 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5722 1.99 msaitoh
5723 1.99 msaitoh return speed;
5724 1.296 msaitoh } /* ixgbe_get_default_advertise */
5725 1.99 msaitoh
5726 1.99 msaitoh /************************************************************************
5727 1.99 msaitoh * ixgbe_sysctl_dmac - Manage DMA Coalescing
5728 1.99 msaitoh *
5729 1.99 msaitoh * Control values:
5730 1.99 msaitoh * 0/1 - off / on (use default value of 1000)
5731 1.99 msaitoh *
5732 1.99 msaitoh * Legal timer values are:
5733 1.99 msaitoh * 50,100,250,500,1000,2000,5000,10000
5734 1.99 msaitoh *
5735 1.99 msaitoh * Turning off interrupt moderation will also turn this off.
5736 1.99 msaitoh ************************************************************************/
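/*
 * Example (added commentary): writing 1 enables DMA coalescing with the
 * default watchdog value of 1000, writing 250 selects 250, and writing 0
 * disables it; any value not listed above is rejected with EINVAL.  The new
 * value is picked up by ixgbe_config_dmac() once the interface is
 * re-initialized below.
 */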
5737 1.1 dyoung static int
5738 1.98 msaitoh ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5739 1.1 dyoung {
5740 1.44 msaitoh struct sysctlnode node = *rnode;
5741 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5742 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5743 1.186 msaitoh int error;
5744 1.186 msaitoh int newval;
5745 1.1 dyoung
5746 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5747 1.169 msaitoh return (EPERM);
5748 1.169 msaitoh
5749 1.333 msaitoh newval = sc->dmac;
5750 1.98 msaitoh node.sysctl_data = &newval;
5751 1.22 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5752 1.98 msaitoh if ((error) || (newp == NULL))
5753 1.98 msaitoh return (error);
5754 1.98 msaitoh
5755 1.98 msaitoh switch (newval) {
5756 1.98 msaitoh case 0:
5757 1.98 msaitoh /* Disabled */
5758 1.333 msaitoh sc->dmac = 0;
5759 1.98 msaitoh break;
5760 1.98 msaitoh case 1:
5761 1.98 msaitoh /* Enable and use default */
5762 1.333 msaitoh sc->dmac = 1000;
5763 1.98 msaitoh break;
5764 1.98 msaitoh case 50:
5765 1.98 msaitoh case 100:
5766 1.98 msaitoh case 250:
5767 1.98 msaitoh case 500:
5768 1.98 msaitoh case 1000:
5769 1.98 msaitoh case 2000:
5770 1.98 msaitoh case 5000:
5771 1.98 msaitoh case 10000:
5772 1.98 msaitoh /* Legal values - allow */
5773 1.333 msaitoh sc->dmac = newval;
5774 1.98 msaitoh break;
5775 1.98 msaitoh default:
5776 1.98 msaitoh /* Do nothing, illegal value */
5777 1.98 msaitoh return (EINVAL);
5778 1.22 msaitoh }
5779 1.1 dyoung
5780 1.98 msaitoh /* Re-initialize hardware if it's already running */
5781 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING)
5782 1.302 riastrad if_init(ifp);
5783 1.1 dyoung
5784 1.98 msaitoh return (0);
5785 1.1 dyoung }
5786 1.1 dyoung
5787 1.98 msaitoh #ifdef IXGBE_DEBUG
5788 1.99 msaitoh /************************************************************************
5789 1.99 msaitoh * ixgbe_sysctl_power_state
5790 1.99 msaitoh *
5791 1.99 msaitoh * Sysctl to test power states
5792 1.99 msaitoh * Values:
5793 1.99 msaitoh * 0 - set device to D0
5794 1.99 msaitoh * 3 - set device to D3
5795 1.99 msaitoh * (none) - get current device power state
5796 1.99 msaitoh ************************************************************************/
5797 1.98 msaitoh static int
5798 1.98 msaitoh ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5799 1.44 msaitoh {
5800 1.99 msaitoh #ifdef notyet
5801 1.98 msaitoh struct sysctlnode node = *rnode;
5802 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5803 1.333 msaitoh device_t dev = sc->dev;
5804 1.186 msaitoh int curr_ps, new_ps, error = 0;
5805 1.44 msaitoh
5806 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5807 1.169 msaitoh return (EPERM);
5808 1.169 msaitoh
5809 1.98 msaitoh curr_ps = new_ps = pci_get_powerstate(dev);
5810 1.44 msaitoh
5811 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5812 1.98 msaitoh if ((error) || (req->newp == NULL))
5813 1.98 msaitoh return (error);
5814 1.44 msaitoh
5815 1.98 msaitoh if (new_ps == curr_ps)
5816 1.98 msaitoh return (0);
5817 1.44 msaitoh
5818 1.98 msaitoh if (new_ps == 3 && curr_ps == 0)
5819 1.98 msaitoh error = DEVICE_SUSPEND(dev);
5820 1.98 msaitoh else if (new_ps == 0 && curr_ps == 3)
5821 1.98 msaitoh error = DEVICE_RESUME(dev);
5822 1.98 msaitoh else
5823 1.98 msaitoh return (EINVAL);
5824 1.44 msaitoh
5825 1.98 msaitoh device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5826 1.44 msaitoh
5827 1.98 msaitoh return (error);
5828 1.98 msaitoh #else
5829 1.98 msaitoh return 0;
5830 1.98 msaitoh #endif
5831 1.99 msaitoh } /* ixgbe_sysctl_power_state */
5832 1.98 msaitoh #endif
5833 1.99 msaitoh
5834 1.99 msaitoh /************************************************************************
5835 1.99 msaitoh * ixgbe_sysctl_wol_enable
5836 1.99 msaitoh *
5837 1.99 msaitoh * Sysctl to enable/disable the WoL capability,
5838 1.99 msaitoh * if supported by the adapter.
5839 1.99 msaitoh *
5840 1.99 msaitoh * Values:
5841 1.99 msaitoh * 0 - disabled
5842 1.99 msaitoh * 1 - enabled
5843 1.99 msaitoh ************************************************************************/
5844 1.98 msaitoh static int
5845 1.98 msaitoh ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5846 1.98 msaitoh {
5847 1.98 msaitoh struct sysctlnode node = *rnode;
5848 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5849 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5850 1.186 msaitoh bool new_wol_enabled;
5851 1.186 msaitoh int error = 0;
5852 1.44 msaitoh
5853 1.169 msaitoh /*
5854 1.169 msaitoh * It's not required to check recovery mode because this function never
5855 1.169 msaitoh * touches hardware.
5856 1.169 msaitoh */
5857 1.98 msaitoh new_wol_enabled = hw->wol_enabled;
5858 1.98 msaitoh node.sysctl_data = &new_wol_enabled;
5859 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5860 1.98 msaitoh if ((error) || (newp == NULL))
5861 1.98 msaitoh return (error);
5862 1.98 msaitoh if (new_wol_enabled == hw->wol_enabled)
5863 1.98 msaitoh return (0);
5864 1.44 msaitoh
5865 1.333 msaitoh if (new_wol_enabled && !sc->wol_support)
5866 1.98 msaitoh return (ENODEV);
5867 1.98 msaitoh else
5868 1.98 msaitoh hw->wol_enabled = new_wol_enabled;
5869 1.44 msaitoh
5870 1.98 msaitoh return (0);
5871 1.99 msaitoh } /* ixgbe_sysctl_wol_enable */
5872 1.48 msaitoh
5873 1.99 msaitoh /************************************************************************
5874 1.99 msaitoh * ixgbe_sysctl_wufc - Wake Up Filter Control
5875 1.99 msaitoh *
5876 1.99 msaitoh * Sysctl to enable/disable the types of packets that the
5877 1.99 msaitoh * adapter will wake up on upon receipt.
5878 1.99 msaitoh * Flags:
5879 1.99 msaitoh * 0x1 - Link Status Change
5880 1.99 msaitoh * 0x2 - Magic Packet
5881 1.99 msaitoh * 0x4 - Direct Exact
5882 1.99 msaitoh * 0x8 - Directed Multicast
5883 1.99 msaitoh * 0x10 - Broadcast
5884 1.99 msaitoh * 0x20 - ARP/IPv4 Request Packet
5885 1.99 msaitoh * 0x40 - Direct IPv4 Packet
5886 1.99 msaitoh * 0x80 - Direct IPv6 Packet
5887 1.98 msaitoh *
5888 1.99 msaitoh * Settings not listed above will cause the sysctl to return an error.
5889 1.99 msaitoh ************************************************************************/
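/*
 * Illustrative example (added): a value of 0x12 (0x2 | 0x10) arms wake on
 * Magic Packet and Broadcast frames only.  Any bit outside the low byte
 * (new_wufc & 0xffffff00) is rejected with EINVAL below.
 */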
5890 1.1 dyoung static int
5891 1.98 msaitoh ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5892 1.1 dyoung {
5893 1.98 msaitoh struct sysctlnode node = *rnode;
5894 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5895 1.98 msaitoh int error = 0;
5896 1.98 msaitoh u32 new_wufc;
5897 1.52 msaitoh
5898 1.169 msaitoh /*
5899 1.169 msaitoh * It's not required to check recovery mode because this function never
5900 1.169 msaitoh * touches hardware.
5901 1.169 msaitoh */
5902 1.333 msaitoh new_wufc = sc->wufc;
5903 1.98 msaitoh node.sysctl_data = &new_wufc;
5904 1.52 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5905 1.98 msaitoh if ((error) || (newp == NULL))
5906 1.98 msaitoh return (error);
5907 1.333 msaitoh if (new_wufc == sc->wufc)
5908 1.98 msaitoh return (0);
5909 1.98 msaitoh
5910 1.98 msaitoh if (new_wufc & 0xffffff00)
5911 1.98 msaitoh return (EINVAL);
5912 1.99 msaitoh
5913 1.99 msaitoh new_wufc &= 0xff;
5914 1.333 msaitoh new_wufc |= (0xffffff & sc->wufc);
5915 1.333 msaitoh sc->wufc = new_wufc;
5916 1.52 msaitoh
5917 1.98 msaitoh return (0);
5918 1.99 msaitoh } /* ixgbe_sysctl_wufc */
5919 1.52 msaitoh
5920 1.98 msaitoh #ifdef IXGBE_DEBUG
5921 1.99 msaitoh /************************************************************************
5922 1.99 msaitoh * ixgbe_sysctl_print_rss_config
5923 1.99 msaitoh ************************************************************************/
5924 1.52 msaitoh static int
5925 1.98 msaitoh ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5926 1.52 msaitoh {
5927 1.99 msaitoh #ifdef notyet
5928 1.99 msaitoh struct sysctlnode node = *rnode;
5929 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5930 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5931 1.333 msaitoh device_t dev = sc->dev;
5932 1.186 msaitoh struct sbuf *buf;
5933 1.186 msaitoh int error = 0, reta_size;
5934 1.186 msaitoh u32 reg;
5935 1.1 dyoung
5936 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5937 1.169 msaitoh return (EPERM);
5938 1.169 msaitoh
5939 1.98 msaitoh buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5940 1.98 msaitoh if (!buf) {
5941 1.98 msaitoh device_printf(dev, "Could not allocate sbuf for output.\n");
5942 1.98 msaitoh return (ENOMEM);
5943 1.98 msaitoh }
5944 1.52 msaitoh
5945 1.98 msaitoh // TODO: use sbufs to make a string to print out
5946 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
5947 1.333 msaitoh switch (sc->hw.mac.type) {
5948 1.98 msaitoh case ixgbe_mac_X550:
5949 1.98 msaitoh case ixgbe_mac_X550EM_x:
5950 1.99 msaitoh case ixgbe_mac_X550EM_a:
5951 1.98 msaitoh reta_size = 128;
5952 1.98 msaitoh break;
5953 1.98 msaitoh default:
5954 1.98 msaitoh reta_size = 32;
5955 1.98 msaitoh break;
5956 1.43 msaitoh }
5957 1.1 dyoung
5958 1.98 msaitoh /* Print out the redirection table */
5959 1.98 msaitoh sbuf_cat(buf, "\n");
5960 1.98 msaitoh for (int i = 0; i < reta_size; i++) {
5961 1.98 msaitoh if (i < 32) {
5962 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5963 1.98 msaitoh sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5964 1.98 msaitoh } else {
5965 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5966 1.98 msaitoh sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5967 1.98 msaitoh }
5968 1.28 msaitoh }
5969 1.1 dyoung
5970 1.98 msaitoh // TODO: print more config
5971 1.43 msaitoh
5972 1.98 msaitoh error = sbuf_finish(buf);
5973 1.98 msaitoh if (error)
5974 1.98 msaitoh device_printf(dev, "Error finishing sbuf: %d\n", error);
5975 1.1 dyoung
5976 1.98 msaitoh sbuf_delete(buf);
5977 1.99 msaitoh #endif
5978 1.98 msaitoh return (0);
5979 1.99 msaitoh } /* ixgbe_sysctl_print_rss_config */
5980 1.98 msaitoh #endif /* IXGBE_DEBUG */
5981 1.24 msaitoh
5982 1.99 msaitoh /************************************************************************
5983 1.99 msaitoh * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5984 1.99 msaitoh *
5985 1.99 msaitoh * For X552/X557-AT devices using an external PHY
5986 1.99 msaitoh ************************************************************************/
5987 1.44 msaitoh static int
5988 1.44 msaitoh ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5989 1.44 msaitoh {
5990 1.44 msaitoh struct sysctlnode node = *rnode;
5991 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5992 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5993 1.44 msaitoh int val;
5994 1.44 msaitoh u16 reg;
5995 1.44 msaitoh int error;
5996 1.44 msaitoh
5997 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5998 1.169 msaitoh return (EPERM);
5999 1.169 msaitoh
6000 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6001 1.325 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6002 1.333 msaitoh device_printf(sc->dev,
6003 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6004 1.44 msaitoh return (ENODEV);
6005 1.44 msaitoh }
6006 1.44 msaitoh
6007 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
6008 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6009 1.333 msaitoh device_printf(sc->dev,
6010 1.44 msaitoh "Error reading from PHY's current temperature register\n");
6011 1.44 msaitoh return (EAGAIN);
6012 1.44 msaitoh }
6013 1.44 msaitoh
6014 1.44 msaitoh node.sysctl_data = &val;
6015 1.44 msaitoh
6016 1.44 msaitoh /* Shift temp for output */
6017 1.44 msaitoh val = reg >> 8;
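	/*
	 * Added example: reg == 0x1A00 yields val == 0x1A (26); the sensor
	 * reading is kept in the high byte of the register.
	 */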
6018 1.44 msaitoh
6019 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6020 1.44 msaitoh if ((error) || (newp == NULL))
6021 1.44 msaitoh return (error);
6022 1.44 msaitoh
6023 1.44 msaitoh return (0);
6024 1.99 msaitoh } /* ixgbe_sysctl_phy_temp */
6025 1.44 msaitoh
6026 1.99 msaitoh /************************************************************************
6027 1.99 msaitoh * ixgbe_sysctl_phy_overtemp_occurred
6028 1.99 msaitoh *
6029 1.99 msaitoh * Reports (directly from the PHY) whether the current PHY
6030 1.99 msaitoh * temperature is over the overtemp threshold.
6031 1.99 msaitoh ************************************************************************/
6032 1.44 msaitoh static int
6033 1.44 msaitoh ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
6034 1.44 msaitoh {
6035 1.44 msaitoh struct sysctlnode node = *rnode;
6036 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6037 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6038 1.44 msaitoh int val, error;
6039 1.44 msaitoh u16 reg;
6040 1.44 msaitoh
6041 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6042 1.169 msaitoh return (EPERM);
6043 1.169 msaitoh
6044 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6045 1.325 msaitoh 	    (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6046 1.333 msaitoh device_printf(sc->dev,
6047 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6048 1.44 msaitoh return (ENODEV);
6049 1.44 msaitoh }
6050 1.44 msaitoh
6051 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
6052 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6053 1.333 msaitoh device_printf(sc->dev,
6054 1.44 msaitoh "Error reading from PHY's temperature status register\n");
6055 1.44 msaitoh return (EAGAIN);
6056 1.44 msaitoh }
6057 1.44 msaitoh
6058 1.44 msaitoh node.sysctl_data = &val;
6059 1.44 msaitoh
6060 1.44 msaitoh /* Get occurrence bit */
6061 1.44 msaitoh val = !!(reg & 0x4000);
6062 1.44 msaitoh
6063 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6064 1.44 msaitoh if ((error) || (newp == NULL))
6065 1.44 msaitoh return (error);
6066 1.44 msaitoh
6067 1.44 msaitoh return (0);
6068 1.99 msaitoh } /* ixgbe_sysctl_phy_overtemp_occurred */
6069 1.99 msaitoh
6070 1.99 msaitoh /************************************************************************
6071 1.99 msaitoh * ixgbe_sysctl_eee_state
6072 1.99 msaitoh *
6073 1.99 msaitoh * Sysctl to set EEE power saving feature
6074 1.99 msaitoh * Values:
6075 1.99 msaitoh * 0 - disable EEE
6076 1.99 msaitoh * 1 - enable EEE
6077 1.99 msaitoh * (none) - get current device EEE state
6078 1.99 msaitoh ************************************************************************/
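/*
 * Usage sketch, assuming the handler is attached as an "eee_state" node
 * under the per-device sysctl tree (the node name and location here are
 * illustrative assumptions, not taken from this file):
 *
 *	sysctl -w hw.ixg0.eee_state=1	# enable EEE and restart auto-neg
 *	sysctl -w hw.ixg0.eee_state=0	# disable EEE
 *	sysctl hw.ixg0.eee_state	# report the cached EEE state
 */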
6079 1.99 msaitoh static int
6080 1.99 msaitoh ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6081 1.99 msaitoh {
6082 1.99 msaitoh struct sysctlnode node = *rnode;
6083 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6084 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6085 1.333 msaitoh device_t dev = sc->dev;
6086 1.186 msaitoh int curr_eee, new_eee, error = 0;
6087 1.186 msaitoh s32 retval;
6088 1.99 msaitoh
6089 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6090 1.169 msaitoh return (EPERM);
6091 1.169 msaitoh
6092 1.333 msaitoh curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
6093 1.99 msaitoh node.sysctl_data = &new_eee;
6094 1.99 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6095 1.99 msaitoh if ((error) || (newp == NULL))
6096 1.99 msaitoh return (error);
6097 1.99 msaitoh
6098 1.99 msaitoh /* Nothing to do */
6099 1.99 msaitoh if (new_eee == curr_eee)
6100 1.99 msaitoh return (0);
6101 1.99 msaitoh
6102 1.99 msaitoh /* Not supported */
6103 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
6104 1.99 msaitoh return (EINVAL);
6105 1.99 msaitoh
6106 1.99 msaitoh /* Bounds checking */
6107 1.99 msaitoh if ((new_eee < 0) || (new_eee > 1))
6108 1.99 msaitoh return (EINVAL);
6109 1.99 msaitoh
6110 1.333 msaitoh retval = ixgbe_setup_eee(&sc->hw, new_eee);
6111 1.99 msaitoh if (retval) {
6112 1.99 msaitoh device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6113 1.99 msaitoh return (EINVAL);
6114 1.99 msaitoh }
6115 1.99 msaitoh
6116 1.99 msaitoh /* Restart auto-neg */
6117 1.302 riastrad if_init(ifp);
6118 1.99 msaitoh
6119 1.99 msaitoh device_printf(dev, "New EEE state: %d\n", new_eee);
6120 1.99 msaitoh
6121 1.99 msaitoh /* Cache new value */
6122 1.99 msaitoh if (new_eee)
6123 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6124 1.99 msaitoh else
6125 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_EEE;
6126 1.99 msaitoh
6127 1.99 msaitoh return (error);
6128 1.99 msaitoh } /* ixgbe_sysctl_eee_state */
6129 1.99 msaitoh
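/*
 * PRINTQS dumps one line per per-queue register: the device name, the
 * register name, then one 8-digit hex value for each queue.  An
 * illustrative (made-up) line for a two-queue device might look like:
 *	ixg0: RDH	0000001a 00000040
 */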
6130 1.333 msaitoh #define PRINTQS(sc, regname) \
6131 1.158 msaitoh do { \
6132 1.333 msaitoh struct ixgbe_hw *_hw = &(sc)->hw; \
6133 1.158 msaitoh int _i; \
6134 1.158 msaitoh \
6135 1.333 msaitoh printf("%s: %s", device_xname((sc)->dev), #regname); \
6136 1.333 msaitoh for (_i = 0; _i < (sc)->num_queues; _i++) { \
6137 1.158 msaitoh printf((_i == 0) ? "\t" : " "); \
6138 1.158 msaitoh printf("%08x", IXGBE_READ_REG(_hw, \
6139 1.158 msaitoh IXGBE_##regname(_i))); \
6140 1.158 msaitoh } \
6141 1.158 msaitoh printf("\n"); \
6142 1.158 msaitoh } while (0)
6143 1.158 msaitoh
6144 1.158 msaitoh /************************************************************************
6145 1.158 msaitoh * ixgbe_print_debug_info
6146 1.158 msaitoh *
6147 1.158 msaitoh  * Called when the debug sysctl (ixgbe_sysctl_debug) is set to 1.
6148 1.158 msaitoh * Provides a way to take a look at important statistics
6149 1.158 msaitoh * maintained by the driver and hardware.
6150 1.158 msaitoh ************************************************************************/
6151 1.158 msaitoh static void
6152 1.333 msaitoh ixgbe_print_debug_info(struct ixgbe_softc *sc)
6153 1.158 msaitoh {
6154 1.333 msaitoh device_t dev = sc->dev;
6155 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6156 1.158 msaitoh int table_size;
6157 1.158 msaitoh int i;
6158 1.158 msaitoh
6159 1.333 msaitoh switch (sc->hw.mac.type) {
6160 1.158 msaitoh case ixgbe_mac_X550:
6161 1.158 msaitoh case ixgbe_mac_X550EM_x:
6162 1.158 msaitoh case ixgbe_mac_X550EM_a:
6163 1.158 msaitoh table_size = 128;
6164 1.158 msaitoh break;
6165 1.158 msaitoh default:
6166 1.158 msaitoh table_size = 32;
6167 1.158 msaitoh break;
6168 1.158 msaitoh }
6169 1.185 msaitoh
6170 1.158 msaitoh device_printf(dev, "[E]RETA:\n");
6171 1.158 msaitoh for (i = 0; i < table_size; i++) {
6172 1.158 msaitoh if (i < 32)
6173 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6174 1.158 msaitoh IXGBE_RETA(i)));
6175 1.158 msaitoh else
6176 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6177 1.158 msaitoh IXGBE_ERETA(i - 32)));
6178 1.158 msaitoh }
6179 1.158 msaitoh
6180 1.158 msaitoh device_printf(dev, "queue:");
6181 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6182 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6183 1.158 msaitoh printf("%8d", i);
6184 1.158 msaitoh }
6185 1.158 msaitoh printf("\n");
6186 1.333 msaitoh PRINTQS(sc, RDBAL);
6187 1.333 msaitoh PRINTQS(sc, RDBAH);
6188 1.333 msaitoh PRINTQS(sc, RDLEN);
6189 1.333 msaitoh PRINTQS(sc, SRRCTL);
6190 1.333 msaitoh PRINTQS(sc, RDH);
6191 1.333 msaitoh PRINTQS(sc, RDT);
6192 1.333 msaitoh PRINTQS(sc, RXDCTL);
6193 1.158 msaitoh
6194 1.158 msaitoh device_printf(dev, "RQSMR:");
6195 1.333 msaitoh for (i = 0; i < sc->num_queues / 4; i++) {
6196 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6197 1.158 msaitoh printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6198 1.158 msaitoh }
6199 1.158 msaitoh printf("\n");
6200 1.158 msaitoh
6201 1.158 msaitoh device_printf(dev, "disabled_count:");
6202 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6203 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6204 1.333 msaitoh printf("%8d", sc->queues[i].disabled_count);
6205 1.158 msaitoh }
6206 1.158 msaitoh printf("\n");
6207 1.185 msaitoh
6208 1.158 msaitoh device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6209 1.158 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
6210 1.158 msaitoh device_printf(dev, "EIMS_EX(0):\t%08x\n",
6211 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6212 1.158 msaitoh device_printf(dev, "EIMS_EX(1):\t%08x\n",
6213 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6214 1.158 msaitoh }
6215 1.265 msaitoh device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6216 1.265 msaitoh device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6217 1.158 msaitoh } /* ixgbe_print_debug_info */
6218 1.158 msaitoh
6219 1.158 msaitoh /************************************************************************
6220 1.158 msaitoh * ixgbe_sysctl_debug
6221 1.158 msaitoh ************************************************************************/
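/*
 * Usage sketch, assuming the handler is attached as a "debug" node under
 * the per-device sysctl tree (node name assumed for illustration):
 * writing 1 triggers ixgbe_print_debug_info() above, e.g.
 *	sysctl -w hw.ixg0.debug=1
 */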
6222 1.158 msaitoh static int
6223 1.158 msaitoh ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6224 1.158 msaitoh {
6225 1.158 msaitoh struct sysctlnode node = *rnode;
6226 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6227 1.186 msaitoh int error, result = 0;
6228 1.158 msaitoh
6229 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6230 1.169 msaitoh return (EPERM);
6231 1.169 msaitoh
6232 1.158 msaitoh node.sysctl_data = &result;
6233 1.158 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6234 1.158 msaitoh
6235 1.158 msaitoh if (error || newp == NULL)
6236 1.158 msaitoh return error;
6237 1.158 msaitoh
6238 1.158 msaitoh if (result == 1)
6239 1.333 msaitoh ixgbe_print_debug_info(sc);
6240 1.158 msaitoh
6241 1.158 msaitoh return 0;
6242 1.158 msaitoh } /* ixgbe_sysctl_debug */
6243 1.158 msaitoh
6244 1.99 msaitoh /************************************************************************
6245 1.286 msaitoh * ixgbe_sysctl_rx_copy_len
6246 1.286 msaitoh ************************************************************************/
6247 1.286 msaitoh static int
6248 1.286 msaitoh ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6249 1.286 msaitoh {
6250 1.286 msaitoh struct sysctlnode node = *rnode;
6251 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6252 1.286 msaitoh int error;
6253 1.333 msaitoh int result = sc->rx_copy_len;
6254 1.286 msaitoh
6255 1.286 msaitoh node.sysctl_data = &result;
6256 1.286 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6257 1.286 msaitoh
6258 1.286 msaitoh if (error || newp == NULL)
6259 1.286 msaitoh return error;
6260 1.286 msaitoh
6261 1.286 msaitoh if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6262 1.286 msaitoh return EINVAL;
6263 1.286 msaitoh
6264 1.333 msaitoh sc->rx_copy_len = result;
6265 1.286 msaitoh
6266 1.286 msaitoh return 0;
6267 1.286 msaitoh } /* ixgbe_sysctl_rx_copy_len */
6268 1.286 msaitoh
6269 1.286 msaitoh /************************************************************************
6270 1.313 msaitoh * ixgbe_sysctl_tx_process_limit
6271 1.313 msaitoh ************************************************************************/
6272 1.313 msaitoh static int
6273 1.313 msaitoh ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6274 1.313 msaitoh {
6275 1.313 msaitoh struct sysctlnode node = *rnode;
6276 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6277 1.313 msaitoh int error;
6278 1.333 msaitoh int result = sc->tx_process_limit;
6279 1.313 msaitoh
6280 1.313 msaitoh node.sysctl_data = &result;
6281 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6282 1.313 msaitoh
6283 1.313 msaitoh if (error || newp == NULL)
6284 1.313 msaitoh return error;
6285 1.313 msaitoh
6286 1.333 msaitoh if ((result <= 0) || (result > sc->num_tx_desc))
6287 1.313 msaitoh return EINVAL;
6288 1.313 msaitoh
6289 1.333 msaitoh sc->tx_process_limit = result;
6290 1.313 msaitoh
6291 1.313 msaitoh return 0;
6292 1.313 msaitoh } /* ixgbe_sysctl_tx_process_limit */
6293 1.313 msaitoh
6294 1.313 msaitoh /************************************************************************
6295 1.313 msaitoh * ixgbe_sysctl_rx_process_limit
6296 1.313 msaitoh ************************************************************************/
6297 1.313 msaitoh static int
6298 1.313 msaitoh ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6299 1.313 msaitoh {
6300 1.313 msaitoh struct sysctlnode node = *rnode;
6301 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6302 1.313 msaitoh int error;
6303 1.333 msaitoh int result = sc->rx_process_limit;
6304 1.313 msaitoh
6305 1.313 msaitoh node.sysctl_data = &result;
6306 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6307 1.313 msaitoh
6308 1.313 msaitoh if (error || newp == NULL)
6309 1.313 msaitoh return error;
6310 1.313 msaitoh
6311 1.333 msaitoh if ((result <= 0) || (result > sc->num_rx_desc))
6312 1.313 msaitoh return EINVAL;
6313 1.313 msaitoh
6314 1.333 msaitoh sc->rx_process_limit = result;
6315 1.313 msaitoh
6316 1.313 msaitoh return 0;
6317 1.313 msaitoh } /* ixgbe_sysctl_rx_process_limit */
6318 1.313 msaitoh
6319 1.313 msaitoh /************************************************************************
6320 1.99 msaitoh * ixgbe_init_device_features
6321 1.99 msaitoh ************************************************************************/
6322 1.99 msaitoh static void
6323 1.333 msaitoh ixgbe_init_device_features(struct ixgbe_softc *sc)
6324 1.99 msaitoh {
6325 1.333 msaitoh sc->feat_cap = IXGBE_FEATURE_NETMAP
6326 1.186 msaitoh | IXGBE_FEATURE_RSS
6327 1.186 msaitoh | IXGBE_FEATURE_MSI
6328 1.186 msaitoh | IXGBE_FEATURE_MSIX
6329 1.186 msaitoh | IXGBE_FEATURE_LEGACY_IRQ
6330 1.186 msaitoh | IXGBE_FEATURE_LEGACY_TX;
6331 1.99 msaitoh
6332 1.99 msaitoh /* Set capabilities first... */
6333 1.333 msaitoh switch (sc->hw.mac.type) {
6334 1.99 msaitoh case ixgbe_mac_82598EB:
6335 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
6336 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6337 1.99 msaitoh break;
6338 1.99 msaitoh case ixgbe_mac_X540:
6339 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6340 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6341 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6342 1.333 msaitoh (sc->hw.bus.func == 0))
6343 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6344 1.99 msaitoh break;
6345 1.99 msaitoh case ixgbe_mac_X550:
6346 1.169 msaitoh /*
6347 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6348 1.169 msaitoh * NVM Image version.
6349 1.169 msaitoh */
6350 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6351 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6352 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6353 1.99 msaitoh break;
6354 1.99 msaitoh case ixgbe_mac_X550EM_x:
6355 1.169 msaitoh /*
6356 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6357 1.169 msaitoh * NVM Image version.
6358 1.169 msaitoh */
6359 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6360 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6361 1.99 msaitoh break;
6362 1.99 msaitoh case ixgbe_mac_X550EM_a:
6363 1.169 msaitoh /*
6364 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6365 1.169 msaitoh * NVM Image version.
6366 1.169 msaitoh */
6367 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6368 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6369 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6370 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6371 1.333 msaitoh (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6372 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6373 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_EEE;
6374 1.99 msaitoh }
6375 1.99 msaitoh break;
6376 1.99 msaitoh case ixgbe_mac_82599EB:
6377 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6378 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6379 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6380 1.333 msaitoh (sc->hw.bus.func == 0))
6381 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6382 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6383 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6384 1.99 msaitoh break;
6385 1.99 msaitoh default:
6386 1.99 msaitoh break;
6387 1.99 msaitoh }
6388 1.99 msaitoh
6389 1.99 msaitoh /* Enabled by default... */
6390 1.99 msaitoh /* Fan failure detection */
6391 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6392 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6393 1.99 msaitoh /* Netmap */
6394 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
6395 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_NETMAP;
6396 1.99 msaitoh /* EEE */
6397 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
6398 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6399 1.99 msaitoh /* Thermal Sensor */
6400 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6401 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6402 1.169 msaitoh /*
6403 1.169 msaitoh * Recovery mode:
6404 1.169 msaitoh * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6405 1.169 msaitoh * NVM Image version.
6406 1.169 msaitoh */
6407 1.99 msaitoh
6408 1.99 msaitoh /* Enabled via global sysctl... */
6409 1.99 msaitoh /* Flow Director */
6410 1.99 msaitoh if (ixgbe_enable_fdir) {
6411 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FDIR)
6412 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FDIR;
6413 1.99 msaitoh else
6414 1.333 msaitoh device_printf(sc->dev, "Device does not support "
6415 1.320 msaitoh 		    "Flow Director. Leaving disabled.\n");
6416 1.99 msaitoh }
6417 1.99 msaitoh /* Legacy (single queue) transmit */
6418 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6419 1.99 msaitoh ixgbe_enable_legacy_tx)
6420 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6421 1.99 msaitoh /*
6422 1.99 msaitoh 	 * Message Signaled Interrupts - Extended (MSI-X)
6423 1.99 msaitoh * Normal MSI is only enabled if MSI-X calls fail.
6424 1.99 msaitoh */
6425 1.99 msaitoh if (!ixgbe_enable_msix)
6426 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
6427 1.99 msaitoh /* Receive-Side Scaling (RSS) */
6428 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6429 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RSS;
6430 1.99 msaitoh
6431 1.99 msaitoh /* Disable features with unmet dependencies... */
6432 1.99 msaitoh /* No MSI-X */
6433 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
6434 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
6435 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6436 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
6437 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
6438 1.99 msaitoh }
6439 1.99 msaitoh } /* ixgbe_init_device_features */
6440 1.44 msaitoh
6441 1.99 msaitoh /************************************************************************
6442 1.99 msaitoh * ixgbe_probe - Device identification routine
6443 1.98 msaitoh *
6444 1.99 msaitoh  * Determines if the driver should be loaded on the
6445 1.99 msaitoh  * adapter based on its PCI vendor/device ID.
6446 1.98 msaitoh  *
6447 1.99 msaitoh  * return 1 if the device is supported, 0 otherwise
6448 1.99 msaitoh ************************************************************************/
6449 1.98 msaitoh static int
6450 1.98 msaitoh ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6451 1.98 msaitoh {
6452 1.98 msaitoh const struct pci_attach_args *pa = aux;
6453 1.98 msaitoh
6454 1.98 msaitoh return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6455 1.98 msaitoh }
6456 1.98 msaitoh
6457 1.159 maxv static const ixgbe_vendor_info_t *
6458 1.98 msaitoh ixgbe_lookup(const struct pci_attach_args *pa)
6459 1.98 msaitoh {
6460 1.159 maxv const ixgbe_vendor_info_t *ent;
6461 1.98 msaitoh pcireg_t subid;
6462 1.98 msaitoh
6463 1.98 msaitoh INIT_DEBUGOUT("ixgbe_lookup: begin");
6464 1.98 msaitoh
6465 1.98 msaitoh if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6466 1.98 msaitoh return NULL;
6467 1.98 msaitoh
6468 1.98 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6469 1.98 msaitoh
6470 1.98 msaitoh for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6471 1.99 msaitoh if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6472 1.99 msaitoh (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6473 1.99 msaitoh ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6474 1.99 msaitoh (ent->subvendor_id == 0)) &&
6475 1.99 msaitoh ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6476 1.99 msaitoh (ent->subdevice_id == 0))) {
6477 1.98 msaitoh return ent;
6478 1.98 msaitoh }
6479 1.98 msaitoh }
6480 1.98 msaitoh return NULL;
6481 1.98 msaitoh }
6482 1.98 msaitoh
6483 1.98 msaitoh static int
6484 1.98 msaitoh ixgbe_ifflags_cb(struct ethercom *ec)
6485 1.98 msaitoh {
6486 1.98 msaitoh struct ifnet *ifp = &ec->ec_if;
6487 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6488 1.210 msaitoh u_short change;
6489 1.210 msaitoh int rv = 0;
6490 1.98 msaitoh
6491 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6492 1.98 msaitoh
6493 1.333 msaitoh change = ifp->if_flags ^ sc->if_flags;
6494 1.98 msaitoh if (change != 0)
6495 1.333 msaitoh sc->if_flags = ifp->if_flags;
6496 1.98 msaitoh
6497 1.192 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6498 1.192 msaitoh rv = ENETRESET;
6499 1.192 msaitoh goto out;
6500 1.192 msaitoh } else if ((change & IFF_PROMISC) != 0)
6501 1.333 msaitoh ixgbe_set_rxfilter(sc);
6502 1.98 msaitoh
6503 1.193 msaitoh /* Check for ec_capenable. */
6504 1.333 msaitoh change = ec->ec_capenable ^ sc->ec_capenable;
6505 1.333 msaitoh sc->ec_capenable = ec->ec_capenable;
6506 1.193 msaitoh if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6507 1.193 msaitoh | ETHERCAP_VLAN_HWFILTER)) != 0) {
6508 1.193 msaitoh rv = ENETRESET;
6509 1.193 msaitoh goto out;
6510 1.193 msaitoh }
6511 1.193 msaitoh
6512 1.193 msaitoh /*
6513 1.193 msaitoh * Special handling is not required for ETHERCAP_VLAN_MTU.
6514 1.193 msaitoh 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6515 1.193 msaitoh */
6516 1.193 msaitoh
6517 1.98 msaitoh /* Set up VLAN support and filter */
6518 1.193 msaitoh if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6519 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
6520 1.98 msaitoh
6521 1.192 msaitoh out:
6522 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6523 1.98 msaitoh
6524 1.192 msaitoh return rv;
6525 1.98 msaitoh }
6526 1.98 msaitoh
6527 1.99 msaitoh /************************************************************************
6528 1.99 msaitoh * ixgbe_ioctl - Ioctl entry point
6529 1.98 msaitoh *
6530 1.99 msaitoh * Called when the user wants to configure the interface.
6531 1.98 msaitoh *
6532 1.99 msaitoh * return 0 on success, positive on failure
6533 1.99 msaitoh ************************************************************************/
6534 1.98 msaitoh static int
6535 1.232 msaitoh ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6536 1.98 msaitoh {
6537 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6538 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6539 1.98 msaitoh struct ifcapreq *ifcr = data;
6540 1.98 msaitoh struct ifreq *ifr = data;
6541 1.186 msaitoh int error = 0;
6542 1.98 msaitoh int l4csum_en;
6543 1.185 msaitoh const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6544 1.185 msaitoh IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6545 1.98 msaitoh
6546 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6547 1.169 msaitoh return (EPERM);
6548 1.169 msaitoh
6549 1.98 msaitoh switch (command) {
6550 1.98 msaitoh case SIOCSIFFLAGS:
6551 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6552 1.98 msaitoh break;
6553 1.98 msaitoh case SIOCADDMULTI:
6554 1.98 msaitoh case SIOCDELMULTI:
6555 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6556 1.98 msaitoh break;
6557 1.98 msaitoh case SIOCSIFMEDIA:
6558 1.98 msaitoh case SIOCGIFMEDIA:
6559 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6560 1.98 msaitoh break;
6561 1.98 msaitoh case SIOCSIFCAP:
6562 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6563 1.98 msaitoh break;
6564 1.98 msaitoh case SIOCSIFMTU:
6565 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6566 1.98 msaitoh break;
6567 1.98 msaitoh #ifdef __NetBSD__
6568 1.98 msaitoh case SIOCINITIFADDR:
6569 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6570 1.98 msaitoh break;
6571 1.98 msaitoh case SIOCGIFFLAGS:
6572 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6573 1.98 msaitoh break;
6574 1.98 msaitoh case SIOCGIFAFLAG_IN:
6575 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6576 1.98 msaitoh break;
6577 1.98 msaitoh case SIOCGIFADDR:
6578 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6579 1.98 msaitoh break;
6580 1.98 msaitoh case SIOCGIFMTU:
6581 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6582 1.98 msaitoh break;
6583 1.98 msaitoh case SIOCGIFCAP:
6584 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6585 1.98 msaitoh break;
6586 1.98 msaitoh case SIOCGETHERCAP:
6587 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6588 1.98 msaitoh break;
6589 1.98 msaitoh case SIOCGLIFADDR:
6590 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6591 1.98 msaitoh break;
6592 1.98 msaitoh case SIOCZIFDATA:
6593 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6594 1.98 msaitoh hw->mac.ops.clear_hw_cntrs(hw);
6595 1.333 msaitoh ixgbe_clear_evcnt(sc);
6596 1.98 msaitoh break;
6597 1.98 msaitoh case SIOCAIFADDR:
6598 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6599 1.98 msaitoh break;
6600 1.98 msaitoh #endif
6601 1.98 msaitoh default:
6602 1.98 msaitoh IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6603 1.98 msaitoh break;
6604 1.98 msaitoh }
6605 1.24 msaitoh
6606 1.98 msaitoh switch (command) {
6607 1.98 msaitoh case SIOCGI2C:
6608 1.98 msaitoh {
6609 1.98 msaitoh struct ixgbe_i2c_req i2c;
6610 1.24 msaitoh
6611 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6612 1.98 msaitoh error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6613 1.98 msaitoh if (error != 0)
6614 1.98 msaitoh break;
6615 1.98 msaitoh if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6616 1.98 msaitoh error = EINVAL;
6617 1.98 msaitoh break;
6618 1.98 msaitoh }
6619 1.98 msaitoh if (i2c.len > sizeof(i2c.data)) {
6620 1.98 msaitoh error = EINVAL;
6621 1.98 msaitoh break;
6622 1.98 msaitoh }
6623 1.24 msaitoh
6624 1.98 msaitoh hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6625 1.98 msaitoh i2c.dev_addr, i2c.data);
6626 1.98 msaitoh error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6627 1.98 msaitoh break;
6628 1.98 msaitoh }
6629 1.98 msaitoh case SIOCSIFCAP:
6630 1.98 msaitoh /* Layer-4 Rx checksum offload has to be turned on and
6631 1.98 msaitoh * off as a unit.
6632 1.98 msaitoh */
6633 1.98 msaitoh l4csum_en = ifcr->ifcr_capenable & l4csum;
6634 1.98 msaitoh if (l4csum_en != l4csum && l4csum_en != 0)
6635 1.98 msaitoh return EINVAL;
6636 1.98 msaitoh /*FALLTHROUGH*/
6637 1.98 msaitoh case SIOCADDMULTI:
6638 1.98 msaitoh case SIOCDELMULTI:
6639 1.98 msaitoh case SIOCSIFFLAGS:
6640 1.98 msaitoh case SIOCSIFMTU:
6641 1.98 msaitoh default:
6642 1.98 msaitoh if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6643 1.98 msaitoh return error;
6644 1.98 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
6645 1.98 msaitoh ;
6646 1.98 msaitoh else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6647 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6648 1.135 msaitoh if ((ifp->if_flags & IFF_RUNNING) != 0)
6649 1.333 msaitoh ixgbe_init_locked(sc);
6650 1.333 msaitoh ixgbe_recalculate_max_frame(sc);
6651 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6652 1.98 msaitoh } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6653 1.98 msaitoh /*
6654 1.98 msaitoh * Multicast list has changed; set the hardware filter
6655 1.98 msaitoh * accordingly.
6656 1.98 msaitoh */
6657 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6658 1.333 msaitoh ixgbe_disable_intr(sc);
6659 1.333 msaitoh ixgbe_set_rxfilter(sc);
6660 1.333 msaitoh ixgbe_enable_intr(sc);
6661 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6662 1.98 msaitoh }
6663 1.98 msaitoh return 0;
6664 1.24 msaitoh }
6665 1.24 msaitoh
6666 1.98 msaitoh return error;
6667 1.99 msaitoh } /* ixgbe_ioctl */
6668 1.99 msaitoh
6669 1.99 msaitoh /************************************************************************
6670 1.99 msaitoh * ixgbe_check_fan_failure
6671 1.99 msaitoh ************************************************************************/
6672 1.274 msaitoh static int
6673 1.333 msaitoh ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
6674 1.99 msaitoh {
6675 1.99 msaitoh u32 mask;
6676 1.99 msaitoh
6677 1.333 msaitoh mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
6678 1.99 msaitoh IXGBE_ESDP_SDP1;
6679 1.26 msaitoh
6680 1.312 msaitoh if ((reg & mask) == 0)
6681 1.312 msaitoh return IXGBE_SUCCESS;
6682 1.312 msaitoh
6683 1.312 msaitoh /*
6684 1.312 msaitoh 	 * Use ratecheck() in case the interrupt occurs frequently.
6685 1.312 msaitoh 	 * When an EXPX9501AT's fan stopped, the interrupt occurred only once,
6686 1.312 msaitoh 	 * a red LED on the board turned on and the link never came up until
6687 1.312 msaitoh 	 * power off.
6688 1.312 msaitoh */
6689 1.333 msaitoh if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl))
6690 1.333 msaitoh device_printf(sc->dev,
6691 1.280 msaitoh "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6692 1.274 msaitoh
6693 1.312 msaitoh return IXGBE_ERR_FAN_FAILURE;
6694 1.99 msaitoh } /* ixgbe_check_fan_failure */
6695 1.99 msaitoh
6696 1.99 msaitoh /************************************************************************
6697 1.99 msaitoh * ixgbe_handle_que
6698 1.99 msaitoh ************************************************************************/
6699 1.98 msaitoh static void
6700 1.98 msaitoh ixgbe_handle_que(void *context)
6701 1.44 msaitoh {
6702 1.98 msaitoh struct ix_queue *que = context;
6703 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
6704 1.186 msaitoh struct tx_ring *txr = que->txr;
6705 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6706 1.121 msaitoh bool more = false;
6707 1.44 msaitoh
6708 1.305 msaitoh IXGBE_EVC_ADD(&que->handleq, 1);
6709 1.44 msaitoh
6710 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING) {
6711 1.98 msaitoh IXGBE_TX_LOCK(txr);
6712 1.323 msaitoh more = ixgbe_txeof(txr);
6713 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
6714 1.99 msaitoh if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6715 1.99 msaitoh ixgbe_mq_start_locked(ifp, txr);
6716 1.98 msaitoh /* Only for queue 0 */
6717 1.99 msaitoh /* NetBSD still needs this for CBQ */
6718 1.333 msaitoh if ((&sc->queues[0] == que)
6719 1.99 msaitoh && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6720 1.99 msaitoh ixgbe_legacy_start_locked(ifp, txr);
6721 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
6722 1.323 msaitoh more |= ixgbe_rxeof(que);
6723 1.44 msaitoh }
6724 1.44 msaitoh
6725 1.128 knakahar if (more) {
6726 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
6727 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
6728 1.128 knakahar } else if (que->res != NULL) {
6729 1.265 msaitoh /* MSIX: Re-enable this interrupt */
6730 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
6731 1.265 msaitoh } else {
6732 1.265 msaitoh /* INTx or MSI */
6733 1.333 msaitoh ixgbe_enable_queue(sc, 0);
6734 1.265 msaitoh }
6735 1.99 msaitoh
6736 1.98 msaitoh return;
6737 1.99 msaitoh } /* ixgbe_handle_que */
6738 1.44 msaitoh
6739 1.99 msaitoh /************************************************************************
6740 1.128 knakahar * ixgbe_handle_que_work
6741 1.128 knakahar ************************************************************************/
6742 1.128 knakahar static void
6743 1.128 knakahar ixgbe_handle_que_work(struct work *wk, void *context)
6744 1.128 knakahar {
6745 1.128 knakahar struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6746 1.128 knakahar
6747 1.128 knakahar /*
6748 1.128 knakahar * "enqueued flag" is not required here.
6749 1.128 knakahar * See ixgbe_msix_que().
6750 1.128 knakahar */
6751 1.128 knakahar ixgbe_handle_que(que);
6752 1.128 knakahar }
6753 1.128 knakahar
6754 1.128 knakahar /************************************************************************
6755 1.99 msaitoh * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6756 1.99 msaitoh ************************************************************************/
6757 1.48 msaitoh static int
6758 1.333 msaitoh ixgbe_allocate_legacy(struct ixgbe_softc *sc,
6759 1.98 msaitoh const struct pci_attach_args *pa)
6760 1.48 msaitoh {
6761 1.333 msaitoh device_t dev = sc->dev;
6762 1.333 msaitoh struct ix_queue *que = sc->queues;
6763 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
6764 1.98 msaitoh int counts[PCI_INTR_TYPE_SIZE];
6765 1.98 msaitoh pci_intr_type_t intr_type, max_type;
6766 1.186 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6767 1.206 knakahar char wqname[MAXCOMLEN];
6768 1.98 msaitoh const char *intrstr = NULL;
6769 1.206 knakahar int defertx_error = 0, error;
6770 1.185 msaitoh
6771 1.99 msaitoh /* We allocate a single interrupt resource */
6772 1.98 msaitoh max_type = PCI_INTR_TYPE_MSI;
6773 1.98 msaitoh counts[PCI_INTR_TYPE_MSIX] = 0;
6774 1.99 msaitoh counts[PCI_INTR_TYPE_MSI] =
6775 1.333 msaitoh (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6776 1.118 msaitoh /* Check not feat_en but feat_cap to fallback to INTx */
6777 1.99 msaitoh counts[PCI_INTR_TYPE_INTX] =
6778 1.333 msaitoh (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6779 1.48 msaitoh
6780 1.98 msaitoh alloc_retry:
6781 1.333 msaitoh if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) {
6782 1.98 msaitoh aprint_error_dev(dev, "couldn't alloc interrupt\n");
6783 1.98 msaitoh return ENXIO;
6784 1.98 msaitoh }
6785 1.333 msaitoh sc->osdep.nintrs = 1;
6786 1.333 msaitoh intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0],
6787 1.98 msaitoh intrbuf, sizeof(intrbuf));
6788 1.333 msaitoh sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc,
6789 1.333 msaitoh sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6790 1.98 msaitoh device_xname(dev));
6791 1.333 msaitoh intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]);
6792 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6793 1.98 msaitoh 		aprint_error_dev(dev, "unable to establish %s\n",
6794 1.98 msaitoh (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6795 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6796 1.333 msaitoh sc->osdep.intrs = NULL;
6797 1.98 msaitoh switch (intr_type) {
6798 1.98 msaitoh case PCI_INTR_TYPE_MSI:
6799 1.98 msaitoh /* The next try is for INTx: Disable MSI */
6800 1.98 msaitoh max_type = PCI_INTR_TYPE_INTX;
6801 1.98 msaitoh counts[PCI_INTR_TYPE_INTX] = 1;
6802 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6803 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6804 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6805 1.118 msaitoh goto alloc_retry;
6806 1.118 msaitoh } else
6807 1.118 msaitoh break;
6808 1.98 msaitoh case PCI_INTR_TYPE_INTX:
6809 1.98 msaitoh default:
6810 1.98 msaitoh /* See below */
6811 1.98 msaitoh break;
6812 1.98 msaitoh }
6813 1.98 msaitoh }
6814 1.119 msaitoh if (intr_type == PCI_INTR_TYPE_INTX) {
6815 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6816 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6817 1.119 msaitoh }
6818 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6819 1.98 msaitoh aprint_error_dev(dev,
6820 1.98 msaitoh "couldn't establish interrupt%s%s\n",
6821 1.98 msaitoh intrstr ? " at " : "", intrstr ? intrstr : "");
6822 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6823 1.333 msaitoh sc->osdep.intrs = NULL;
6824 1.98 msaitoh return ENXIO;
6825 1.98 msaitoh }
6826 1.98 msaitoh aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6827 1.98 msaitoh /*
6828 1.98 msaitoh * Try allocating a fast interrupt and the associated deferred
6829 1.98 msaitoh * processing contexts.
6830 1.98 msaitoh */
6831 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6832 1.99 msaitoh txr->txr_si =
6833 1.229 msaitoh softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6834 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6835 1.206 knakahar
6836 1.280 msaitoh snprintf(wqname, sizeof(wqname), "%sdeferTx",
6837 1.280 msaitoh device_xname(dev));
6838 1.333 msaitoh defertx_error = workqueue_create(&sc->txr_wq, wqname,
6839 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI,
6840 1.206 knakahar IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6841 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6842 1.206 knakahar }
6843 1.229 msaitoh que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6844 1.98 msaitoh ixgbe_handle_que, que);
6845 1.206 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6846 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
6847 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
6848 1.206 knakahar IXGBE_WORKQUEUE_FLAGS);
6849 1.48 msaitoh
6850 1.333 msaitoh if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
6851 1.206 knakahar && ((txr->txr_si == NULL) || defertx_error != 0))
6852 1.206 knakahar || (que->que_si == NULL) || error != 0) {
6853 1.98 msaitoh aprint_error_dev(dev,
6854 1.185 msaitoh "could not establish software interrupts\n");
6855 1.99 msaitoh
6856 1.98 msaitoh return ENXIO;
6857 1.98 msaitoh }
6858 1.98 msaitoh /* For simplicity in the handlers */
6859 1.333 msaitoh sc->active_queues = IXGBE_EIMS_ENABLE_MASK;
6860 1.44 msaitoh
6861 1.44 msaitoh return (0);
6862 1.99 msaitoh } /* ixgbe_allocate_legacy */
6863 1.44 msaitoh
6864 1.99 msaitoh /************************************************************************
6865 1.99 msaitoh * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6866 1.99 msaitoh ************************************************************************/
6867 1.44 msaitoh static int
6868 1.333 msaitoh ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
6869 1.44 msaitoh {
6870 1.333 msaitoh device_t dev = sc->dev;
6871 1.333 msaitoh struct ix_queue *que = sc->queues;
6872 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
6873 1.98 msaitoh pci_chipset_tag_t pc;
6874 1.98 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6875 1.98 msaitoh char intr_xname[32];
6876 1.128 knakahar char wqname[MAXCOMLEN];
6877 1.98 msaitoh const char *intrstr = NULL;
6878 1.186 msaitoh int error, vector = 0;
6879 1.98 msaitoh int cpu_id = 0;
6880 1.98 msaitoh kcpuset_t *affinity;
6881 1.99 msaitoh #ifdef RSS
6882 1.186 msaitoh unsigned int rss_buckets = 0;
6883 1.99 msaitoh kcpuset_t cpu_mask;
6884 1.98 msaitoh #endif
6885 1.98 msaitoh
6886 1.333 msaitoh pc = sc->osdep.pc;
6887 1.98 msaitoh #ifdef RSS
6888 1.98 msaitoh /*
6889 1.98 msaitoh * If we're doing RSS, the number of queues needs to
6890 1.98 msaitoh * match the number of RSS buckets that are configured.
6891 1.98 msaitoh *
6892 1.98 msaitoh 	 * + If there are more queues than RSS buckets, we'll end
6893 1.98 msaitoh * up with queues that get no traffic.
6894 1.98 msaitoh *
6895 1.98 msaitoh 	 * + If there are more RSS buckets than queues, we'll end
6896 1.98 msaitoh * up having multiple RSS buckets map to the same queue,
6897 1.98 msaitoh * so there'll be some contention.
6898 1.98 msaitoh */
6899 1.99 msaitoh rss_buckets = rss_getnumbuckets();
6900 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
6901 1.333 msaitoh (sc->num_queues != rss_buckets)) {
6902 1.98 msaitoh device_printf(dev,
6903 1.98 msaitoh "%s: number of queues (%d) != number of RSS buckets (%d)"
6904 1.98 msaitoh "; performance will be impacted.\n",
6905 1.333 msaitoh __func__, sc->num_queues, rss_buckets);
6906 1.98 msaitoh }
6907 1.98 msaitoh #endif
6908 1.98 msaitoh
6909 1.333 msaitoh sc->osdep.nintrs = sc->num_queues + 1;
6910 1.333 msaitoh if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
6911 1.333 msaitoh sc->osdep.nintrs) != 0) {
6912 1.98 msaitoh aprint_error_dev(dev,
6913 1.98 msaitoh "failed to allocate MSI-X interrupt\n");
6914 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
6915 1.98 msaitoh return (ENXIO);
6916 1.98 msaitoh }
6917 1.98 msaitoh
6918 1.98 msaitoh kcpuset_create(&affinity, false);
6919 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
6920 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6921 1.98 msaitoh device_xname(dev), i);
6922 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
6923 1.98 msaitoh sizeof(intrbuf));
6924 1.98 msaitoh #ifdef IXGBE_MPSAFE
6925 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
6926 1.98 msaitoh true);
6927 1.98 msaitoh #endif
6928 1.98 msaitoh /* Set the handler function */
6929 1.333 msaitoh que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
6930 1.333 msaitoh sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6931 1.98 msaitoh intr_xname);
6932 1.98 msaitoh if (que->res == NULL) {
6933 1.98 msaitoh aprint_error_dev(dev,
6934 1.98 msaitoh "Failed to register QUE handler\n");
6935 1.119 msaitoh error = ENXIO;
6936 1.119 msaitoh goto err_out;
6937 1.98 msaitoh }
6938 1.98 msaitoh que->msix = vector;
6939 1.333 msaitoh sc->active_queues |= 1ULL << que->msix;
6940 1.99 msaitoh
6941 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
6942 1.98 msaitoh #ifdef RSS
6943 1.99 msaitoh /*
6944 1.99 msaitoh * The queue ID is used as the RSS layer bucket ID.
6945 1.99 msaitoh * We look up the queue ID -> RSS CPU ID and select
6946 1.99 msaitoh * that.
6947 1.99 msaitoh */
6948 1.99 msaitoh cpu_id = rss_getcpu(i % rss_getnumbuckets());
6949 1.99 msaitoh CPU_SETOF(cpu_id, &cpu_mask);
6950 1.98 msaitoh #endif
6951 1.99 msaitoh } else {
6952 1.99 msaitoh /*
6953 1.99 msaitoh * Bind the MSI-X vector, and thus the
6954 1.99 msaitoh * rings to the corresponding CPU.
6955 1.99 msaitoh *
6956 1.99 msaitoh * This just happens to match the default RSS
6957 1.99 msaitoh * round-robin bucket -> queue -> CPU allocation.
6958 1.99 msaitoh */
6959 1.333 msaitoh if (sc->num_queues > 1)
6960 1.99 msaitoh cpu_id = i;
6961 1.99 msaitoh }
6962 1.98 msaitoh /* Round-robin affinity */
6963 1.98 msaitoh kcpuset_zero(affinity);
6964 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
6965 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[i], affinity,
6966 1.98 msaitoh NULL);
6967 1.98 msaitoh aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6968 1.98 msaitoh intrstr);
6969 1.98 msaitoh if (error == 0) {
6970 1.98 msaitoh #if 1 /* def IXGBE_DEBUG */
6971 1.98 msaitoh #ifdef RSS
6972 1.322 skrll aprint_normal(", bound RSS bucket %d to CPU %d", i,
6973 1.99 msaitoh cpu_id % ncpu);
6974 1.98 msaitoh #else
6975 1.99 msaitoh aprint_normal(", bound queue %d to cpu %d", i,
6976 1.99 msaitoh cpu_id % ncpu);
6977 1.98 msaitoh #endif
6978 1.98 msaitoh #endif /* IXGBE_DEBUG */
6979 1.98 msaitoh }
6980 1.98 msaitoh aprint_normal("\n");
6981 1.99 msaitoh
6982 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6983 1.99 msaitoh txr->txr_si = softint_establish(
6984 1.229 msaitoh SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6985 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6986 1.119 msaitoh if (txr->txr_si == NULL) {
6987 1.119 msaitoh aprint_error_dev(dev,
6988 1.119 msaitoh "couldn't establish software interrupt\n");
6989 1.119 msaitoh error = ENXIO;
6990 1.119 msaitoh goto err_out;
6991 1.119 msaitoh }
6992 1.119 msaitoh }
6993 1.98 msaitoh que->que_si
6994 1.229 msaitoh = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6995 1.98 msaitoh ixgbe_handle_que, que);
6996 1.98 msaitoh if (que->que_si == NULL) {
6997 1.98 msaitoh aprint_error_dev(dev,
6998 1.185 msaitoh "couldn't establish software interrupt\n");
6999 1.119 msaitoh error = ENXIO;
7000 1.119 msaitoh goto err_out;
7001 1.98 msaitoh }
7002 1.98 msaitoh }
7003 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
7004 1.333 msaitoh error = workqueue_create(&sc->txr_wq, wqname,
7005 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7006 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7007 1.128 knakahar if (error) {
7008 1.280 msaitoh aprint_error_dev(dev,
7009 1.280 msaitoh "couldn't create workqueue for deferred Tx\n");
7010 1.128 knakahar goto err_out;
7011 1.128 knakahar }
7012 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
7013 1.128 knakahar
7014 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
7015 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
7016 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7017 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7018 1.128 knakahar if (error) {
7019 1.128 knakahar aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
7020 1.128 knakahar goto err_out;
7021 1.128 knakahar }
7022 1.44 msaitoh
7023 1.98 msaitoh /* and Link */
7024 1.98 msaitoh cpu_id++;
7025 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
7026 1.333 msaitoh sc->vector = vector;
7027 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
7028 1.98 msaitoh sizeof(intrbuf));
7029 1.98 msaitoh #ifdef IXGBE_MPSAFE
7030 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE,
7031 1.98 msaitoh true);
7032 1.98 msaitoh #endif
7033 1.98 msaitoh /* Set the link handler function */
7034 1.333 msaitoh sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
7035 1.333 msaitoh sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc,
7036 1.98 msaitoh intr_xname);
7037 1.333 msaitoh if (sc->osdep.ihs[vector] == NULL) {
7038 1.98 msaitoh aprint_error_dev(dev, "Failed to register LINK handler\n");
7039 1.119 msaitoh error = ENXIO;
7040 1.119 msaitoh goto err_out;
7041 1.98 msaitoh }
7042 1.98 msaitoh /* Round-robin affinity */
7043 1.98 msaitoh kcpuset_zero(affinity);
7044 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
7045 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
7046 1.119 msaitoh NULL);
7047 1.44 msaitoh
7048 1.98 msaitoh aprint_normal_dev(dev,
7049 1.98 msaitoh "for link, interrupting at %s", intrstr);
7050 1.98 msaitoh if (error == 0)
7051 1.98 msaitoh aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
7052 1.44 msaitoh else
7053 1.98 msaitoh aprint_normal("\n");
7054 1.44 msaitoh
7055 1.98 msaitoh kcpuset_destroy(affinity);
7056 1.119 msaitoh aprint_normal_dev(dev,
7057 1.119 msaitoh "Using MSI-X interrupts with %d vectors\n", vector + 1);
7058 1.99 msaitoh
7059 1.44 msaitoh return (0);
7060 1.119 msaitoh
7061 1.119 msaitoh err_out:
7062 1.119 msaitoh kcpuset_destroy(affinity);
7063 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
7064 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
7065 1.119 msaitoh return (error);
7066 1.99 msaitoh } /* ixgbe_allocate_msix */
7067 1.44 msaitoh
7068 1.99 msaitoh /************************************************************************
7069 1.99 msaitoh * ixgbe_configure_interrupts
7070 1.99 msaitoh *
7071 1.99 msaitoh * Setup MSI-X, MSI, or legacy interrupts (in that order).
7072 1.99 msaitoh * This will also depend on user settings.
7073 1.99 msaitoh ************************************************************************/
7074 1.44 msaitoh static int
7075 1.333 msaitoh ixgbe_configure_interrupts(struct ixgbe_softc *sc)
7076 1.44 msaitoh {
7077 1.333 msaitoh device_t dev = sc->dev;
7078 1.333 msaitoh struct ixgbe_mac_info *mac = &sc->hw.mac;
7079 1.98 msaitoh int want, queues, msgs;
7080 1.44 msaitoh
7081 1.99 msaitoh /* Default to 1 queue if MSI-X setup fails */
7082 1.333 msaitoh sc->num_queues = 1;
7083 1.99 msaitoh
7084 1.98 msaitoh /* Override by tuneable */
7085 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX))
7086 1.98 msaitoh goto msi;
7087 1.44 msaitoh
7088 1.118 msaitoh /*
7089 1.118 msaitoh 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1
7090 1.118 msaitoh 	 * to save an interrupt slot.
7091 1.118 msaitoh */
7092 1.118 msaitoh if (ncpu == 1)
7093 1.118 msaitoh goto msi;
7094 1.185 msaitoh
7095 1.99 msaitoh /* First try MSI-X */
7096 1.333 msaitoh msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
7097 1.98 msaitoh msgs = MIN(msgs, IXG_MAX_NINTR);
7098 1.98 msaitoh if (msgs < 2)
7099 1.98 msaitoh goto msi;
7100 1.44 msaitoh
7101 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7102 1.44 msaitoh
7103 1.98 msaitoh /* Figure out a reasonable auto config value */
7104 1.98 msaitoh queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
7105 1.44 msaitoh
7106 1.98 msaitoh #ifdef RSS
7107 1.98 msaitoh /* If we're doing RSS, clamp at the number of RSS buckets */
7108 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
7109 1.165 riastrad queues = uimin(queues, rss_getnumbuckets());
7110 1.98 msaitoh #endif
7111 1.99 msaitoh if (ixgbe_num_queues > queues) {
7112 1.333 msaitoh aprint_error_dev(sc->dev,
7113 1.319 msaitoh "ixgbe_num_queues (%d) is too large, "
7114 1.319 msaitoh "using reduced amount (%d).\n", ixgbe_num_queues, queues);
7115 1.99 msaitoh ixgbe_num_queues = queues;
7116 1.99 msaitoh }
7117 1.44 msaitoh
7118 1.98 msaitoh if (ixgbe_num_queues != 0)
7119 1.98 msaitoh queues = ixgbe_num_queues;
7120 1.98 msaitoh else
7121 1.165 riastrad queues = uimin(queues,
7122 1.165 riastrad uimin(mac->max_tx_queues, mac->max_rx_queues));
7123 1.44 msaitoh
7124 1.98 msaitoh /* reflect correct sysctl value */
7125 1.98 msaitoh ixgbe_num_queues = queues;
7126 1.44 msaitoh
7127 1.98 msaitoh /*
7128 1.99 msaitoh * Want one vector (RX/TX pair) per queue
7129 1.99 msaitoh * plus an additional for Link.
7130 1.99 msaitoh */
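	/*
	 * Worked example (illustrative, assuming no other clamp applies):
	 * with 8 CPUs and 16 MSI-X messages available, queues = 8, so
	 * want = 9 (8 queue vectors + 1 link vector) and msgs is trimmed
	 * to 9 below.
	 */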
7131 1.98 msaitoh want = queues + 1;
7132 1.98 msaitoh if (msgs >= want)
7133 1.98 msaitoh msgs = want;
7134 1.44 msaitoh else {
7135 1.186 msaitoh aprint_error_dev(dev, "MSI-X Configuration Problem, "
7136 1.319 msaitoh "%d vectors but %d queues wanted!\n", msgs, want);
7137 1.98 msaitoh goto msi;
7138 1.44 msaitoh }
7139 1.333 msaitoh sc->num_queues = queues;
7140 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7141 1.99 msaitoh return (0);
7142 1.44 msaitoh
7143 1.98 msaitoh /*
7144 1.99 msaitoh * MSI-X allocation failed or provided us with
7145 1.99 msaitoh 	 * fewer vectors than needed. Free MSI-X resources
7146 1.99 msaitoh * and we'll try enabling MSI.
7147 1.99 msaitoh */
7148 1.98 msaitoh msi:
7149 1.99 msaitoh /* Without MSI-X, some features are no longer supported */
7150 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
7151 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
7152 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7153 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
7154 1.99 msaitoh
7155 1.333 msaitoh msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag);
7156 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
7157 1.99 msaitoh if (msgs > 1)
7158 1.99 msaitoh msgs = 1;
7159 1.99 msaitoh if (msgs != 0) {
7160 1.99 msaitoh msgs = 1;
7161 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
7162 1.99 msaitoh return (0);
7163 1.99 msaitoh }
7164 1.99 msaitoh
7165 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7166 1.99 msaitoh aprint_error_dev(dev,
7167 1.99 msaitoh "Device does not support legacy interrupts.\n");
7168 1.99 msaitoh return 1;
7169 1.99 msaitoh }
7170 1.99 msaitoh
7171 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7172 1.99 msaitoh
7173 1.99 msaitoh return (0);
7174 1.99 msaitoh } /* ixgbe_configure_interrupts */
7175 1.44 msaitoh
7176 1.48 msaitoh
7177 1.99 msaitoh /************************************************************************
7178 1.99 msaitoh * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7179 1.99 msaitoh *
7180 1.99 msaitoh * Done outside of interrupt context since the driver might sleep
7181 1.99 msaitoh ************************************************************************/
7182 1.26 msaitoh static void
7183 1.98 msaitoh ixgbe_handle_link(void *context)
7184 1.26 msaitoh {
7185 1.333 msaitoh struct ixgbe_softc *sc = context;
7186 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
7187 1.26 msaitoh
7188 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
7189 1.257 msaitoh
7190 1.333 msaitoh IXGBE_EVC_ADD(&sc->link_workev, 1);
7191 1.333 msaitoh ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0);
7192 1.333 msaitoh ixgbe_update_link_status(sc);
7193 1.26 msaitoh
7194 1.98 msaitoh /* Re-enable link interrupts */
7195 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7196 1.99 msaitoh } /* ixgbe_handle_link */
7197 1.45 msaitoh
7198 1.161 kamil #if 0
7199 1.99 msaitoh /************************************************************************
7200 1.99 msaitoh * ixgbe_rearm_queues
7201 1.99 msaitoh ************************************************************************/
7202 1.160 msaitoh static __inline void
7203 1.333 msaitoh ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues)
7204 1.63 msaitoh {
7205 1.63 msaitoh u32 mask;
7206 1.63 msaitoh
7207 1.333 msaitoh switch (sc->hw.mac.type) {
7208 1.63 msaitoh case ixgbe_mac_82598EB:
7209 1.63 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7210 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
7211 1.63 msaitoh break;
7212 1.63 msaitoh case ixgbe_mac_82599EB:
7213 1.63 msaitoh case ixgbe_mac_X540:
7214 1.63 msaitoh case ixgbe_mac_X550:
7215 1.63 msaitoh case ixgbe_mac_X550EM_x:
7216 1.99 msaitoh case ixgbe_mac_X550EM_a:
7217 1.63 msaitoh mask = (queues & 0xFFFFFFFF);
7218 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
7219 1.63 msaitoh mask = (queues >> 32);
7220 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
7221 1.63 msaitoh break;
7222 1.63 msaitoh default:
7223 1.63 msaitoh break;
7224 1.63 msaitoh }
7225 1.99 msaitoh } /* ixgbe_rearm_queues */
7226 1.161 kamil #endif
7227